/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");

enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

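/* The helpers below handle the VHOST_CROSS_ENDIAN_LEGACY case, where a legacy
 * (pre virtio 1.0) guest may use a different endianness than the host.
 * vq->user_be records the byte order requested by userspace through
 * VHOST_SET_VRING_ENDIAN. */
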
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}

#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
		vq->is_le = true;
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = virtio_legacy_is_little_endian();
}

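/* The poll plumbing below hooks a vhost_poll into a backend file's wait
 * queue: vhost_poll_func registers the wait entry when the file is polled,
 * and vhost_poll_wakeup queues the associated work when the file becomes
 * ready with an event matching poll->mask. */
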
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!((unsigned long)key & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	INIT_LIST_HEAD(&work->node);
	work->fn = fn;
	init_waitqueue_head(&work->done);
	work->flushing = 0;
	work->queue_seq = work->done_seq = 0;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;
	int ret = 0;

	if (poll->wqh)
		return 0;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
	if (mask & POLLERR) {
		if (poll->wqh)
			remove_wait_queue(poll->wqh, &poll->wait);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);

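/* Flushing is based on sequence numbers: each queued work records queue_seq,
 * and the worker publishes done_seq after running it.  A flusher waits until
 * done_seq catches up with the queue_seq it sampled. */
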
static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
				unsigned seq)
{
	int left;

	spin_lock_irq(&dev->work_lock);
	left = seq - work->done_seq;
	spin_unlock_irq(&dev->work_lock);
	return left <= 0;
}

void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	unsigned seq;
	int flushing;

	spin_lock_irq(&dev->work_lock);
	seq = work->queue_seq;
	work->flushing++;
	spin_unlock_irq(&dev->work_lock);
	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
	spin_lock_irq(&dev->work_lock);
	flushing = --work->flushing;
	spin_unlock_irq(&dev->work_lock);
	BUG_ON(flushing < 0);
}
EXPORT_SYMBOL_GPL(vhost_work_flush);

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->work_lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &dev->work_list);
		work->queue_seq++;
		spin_unlock_irqrestore(&dev->work_lock, flags);
		wake_up_process(dev->worker);
	} else {
		spin_unlock_irqrestore(&dev->work_lock, flags);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !list_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->error = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->call = NULL;
	vq->log_ctx = NULL;
	vq->memory = NULL;
	vhost_reset_is_le(vq);
	vhost_disable_cross_endian(vq);
}

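/* The worker thread: runs in the owner's mm context and processes
 * vhost_work items from dev->work_list one at a time, publishing done_seq
 * and waking flushers after each item. */
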
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work = NULL;
	unsigned uninitialized_var(seq);
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&dev->work_lock);
		if (work) {
			work->done_seq = seq;
			if (work->flushing)
				wake_up_all(&work->done);
		}

		if (kthread_should_stop()) {
			spin_unlock_irq(&dev->work_lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		if (!list_empty(&dev->work_list)) {
			work = list_first_entry(&dev->work_list,
						struct vhost_work, node);
			list_del_init(&work->node);
			seq = work->queue_seq;
		} else
			work = NULL;
		spin_unlock_irq(&dev->work_lock);

		if (work) {
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		} else
			schedule();
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
				       GFP_KERNEL);
		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}

void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs)
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->log_file = NULL;
	dev->memory = NULL;
	dev->mm = NULL;
	spin_lock_init(&dev->work_lock);
	INIT_LIST_HEAD(&dev->work_list);
	dev->worker = NULL;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					POLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

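/* The worker thread is created by the kernel, so it starts outside the
 * owner's cgroups; the structure and work item below move it into all of
 * the owning task's cgroups via cgroup_attach_task_all(). */
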
struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

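/* Allocate an empty memory table (header only, no regions) that
 * vhost_dev_reset_owner() will install after cleanup. */
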
struct vhost_memory *vhost_dev_reset_owner_prepare(void)
{
	return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
{
	int i;

	vhost_dev_cleanup(dev, true);

	/* Restore memory to default empty mapping. */
	memory->nregions = 0;
	dev->memory = memory;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->memory = memory;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->error)
			fput(dev->vqs[i]->error);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx);
		if (dev->vqs[i]->call)
			fput(dev->vqs[i]->call);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	kvfree(dev->memory);
	dev->memory = NULL;
	WARN_ON(!list_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

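/* Dirty-log helpers: the log is a user-space bitmap with one bit per
 * VHOST_PAGE_SIZE page of guest memory.  log_access_ok() checks that the
 * part of the bitmap covering [addr, addr + sz) is writable. */
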
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return 0;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
			       int log_all)
{
	int i;

	if (!mem)
		return 0;

	for (i = 0; i < mem->nregions; ++i) {
		struct vhost_memory_region *m = mem->regions + i;
		unsigned long a = m->userspace_addr;
		if (m->memory_size > ULONG_MAX)
			return 0;
		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
				    m->memory_size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   m->guest_phys_addr,
						   m->memory_size))
			return 0;
	}
	return 1;
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
			    int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		int ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
		else
			ok = 1;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return 0;
	}
	return 1;
}

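/* Verify that the descriptor, avail and used rings for a queue of 'num'
 * entries are readable/writable in userspace.  The extra two bytes account
 * for the event index when VIRTIO_RING_F_EVENT_IDX is negotiated. */
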
static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			struct vring_desc __user *desc,
			struct vring_avail __user *avail,
			struct vring_used __user *used)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring + s) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring + s);
}

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->memory, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq,
			    void __user *log_base)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return vq_memory_access_ok(log_base, vq->memory,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					sizeof *vq->used +
					vq->num * sizeof *vq->used->ring + s));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq, vq->log_base);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

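/* Memory regions are kept sorted by guest_phys_addr in descending order so
 * that find_region() can use a binary search. */
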
static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
{
	const struct vhost_memory_region *r1 = p1, *r2 = p2;

	if (r1->guest_phys_addr < r2->guest_phys_addr)
		return 1;
	if (r1->guest_phys_addr > r2->guest_phys_addr)
		return -1;
	return 0;
}

static void *vhost_kvzalloc(unsigned long size)
{
	void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);

	if (!n)
		n = vzalloc(size);
	return n;
}

static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem, *oldmem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kvfree(newmem);
		return -EFAULT;
	}
	sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
		vhost_memory_reg_sort_cmp, NULL);

	if (!memory_access_ok(d, newmem, 0)) {
		kvfree(newmem);
		return -EFAULT;
	}
	oldmem = d->memory;
	d->memory = newmem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->memory = newmem;
		mutex_unlock(&d->vqs[i]->mutex);
	}
	kvfree(oldmem);
	return 0;
}

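/* Per-virtqueue ioctls: resolve the queue index, take the vq mutex and
 * handle ring setup (size, base, addresses) as well as the kick/call/error
 * eventfds and the endianness overrides. */
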
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs[idx];

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_ADDR:
		if (copy_from_user(&a, argp, sizeof a)) {
			r = -EFAULT;
			break;
		}
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		   data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}

		/* Make sure it's safe to cast pointers to vring types. */
		BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
		BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
		if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
		    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
		    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(vq, vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->call) {
			filep = vq->call;
			ctx = vq->call_ctx;
			vq->call = eventfp;
			vq->call_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->error) {
			filep = vq->error;
			vq->error = eventfp;
			ctx = vq->error_ctx;
			vq->error_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (ctx)
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	struct eventfd_ctx *ctx = NULL;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != d->log_file) {
			filep = d->log_file;
			d->log_file = eventfp;
			ctx = d->log_ctx;
			d->log_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		if (filep)
			fput(filep);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);

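/* Binary search over the region array (sorted by descending
 * guest_phys_addr) for the region containing 'addr'. */
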
static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
						     __u64 addr, __u32 len)
{
	const struct vhost_memory_region *reg;
	int start = 0, end = mem->nregions;

	while (start < end) {
		int slot = start + (end - start) / 2;
		reg = mem->regions + slot;
		if (addr >= reg->guest_phys_addr)
			end = slot;
		else
			start = slot + 1;
	}

	reg = mem->regions + start;
	if (addr >= reg->guest_phys_addr &&
		reg->guest_phys_addr + reg->memory_size > addr)
		return reg;
	return NULL;
}

/* TODO: This is really inefficient.  We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}

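/* Mark every page touched by a guest write of 'write_length' bytes at
 * guest address 'write_address' as dirty in the user-space log bitmap. */
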
static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);

static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;
	if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0)
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
	if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

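/* Prime a virtqueue for use once a backend is set: fix up endianness,
 * publish used_flags and read back the current used index. */
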
int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
	__virtio16 last_used_idx;
	int r;
	bool is_le = vq->is_le;

	if (!vq->private_data) {
		vhost_reset_is_le(vq);
		return 0;
	}

	vhost_init_is_le(vq);

	r = vhost_update_used_flags(vq);
	if (r)
		goto err;
	vq->signalled_used_valid = false;
	if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
		r = -EFAULT;
		goto err;
	}
	r = __get_user(last_used_idx, &vq->used->idx);
	if (r)
		goto err;
	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
	return 0;
err:
	vq->is_le = is_le;
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);

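/* Translate a guest-physical range into one or more userspace iovecs by
 * walking the memory regions table.  Returns the number of iovecs used or
 * a negative error. */
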
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size)
{
	const struct vhost_memory_region *reg;
	struct vhost_memory *mem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	mem = vq->memory;
	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}
		reg = find_region(mem, addr, len);
		if (unlikely(!reg)) {
			ret = -EFAULT;
			break;
		}
		_iov = iov + ret;
		size = reg->memory_size - addr + reg->guest_phys_addr;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(reg->userspace_addr + addr - reg->guest_phys_addr);
		s += size;
		addr += size;
		++ret;
	}

	return ret;
}

/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = vhost16_to_cpu(vq, desc->next);
	/* Make sure compiler knows to grab that: we don't want it changing! */
	/* We will use the result as an index in an array, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	return next;
}

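/* Walk an indirect descriptor table: translate the table itself into
 * vq->indirect, then read each descriptor and append its translation to the
 * caller's iovec, updating in_num/out_num and the optional dirty-log
 * entries. */
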
static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	u32 len = vhost32_to_cpu(vq, indirect->len);
	struct iov_iter from;
	int ret;

	/* Sanity check */
	if (unlikely(len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
			     UIO_MAXIOV);
	if (unlikely(ret < 0)) {
		vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}
	iov_iter_init(&from, READ, vq->indirect, ret, len);

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
			     sizeof(desc))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d indirect idx %d\n",
			       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);
	return 0;
}

/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	__virtio16 avail_idx;
	__virtio16 ring_head;
	int ret;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;
	if (unlikely(__get_user(avail_idx, &vq->avail->idx))) {
		vq_err(vq, "Failed to access avail idx at %p\n",
		       &vq->avail->idx);
		return -EFAULT;
	}
	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
		vq_err(vq, "Guest moved used index from %u to %u",
		       last_avail_idx, vq->avail_idx);
		return -EFAULT;
	}

	/* If there's nothing new since last we looked, return invalid. */
	if (vq->avail_idx == last_avail_idx)
		return vq->num;

	/* Only get avail ring entries after they have been exposed by guest. */
	smp_rmb();

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(__get_user(ring_head,
				&vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	head = vhost16_to_cpu(vq, ring_head);

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				vq_err(vq, "Failure detected "
				       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d descriptor idx %d\n",
			       ret, i);
			return ret;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem heads = {
		cpu_to_vhost32(vq, head),
		cpu_to_vhost32(vq, len)
	};

	return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);

static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	struct vring_used_elem __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;
	if (count == 1) {
		if (__put_user(heads[0].id, &used->id)) {
			vq_err(vq, "Failed to write used id");
			return -EFAULT;
		}
		if (__put_user(heads[0].len, &used->len)) {
			vq_err(vq, "Failed to write used len");
			return -EFAULT;
		}
	} else if (__copy_to_user(used, heads, count * sizeof *used)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}

/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx & (vq->num - 1);
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);

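/* Decide whether the guest needs an interrupt for the newly used entries:
 * with event index, compare against the guest's used_event; otherwise
 * honour VRING_AVAIL_F_NO_INTERRUPT. */
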
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new;
	__virtio16 event;
	bool v;

	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		__virtio16 flags;
		if (__get_user(flags, &vq->avail->flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	if (unlikely(!v))
		return true;

	if (__get_user(event, vhost_used_event(vq))) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}

/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest tell them we used something up. */
	if (vq->call_ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);

/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);

/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);

/* return true if we're sure that available ring is empty */
bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	r = __get_user(avail_idx, &vq->avail->idx);
	if (r)
		return false;

	return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);

/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__virtio16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		r = vhost_update_avail_event(vq, vq->avail_idx);
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = __get_user(avail_idx, &vq->avail->idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);

/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		r = vhost_update_used_flags(vq);
		if (r)
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);

static int __init vhost_init(void)
{
	return 0;
}

static void __exit vhost_exit(void)
{
}

module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");