/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>

#include "vhost.h"

enum {
	VHOST_MEMORY_MAX_NREGIONS = 64,
	VHOST_MEMORY_F_LOG = 0x1,
};

static struct workqueue_struct *vhost_workqueue;

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;
	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll;
	poll = container_of(wait, struct vhost_poll, wait);
	if (!((unsigned long)key & poll->mask))
		return 0;

	queue_work(vhost_workqueue, &poll->work);
	return 0;
}

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
		     unsigned long mask)
{
	INIT_WORK(&poll->work, func);
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
}

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
void vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
}

/* Stop polling a file. After this function returns, it becomes safe to drop
 * the file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	remove_wait_queue(poll->wqh, &poll->wait);
}

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	flush_work(&poll->work);
}

void vhost_poll_queue(struct vhost_poll *poll)
{
	queue_work(vhost_workqueue, &poll->work);
}
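
/* A backend (vhost-net, say) typically ties one of these poll structures to
 * the file it services, roughly along the lines of:
 *
 *	vhost_poll_init(&poll, handle_rx, POLLIN);
 *	vhost_poll_start(&poll, sock_file);
 *	...
 *	vhost_poll_stop(&poll);
 *	vhost_poll_flush(&poll);
 *
 * handle_rx and sock_file are only placeholders here; the real callers live
 * in the per-backend code, which owns the file reference throughout. */
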
static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->last_avail_idx = 0;
	vq->last_used_idx = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->error_ctx = NULL;
}

long vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue *vqs, int nvqs)
{
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_file = NULL;

	for (i = 0; i < dev->nvqs; ++i) {
		dev->vqs[i].dev = dev;
		mutex_init(&dev->vqs[i].mutex);
		vhost_vq_reset(dev, dev->vqs + i);
		if (dev->vqs[i].handle_kick)
			vhost_poll_init(&dev->vqs[i].poll,
					dev->vqs[i].handle_kick,
					POLLIN);
	}
	return 0;
}

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}

/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
	/* Is there an owner already? */
	if (dev->mm)
		return -EBUSY;
	/* No owner, become one */
	dev->mm = get_task_mm(current);
	return 0;
}

/* Caller should have device mutex */
long vhost_dev_reset_owner(struct vhost_dev *dev)
{
	struct vhost_memory *memory;

	/* Restore memory to default empty mapping. */
	memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
	if (!memory)
		return -ENOMEM;

	vhost_dev_cleanup(dev);

	memory->nregions = 0;
	dev->memory = memory;
	return 0;
}

/* Caller should have device mutex */
void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
			vhost_poll_stop(&dev->vqs[i].poll);
			vhost_poll_flush(&dev->vqs[i].poll);
		}
		if (dev->vqs[i].error_ctx)
			eventfd_ctx_put(dev->vqs[i].error_ctx);
		if (dev->vqs[i].error)
			fput(dev->vqs[i].error);
		if (dev->vqs[i].kick)
			fput(dev->vqs[i].kick);
		if (dev->vqs[i].call_ctx)
			eventfd_ctx_put(dev->vqs[i].call_ctx);
		if (dev->vqs[i].call)
			fput(dev->vqs[i].call);
		vhost_vq_reset(dev, dev->vqs + i);
	}
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	kfree(dev->memory);
	dev->memory = NULL;
}

214 /* No one will access memory at this point */
222 static int log_access_ok(void __user
*log_base
, u64 addr
, unsigned long sz
)
224 u64 a
= addr
/ VHOST_PAGE_SIZE
/ 8;
225 /* Make sure 64 bit math will not overflow. */
226 if (a
> ULONG_MAX
- (unsigned long)log_base
||
227 a
+ (unsigned long)log_base
> ULONG_MAX
)
230 return access_ok(VERIFY_WRITE
, log_base
+ a
,
231 (sz
+ VHOST_PAGE_SIZE
* 8 - 1) / VHOST_PAGE_SIZE
/ 8);
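
/* Worked example of the arithmetic above, assuming VHOST_PAGE_SIZE == 4096:
 * a guest write at address 0x21000 dirties page 33, i.e. bit 33 of the log,
 * which lives in byte 33 / 8 = 4 of the bitmap, hence addr / VHOST_PAGE_SIZE
 * / 8 as the byte offset from log_base.  The length expression rounds sz up
 * to whole bitmap bytes, each of which covers 8 pages (32KB) of guest
 * memory. */
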
/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
			       int log_all)
{
	int i;

	for (i = 0; i < mem->nregions; ++i) {
		struct vhost_memory_region *m = mem->regions + i;
		unsigned long a = m->userspace_addr;
		if (m->memory_size > ULONG_MAX)
			return 0;
		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
				    m->memory_size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   m->guest_phys_addr,
						   m->memory_size))
			return 0;
	}
	return 1;
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
			    int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		int ok;
		mutex_lock(&d->vqs[i].mutex);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i].private_data)
			ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
						 log_all);
		else
			ok = 1;
		mutex_unlock(&d->vqs[i].mutex);
		if (!ok)
			return 0;
	}
	return 1;
}

static int vq_access_ok(unsigned int num,
			struct vring_desc __user *desc,
			struct vring_avail __user *avail,
			struct vring_used __user *used)
{
	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring);
}
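
/* The sizes above follow the vring layout: the descriptor table is num
 * 16-byte entries, the available ring is a 4-byte header plus num 16-bit
 * entries, and the used ring is a 4-byte header plus num 8-byte elements.
 * Only the used ring needs to be writable by the host. */
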
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->memory, 1);
}

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
{
	return vq_memory_access_ok(log_base, vq->dev->memory,
			   vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					sizeof *vq->used +
					vq->num * sizeof *vq->used->ring));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq, vq->log_base);
}

static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem, *oldmem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	long r;

	r = copy_from_user(&mem, m, size);
	if (r)
		return r;
	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
		return -E2BIG;
	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	r = copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions);
	if (r) {
		kfree(newmem);
		return r;
	}

	if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
		kfree(newmem);
		return -EFAULT;
	}
	oldmem = d->memory;
	rcu_assign_pointer(d->memory, newmem);
	synchronize_rcu();
	kfree(oldmem);
	return 0;
}

static int init_used(struct vhost_virtqueue *vq,
		     struct vring_used __user *used)
{
	int r = put_user(vq->used_flags, &used->flags);
	if (r)
		return r;
	return get_user(vq->last_used_idx, &used->idx);
}

static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL,
		    *pollstart = NULL, *pollstop = NULL;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs + idx;

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		r = copy_from_user(&s, argp, sizeof s);
		if (r < 0)
			break;
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		r = copy_from_user(&s, argp, sizeof s);
		if (r < 0)
			break;
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		r = copy_to_user(argp, &s, sizeof s);
		break;
	case VHOST_SET_VRING_ADDR:
		r = copy_from_user(&a, argp, sizeof a);
		if (r < 0)
			break;
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		 * data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}
		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		r = init_used(vq, (struct vring_used __user *)(unsigned long)
			      a.used_user_addr);
		if (r)
			break;
		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		r = copy_from_user(&f, argp, sizeof f);
		if (r < 0)
			break;
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = filep = vq->kick;
			pollstart = vq->kick = eventfp;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		r = copy_from_user(&f, argp, sizeof f);
		if (r < 0)
			break;
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->call) {
			filep = vq->call;
			ctx = vq->call_ctx;
			vq->call = eventfp;
			vq->call_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ERR:
		r = copy_from_user(&f, argp, sizeof f);
		if (r < 0)
			break;
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->error) {
			filep = vq->error;
			ctx = vq->error_ctx;
			vq->error = eventfp;
			vq->error_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (ctx)
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
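
/* Rough sketch of the per-ring setup a userspace driver (e.g. qemu) performs
 * against the vhost fd once VHOST_SET_OWNER and VHOST_SET_MEM_TABLE are done:
 *
 *	struct vhost_vring_state num  = { .index = 0, .num = 256 };
 *	struct vhost_vring_state base = { .index = 0, .num = 0 };
 *	struct vhost_vring_addr  addr = { .index = 0, ... ring addresses ... };
 *	struct vhost_vring_file  kick = { .index = 0, .fd = kick_eventfd };
 *	struct vhost_vring_file  call = { .index = 0, .fd = call_eventfd };
 *
 *	ioctl(vhost_fd, VHOST_SET_VRING_NUM,  &num);
 *	ioctl(vhost_fd, VHOST_SET_VRING_BASE, &base);
 *	ioctl(vhost_fd, VHOST_SET_VRING_ADDR, &addr);
 *	ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick);
 *	ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call);
 *
 * kick_eventfd/call_eventfd are eventfds created by userspace; the exact
 * ordering and the backend-specific "set backend" step are up to the
 * caller. */
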
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct file *eventfp, *filep = NULL;
	struct eventfd_ctx *ctx = NULL;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		r = copy_from_user(&p, argp, sizeof p);
		if (r < 0)
			break;
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs + i;
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != d->log_file) {
			filep = d->log_file;
			ctx = d->log_ctx;
			d->log_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i].mutex);
			d->vqs[i].log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i].mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		if (filep)
			fput(filep);
		break;
	default:
		r = vhost_set_vring(d, ioctl, argp);
		break;
	}
done:
	return r;
}

static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
						     __u64 addr, __u32 len)
{
	struct vhost_memory_region *reg;
	int i;

	/* linear search is not brilliant, but we really have on the order of 6
	 * regions in practice */
	for (i = 0; i < mem->nregions; ++i) {
		reg = mem->regions + i;
		if (reg->guest_phys_addr <= addr &&
		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
			return reg;
	}
	return NULL;
}

/* TODO: This is really inefficient.  We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	base = kmap_atomic(page, KM_USER0);
	set_bit(bit, base);
	kunmap_atomic(base, KM_USER0);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}

static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	int r;

	if (!write_length)
		return 0;
	write_address /= VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_address / 8;
		int bit = write_address % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		/* write_address is in page-sized units here, so step one page. */
		++write_address;
	}
	return r;
}
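
/* Example: with VHOST_PAGE_SIZE == 4096, logging a 10000-byte write that
 * starts at guest address 0x3000 marks pages 3, 4 and 5 dirty, i.e. bits
 * 3-5 of the bitmap at log_base; each loop iteration sets one page bit and
 * the loop stops once the remaining length fits in a single page. */
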
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len)
			return 0;
	}
	if (vq->log_ctx)
		eventfd_signal(vq->log_ctx, 1);
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}

int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
		   struct iovec iov[], int iov_size)
{
	const struct vhost_memory_region *reg;
	struct vhost_memory *mem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	rcu_read_lock();

	mem = rcu_dereference(dev->memory);
	while ((u64)len > s) {
		u64 size;
		if (ret >= iov_size) {
			ret = -ENOBUFS;
			break;
		}
		reg = find_region(mem, addr, len);
		if (!reg) {
			ret = -EFAULT;
			break;
		}
		_iov = iov + ret;
		size = reg->memory_size - addr + reg->guest_phys_addr;
		_iov->iov_len = min((u64)len, size);
		_iov->iov_base = (void *)(unsigned long)
			(reg->userspace_addr + addr - reg->guest_phys_addr);
		s += size;
		addr += size;
		++ret;
	}

	rcu_read_unlock();
	return ret;
}
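
/* Translation example: for a region with guest_phys_addr 0x100000,
 * userspace_addr 0x7f0000100000 and memory_size 0x200000 (values made up for
 * illustration), guest address 0x180000 maps to 0x7f0000180000, and
 * 0x300000 - 0x180000 = 0x180000 bytes remain in the region, so a longer
 * buffer spills into the next iovec entry (and possibly the next region). */
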
/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & VRING_DESC_F_NEXT))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = desc->next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	/* We will use the result as an index in an array, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	return next;
}

static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			     struct iovec iov[], unsigned int iov_size,
			     unsigned int *out_num, unsigned int *in_num,
			     struct vhost_log *log, unsigned int *log_num,
			     struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	int ret;

	/* Sanity check */
	if (indirect->len % sizeof desc) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)indirect->len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
			     ARRAY_SIZE(vq->indirect));
	if (ret < 0) {
		vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = indirect->len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (count > USHORT_MAX + 1) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (++found > count) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
				     sizeof desc)) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}
		if (desc.flags & VRING_DESC_F_INDIRECT) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (ret < 0) {
			vq_err(vq, "Translation failure %d indirect idx %d\n",
			       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (desc.flags & VRING_DESC_F_WRITE) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (*in_num) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);

	return 0;
}
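
/* An indirect descriptor points at a table of ordinary descriptors held in
 * guest memory: indirect->addr is the guest-physical address of that table
 * and indirect->len its size in bytes, so len / sizeof(struct vring_desc)
 * gives the entry count walked above.  The table is translated once into
 * vq->indirect and then read entry by entry with memcpy_fromiovec(). */
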
/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which
 * is never a valid descriptor number) if none was found. */
unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			   struct iovec iov[], unsigned int iov_size,
			   unsigned int *out_num, unsigned int *in_num,
			   struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	int ret;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;
	if (get_user(vq->avail_idx, &vq->avail->idx)) {
		vq_err(vq, "Failed to access avail idx at %p\n",
		       &vq->avail->idx);
		return vq->num;
	}

	if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) {
		vq_err(vq, "Guest moved used index from %u to %u",
		       last_avail_idx, vq->avail_idx);
		return vq->num;
	}

	/* If there's nothing new since last we looked, return invalid. */
	if (vq->avail_idx == last_avail_idx)
		return vq->num;

	/* Only get avail ring entries after they have been exposed by guest. */
	smp_rmb();

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return vq->num;
	}

	/* If their number is silly, that's an error. */
	if (head >= vq->num) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return vq->num;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (i >= vq->num) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return vq->num;
		}
		if (++found > vq->num) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return vq->num;
		}
		ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
		if (ret) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return vq->num;
		}
		if (desc.flags & VRING_DESC_F_INDIRECT) {
			ret = get_indirect(dev, vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (ret < 0) {
				vq_err(vq, "Failure detected "
				       "in indirect descriptor at idx %d\n", i);
				return vq->num;
			}
			continue;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (ret < 0) {
			vq_err(vq, "Translation failure %d descriptor idx %d\n",
			       ret, i);
			return vq->num;
		}
		if (desc.flags & VRING_DESC_F_WRITE) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (*in_num) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return vq->num;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;
	return head;
}

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
{
	vq->last_avail_idx--;
}
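
/* A backend's kick handler is expected to drive this in a loop, roughly:
 *
 *	head = vhost_get_vq_desc(dev, vq, vq->iov, ARRAY_SIZE(vq->iov),
 *				 &out, &in, NULL, NULL);
 *	if (head == vq->num)
 *		break;			(nothing left to process)
 *	...consume or fill the iovecs...
 *	vhost_add_used_and_signal(dev, vq, head, len);
 *
 * with vhost_discard_vq_desc() used to push a descriptor back when the
 * backend cannot complete it (e.g. a transient socket error).  This is only
 * a sketch; the real loops live in the backend code. */
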
/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem *used;

	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
	 * next entry in that used ring. */
	used = &vq->used->ring[vq->last_used_idx % vq->num];
	if (put_user(head, &used->id)) {
		vq_err(vq, "Failed to write used id");
		return -EFAULT;
	}
	if (put_user(len, &used->len)) {
		vq_err(vq, "Failed to write used len");
		return -EFAULT;
	}
	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr + ((void *)used - (void *)vq->used),
			  sizeof *used);
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	vq->last_used_idx++;
	return 0;
}
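
/* Note the ordering above: the used ring entry is written first, then a
 * write barrier, and only then is used->idx advanced, so a guest that
 * observes the new index is guaranteed to see the entry it covers.  The
 * dirty-log writes mirror both stores so a migrating userspace sees the
 * entry and the index page as dirty. */
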
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 flags;

	if (get_user(flags, &vq->avail->flags)) {
		vq_err(vq, "Failed to get flags");
		return;
	}

	/* If they don't want an interrupt, don't signal, unless empty. */
	if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
	    (vq->avail_idx != vq->last_avail_idx ||
	     !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
		return;

	/* Signal the Guest tell them we used something up. */
	if (vq->call_ctx)
		eventfd_signal(vq->call_ctx, 1);
}

/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}

/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_virtqueue *vq)
{
	u16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	r = put_user(vq->used_flags, &vq->used->flags);
	if (r) {
		vq_err(vq, "Failed to enable notification at %p: %d\n",
		       &vq->used->flags, r);
		return false;
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = get_user(avail_idx, &vq->avail->idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return avail_idx != vq->last_avail_idx;
}
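
/* Typical notification dance for a backend: vhost_disable_notify() while it
 * is busy draining the ring, then vhost_enable_notify() when it runs out of
 * work; a true return means new buffers slipped in between the last check
 * and re-enabling, so the backend should go around again rather than sleep
 * waiting for a kick that may never arrive. */
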
/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	r = put_user(vq->used_flags, &vq->used->flags);
	if (r)
		vq_err(vq, "Failed to disable notification at %p: %d\n",
		       &vq->used->flags, r);
}

int vhost_init(void)
{
	vhost_workqueue = create_singlethread_workqueue("vhost");
	if (!vhost_workqueue)
		return -ENOMEM;
	return 0;
}

void vhost_cleanup(void)
{
	destroy_workqueue(vhost_workqueue);
}