/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
enum {
	VHOST_MEMORY_MAX_NREGIONS = 64,
	VHOST_MEMORY_F_LOG = 0x1,
};

static unsigned vhost_zcopy_mask __read_mostly;
#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
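/* When VIRTIO_RING_F_EVENT_IDX is negotiated, the spec places a 16-bit
 * "used_event" right after the avail ring and a 16-bit "avail_event" right
 * after the used ring, which is what the two macros above point at.  Layout
 * sketch only (the real structs in <linux/virtio_ring.h> leave the trailing
 * field implicit, past ring[num]):
 *
 *	struct vring_avail { __u16 flags, idx, ring[num];  __u16 used_event; };
 *	struct vring_used  { __u16 flags, idx;
 *			     struct vring_used_elem ring[num];
 *			     __u16 avail_event; };
 */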
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}
static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!((unsigned long)key & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}
static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	INIT_LIST_HEAD(&work->node);
	work->fn = fn;
	init_waitqueue_head(&work->done);
	work->flushing = 0;
	work->queue_seq = work->done_seq = 0;
}
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;

	vhost_work_init(&poll->work, fn);
}
/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
void vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
}
/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	remove_wait_queue(poll->wqh, &poll->wait);
}
static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
				unsigned seq)
{
	int left;

	spin_lock_irq(&dev->work_lock);
	left = seq - work->done_seq;
	spin_unlock_irq(&dev->work_lock);
	return left <= 0;
}
static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	unsigned seq;
	int flushing;

	spin_lock_irq(&dev->work_lock);
	seq = work->queue_seq;
	work->flushing++;
	spin_unlock_irq(&dev->work_lock);
	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
	spin_lock_irq(&dev->work_lock);
	flushing = --work->flushing;
	spin_unlock_irq(&dev->work_lock);
	BUG_ON(flushing < 0);
}
/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
static inline void vhost_work_queue(struct vhost_dev *dev,
				    struct vhost_work *work)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->work_lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &dev->work_list);
		work->queue_seq++;
		wake_up_process(dev->worker);
	}
	spin_unlock_irqrestore(&dev->work_lock, flags);
}
void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
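/* Illustrative sketch (not part of the original driver): a backend such as
 * drivers/vhost/net.c typically embeds one struct vhost_poll per direction,
 * points it at the handler that must run in the worker thread, and (re)arms
 * it on the backend file when the backend is attached.  The function and
 * parameter names below are hypothetical.
 */
static inline void example_backend_arm_poll(struct vhost_dev *dev,
					    struct vhost_poll *poll,
					    struct file *sock_file,
					    vhost_work_fn_t handle_rx)
{
	/* Run handle_rx in the vhost worker whenever the file becomes readable. */
	vhost_poll_init(poll, handle_rx, POLLIN, dev);
	/* Register on the file's wait queue; the caller keeps the file
	 * reference until vhost_poll_stop() + vhost_poll_flush(). */
	vhost_poll_start(poll, sock_file);
}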
static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->last_avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->error_ctx = NULL;
}
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work = NULL;
	unsigned uninitialized_var(seq);

	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&dev->work_lock);
		if (work) {
			work->done_seq = seq;
			if (work->flushing)
				wake_up_all(&work->done);
		}

		if (kthread_should_stop()) {
			spin_unlock_irq(&dev->work_lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		if (!list_empty(&dev->work_list)) {
			work = list_first_entry(&dev->work_list,
						struct vhost_work, node);
			list_del_init(&work->node);
			seq = work->queue_seq;
		} else
			work = NULL;
		spin_unlock_irq(&dev->work_lock);

		if (work) {
			__set_current_state(TASK_RUNNING);
			work->fn(work);
		} else
			schedule();
	}
	unuse_mm(dev->mm);
	return 0;
}
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
	kfree(vq->ubuf_info);
	vq->ubuf_info = NULL;
}
void vhost_enable_zcopy(int vq)
{
	vhost_zcopy_mask |= 0x1 << vq;
}
/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	int i;
	bool zcopy;

	for (i = 0; i < dev->nvqs; ++i) {
		dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
					       UIO_MAXIOV, GFP_KERNEL);
		dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
					  GFP_KERNEL);
		dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
					    UIO_MAXIOV, GFP_KERNEL);
		zcopy = vhost_zcopy_mask & (0x1 << i);
		if (zcopy)
			dev->vqs[i].ubuf_info =
				kmalloc(sizeof *dev->vqs[i].ubuf_info *
					UIO_MAXIOV, GFP_KERNEL);
		if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
			!dev->vqs[i].heads ||
			(zcopy && !dev->vqs[i].ubuf_info))
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(&dev->vqs[i]);
	return -ENOMEM;
}
static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(&dev->vqs[i]);
}
long vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue *vqs, int nvqs)
{
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->log_file = NULL;
	dev->memory = NULL;
	dev->mm = NULL;
	spin_lock_init(&dev->work_lock);
	INIT_LIST_HEAD(&dev->work_list);
	dev->worker = NULL;

	for (i = 0; i < dev->nvqs; ++i) {
		dev->vqs[i].log = NULL;
		dev->vqs[i].indirect = NULL;
		dev->vqs[i].heads = NULL;
		dev->vqs[i].ubuf_info = NULL;
		dev->vqs[i].dev = dev;
		mutex_init(&dev->vqs[i].mutex);
		vhost_vq_reset(dev, dev->vqs + i);
		if (dev->vqs[i].handle_kick)
			vhost_poll_init(&dev->vqs[i].poll,
					dev->vqs[i].handle_kick, POLLIN, dev);
	}

	return 0;
}
/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};
static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}
static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}
/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (dev->mm) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}
/* Caller should have device mutex */
long vhost_dev_reset_owner(struct vhost_dev *dev)
{
	struct vhost_memory *memory;

	/* Restore memory to default empty mapping. */
	memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
	if (!memory)
		return -ENOMEM;

	vhost_dev_cleanup(dev);

	memory->nregions = 0;
	RCU_INIT_POINTER(dev->memory, memory);
	return 0;
}
/* In case the lower device driver completes DMA out of order for some reason:
 * upend_idx is used to track the end of used idx, done_idx is used to track
 * the head of used idx. Once the lower device has completed DMA contiguously,
 * we add the buffers to the used ring and signal the guest.
 */
int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
{
	int i;
	int j = 0;

	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if ((vq->heads[i].len == VHOST_DMA_DONE_LEN)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			vhost_add_used_and_signal(vq->dev, vq,
						  vq->heads[i].id, 0);
			++j;
		} else
			break;
	}
	if (j)
		vq->done_idx = i;
	return j;
}
/* Caller should have device mutex */
void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
			vhost_poll_stop(&dev->vqs[i].poll);
			vhost_poll_flush(&dev->vqs[i].poll);
		}
		/* Wait for all lower device DMAs done. */
		if (dev->vqs[i].ubufs)
			vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);

		/* Signal guest as appropriate. */
		vhost_zerocopy_signal_used(&dev->vqs[i]);

		if (dev->vqs[i].error_ctx)
			eventfd_ctx_put(dev->vqs[i].error_ctx);
		if (dev->vqs[i].error)
			fput(dev->vqs[i].error);
		if (dev->vqs[i].kick)
			fput(dev->vqs[i].kick);
		if (dev->vqs[i].call_ctx)
			eventfd_ctx_put(dev->vqs[i].call_ctx);
		if (dev->vqs[i].call)
			fput(dev->vqs[i].call);
		vhost_vq_reset(dev, dev->vqs + i);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	kfree(rcu_dereference_protected(dev->memory,
					lockdep_is_held(&dev->mutex)));
	RCU_INIT_POINTER(dev->memory, NULL);
	WARN_ON(!list_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return 0;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
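/* A worked example of the layout checked above (a sketch, assuming
 * VHOST_PAGE_SIZE is 4096 as defined in vhost.h): the dirty log is a
 * userspace bitmap with one bit per VHOST_PAGE_SIZE page of guest memory, so
 * guest address 0x12345678 lands on page 0x12345, i.e. byte 0x2468 of the
 * log, bit 5.  The hypothetical helper below just restates that arithmetic.
 */
static inline void example_log_position(u64 guest_addr,
					u64 *byte, unsigned *bit)
{
	u64 page = guest_addr / VHOST_PAGE_SIZE;	/* dirty page number */

	*byte = page / 8;	/* byte offset within the log bitmap */
	*bit = page % 8;	/* bit within that byte, as used by log_write() */
}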
/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
			       int log_all)
{
	int i;

	if (!mem)
		return 0;

	for (i = 0; i < mem->nregions; ++i) {
		struct vhost_memory_region *m = mem->regions + i;
		unsigned long a = m->userspace_addr;
		if (m->memory_size > ULONG_MAX)
			return 0;
		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
				    m->memory_size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   m->guest_phys_addr,
						   m->memory_size))
			return 0;
	}
	return 1;
}
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
			    int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		int ok;
		mutex_lock(&d->vqs[i].mutex);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i].private_data)
			ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
						 log_all);
		else
			ok = 1;
		mutex_unlock(&d->vqs[i].mutex);
		if (!ok)
			return 0;
	}
	return 1;
}
static int vq_access_ok(struct vhost_dev *d, unsigned int num,
			struct vring_desc __user *desc,
			struct vring_avail __user *avail,
			struct vring_used __user *used)
{
	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring + s) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring + s);
}
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	struct vhost_memory *mp;

	mp = rcu_dereference_protected(dev->memory,
				       lockdep_is_held(&dev->mutex));
	return memory_access_ok(dev, mp, 1);
}
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
			    void __user *log_base)
{
	struct vhost_memory *mp;
	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	mp = rcu_dereference_protected(vq->dev->memory,
				       lockdep_is_held(&vq->mutex));
	return vq_memory_access_ok(log_base, mp,
				   vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
						sizeof *vq->used +
						vq->num * sizeof *vq->used->ring + s));
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq->dev, vq, vq->log_base);
}
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem, *oldmem;
	unsigned long size = offsetof(struct vhost_memory, regions);

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
		return -E2BIG;
	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kfree(newmem);
		return -EFAULT;
	}

	if (!memory_access_ok(d, newmem,
			      vhost_has_feature(d, VHOST_F_LOG_ALL))) {
		kfree(newmem);
		return -EFAULT;
	}
	oldmem = rcu_dereference_protected(d->memory,
					   lockdep_is_held(&d->mutex));
	rcu_assign_pointer(d->memory, newmem);
	synchronize_rcu();
	kfree(oldmem);
	return 0;
}
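/* For reference, a hedged sketch of the userspace side of VHOST_SET_MEM_TABLE
 * (how a hypervisor describes guest memory to this driver).  Field names are
 * from <linux/vhost.h>; "vhost_fd", "guest_ram" and "guest_ram_size" are
 * placeholders and error handling is omitted:
 *
 *	struct vhost_memory *mem = calloc(1, sizeof(*mem) +
 *					  sizeof(struct vhost_memory_region));
 *	mem->nregions = 1;
 *	mem->regions[0].guest_phys_addr = 0;
 *	mem->regions[0].memory_size     = guest_ram_size;
 *	mem->regions[0].userspace_addr  = (__u64)(unsigned long)guest_ram;
 *	ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
 */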
static int init_used(struct vhost_virtqueue *vq,
		     struct vring_used __user *used)
{
	int r = put_user(vq->used_flags, &used->flags);

	if (r)
		return r;
	vq->signalled_used_valid = false;
	return get_user(vq->last_used_idx, &used->idx);
}
static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL,
		    *pollstart = NULL, *pollstop = NULL;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs + idx;

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_ADDR:
		if (copy_from_user(&a, argp, sizeof a)) {
			r = -EFAULT;
			break;
		}
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		   data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}
		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(d, vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		r = init_used(vq, (struct vring_used __user *)(unsigned long)
			      a.used_user_addr);
		if (r)
			break;
		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = filep = vq->kick;
			pollstart = vq->kick = eventfp;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->call) {
			filep = vq->call;
			ctx = vq->call_ctx;
			vq->call = eventfp;
			vq->call_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->error) {
			filep = vq->error;
			vq->error = eventfp;
			ctx = vq->error_ctx;
			vq->error_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (ctx)
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct file *eventfp, *filep = NULL;
	struct eventfd_ctx *ctx = NULL;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs + i;
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(d, vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != d->log_file) {
			filep = d->log_file;
			ctx = d->log_ctx;
			d->log_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i].mutex);
			d->vqs[i].log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i].mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		if (filep)
			fput(filep);
		break;
	default:
		r = vhost_set_vring(d, ioctl, argp);
		break;
	}
done:
	return r;
}
static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
						     __u64 addr, __u32 len)
{
	struct vhost_memory_region *reg;
	int i;

	/* linear search is not brilliant, but we really have on the order of 6
	 * regions in practice */
	for (i = 0; i < mem->nregions; ++i) {
		reg = mem->regions + i;
		if (reg->guest_phys_addr <= addr &&
		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
			return reg;
	}
	return NULL;
}
/* TODO: This is really inefficient.  We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page, KM_USER0);
	set_bit(bit, base);
	kunmap_atomic(base, KM_USER0);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}
static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}
static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
			  struct iovec iov[], int iov_size)
{
	const struct vhost_memory_region *reg;
	struct vhost_memory *mem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	rcu_read_lock();

	mem = rcu_dereference(dev->memory);
	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}
		reg = find_region(mem, addr, len);
		if (unlikely(!reg)) {
			ret = -EFAULT;
			break;
		}
		_iov = iov + ret;
		size = reg->memory_size - addr + reg->guest_phys_addr;
		_iov->iov_len = min((u64)len, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(reg->userspace_addr + addr - reg->guest_phys_addr);
		s += size;
		addr += size;
		++ret;
	}

	rcu_read_unlock();
	return ret;
}
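/* A minimal sketch of the address math translate_desc() applies once a region
 * has been found: guest physical addresses map to host userspace addresses by
 * a linear offset within the region.  Hypothetical helper; it assumes the
 * caller already checked that gpa lies inside the region.
 */
static inline void __user *example_gpa_to_hva(const struct vhost_memory_region *reg,
					      u64 gpa)
{
	return (void __user *)(unsigned long)
		(reg->userspace_addr + gpa - reg->guest_phys_addr);
}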
/* Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & VRING_DESC_F_NEXT))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = desc->next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	/* We will use the result as an index in an array, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	return next;
}
static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	int ret;

	/* Sanity check */
	if (unlikely(indirect->len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)indirect->len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
			     UIO_MAXIOV);
	if (unlikely(ret < 0)) {
		vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = indirect->len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
					      vq->indirect, sizeof desc))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)indirect->addr + i * sizeof desc);
			return -EINVAL;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d indirect idx %d\n",
			       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (desc.flags & VRING_DESC_F_WRITE) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);
	return 0;
}
/* This looks in the virtqueue and for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found.  A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	int ret;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;
	if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
		vq_err(vq, "Failed to access avail idx at %p\n",
		       &vq->avail->idx);
		return -EFAULT;
	}

	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
		vq_err(vq, "Guest moved used index from %u to %u",
		       last_avail_idx, vq->avail_idx);
		return -EFAULT;
	}

	/* If there's nothing new since last we looked, return invalid. */
	if (vq->avail_idx == last_avail_idx)
		return vq->num;

	/* Only get avail ring entries after they have been exposed by guest. */
	smp_rmb();

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(__get_user(head,
				&vq->avail->ring[last_avail_idx % vq->num]))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & VRING_DESC_F_INDIRECT) {
			ret = get_indirect(dev, vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				vq_err(vq, "Failure detected "
				       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
				     iov_size - iov_count);
		if (unlikely(ret < 0)) {
			vq_err(vq, "Translation failure %d descriptor idx %d\n",
			       ret, i);
			return ret;
		}
		if (desc.flags & VRING_DESC_F_WRITE) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = desc.addr;
				log[*log_num].len = desc.len;
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(&desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem __user *used;

	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
	 * next entry in that used ring. */
	used = &vq->used->ring[vq->last_used_idx % vq->num];
	if (__put_user(head, &used->id)) {
		vq_err(vq, "Failed to write used id");
		return -EFAULT;
	}
	if (__put_user(len, &used->len)) {
		vq_err(vq, "Failed to write used len");
		return -EFAULT;
	}
	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  sizeof *used);
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	vq->last_used_idx++;
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely(vq->last_used_idx == vq->signalled_used))
		vq->signalled_used_valid = false;
	return 0;
}
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	struct vring_used_elem __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx % vq->num;
	used = vq->used->ring + start;
	if (__copy_to_user(used, heads, count * sizeof *used)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			   ((void __user *)used - (void __user *)vq->used),
			  count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}
/* After we've used one of their buffers, we tell them about it.  We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     unsigned count)
{
	int start, n, r;

	start = vq->last_used_idx % vq->num;
	n = vq->num - start;
	if (n < count) {
		r = __vhost_add_used_n(vq, heads, n);
		if (r < 0)
			return r;
		heads += n;
		count -= n;
	}
	r = __vhost_add_used_n(vq, heads, count);

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (put_user(vq->last_used_idx, &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return r;
}
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new, event;
	bool v;
	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
		__u16 flags;
		if (__get_user(flags, &vq->avail->flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	if (unlikely(!v))
		return true;

	if (get_user(event, vhost_used_event(vq))) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(event, new, old);
}
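/* For reference, vring_need_event() from <linux/virtio_ring.h> reduces to
 *
 *	(u16)(new - event - 1) < (u16)(new - old)
 *
 * in modulo-2^16 arithmetic: the guest is signalled only if the event index
 * it published lies within the window (old, new] of used entries added since
 * the last signal, which is what lets the event-index scheme coalesce
 * interrupts.
 */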
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest, telling them we used something up. */
	if (vq->call_ctx && vhost_notify(dev, vq))
		eventfd_signal(vq->call_ctx, 1);
}
/* And here's the combo meal deal.  Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads, unsigned count)
{
	vhost_add_used_n(vq, heads, count);
	vhost_signal(dev, vq);
}
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	u16 avail_idx;
	int r;

	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
		r = put_user(vq->used_flags, &vq->used->flags);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		r = put_user(vq->avail_idx, vhost_avail_event(vq));
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure data is seen before log. */
		smp_wmb();
		used = vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ?
			&vq->used->flags : vhost_avail_event(vq);
		/* Log used flags or event index entry write. Both are 16 bit
		 * fields. */
		log_write(vq->log_base, vq->log_addr +
			   (used - (void __user *)vq->used),
			  sizeof(u16));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();
	r = __get_user(avail_idx, &vq->avail->idx);
	if (r) {
		vq_err(vq, "Failed to check avail idx at %p: %d\n",
		       &vq->avail->idx, r);
		return false;
	}

	return avail_idx != vq->avail_idx;
}
/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
		return;
	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
		r = put_user(vq->used_flags, &vq->used->flags);
		if (r)
			vq_err(vq, "Failed to disable notification at %p: %d\n",
			       &vq->used->flags, r);
	}
}
static void vhost_zerocopy_done_signal(struct kref *kref)
{
	struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
						    kref);
	wake_up(&ubufs->wait);
}
struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
					bool zcopy)
{
	struct vhost_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof *ubufs, GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	kref_init(&ubufs->kref);
	kref_get(&ubufs->kref);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}
void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
{
	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
}
void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
{
	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
	kfree(ubufs);
}
void vhost_zerocopy_callback(void *arg)
{
	struct ubuf_info *ubuf = arg;
	struct vhost_ubuf_ref *ubufs = ubuf->arg;
	struct vhost_virtqueue *vq = ubufs->vq;

	/* set len = 1 to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
	kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
}