// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>
static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */
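
/* Lifetime of the device pointer above: readers take rcu_read_lock() and
 * rcu_dereference(the_virtio_vsock); probe/remove update it under
 * the_virtio_vsock_mutex with rcu_assign_pointer(), so a reader either
 * sees NULL (no device) or a fully initialised vsock.
 */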
struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock.  vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock.  vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
	bool seqpacket_allow;
};
static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct scatterlist hdr, buf, *sgs[2];
		int ret, in_sg = 0, out_sg = 0;
		bool reply;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		virtio_transport_deliver_tap_pkt(pkt);

		reply = pkt->reply;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[out_sg++] = &hdr;
		if (pkt->buf) {
			sg_init_one(&buf, pkt->buf, pkt->len);
			sgs[out_sg++] = &buf;
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq
		 */
		if (ret < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
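
/* Callers hand packets to the transport by appending them to send_pkt_list
 * and scheduling send_pkt_work; the worker above then drains the list into
 * the TX virtqueue.  Among other things, this indirection lets send_pkt()
 * run from atomic context while virtqueue_add_sgs() is called with
 * GFP_KERNEL here.  When the vring is full the packet is put back on the
 * list and the TX done handler requeues the work.
 */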
static int
virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct virtio_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
		virtio_transport_free_pkt(pkt);
		len = -ENODEV;
		goto out_rcu;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
	rcu_read_unlock();
	return len;
}
static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0, ret;
	LIST_HEAD(freeme);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
	struct virtio_vsock_pkt *pkt;
	struct scatterlist hdr, buf, *sgs[2];
	struct virtqueue *vq;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
		if (!pkt)
			break;

		pkt->buf = kmalloc(buf_len, GFP_KERNEL);
		if (!pkt->buf) {
			virtio_transport_free_pkt(pkt);
			break;
		}

		pkt->buf_len = buf_len;
		pkt->len = buf_len;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[0] = &hdr;

		sg_init_one(&buf, pkt->buf, buf_len);
		sgs[1] = &buf;
		ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
		if (ret) {
			virtio_transport_free_pkt(pkt);
			break;
		}
		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}
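
/* Each rx buffer is posted as two device-writable descriptors: one for the
 * packet header and one for a VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE payload,
 * hence virtqueue_add_sgs() above with 0 out_sgs and 2 in_sgs.
 */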
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct virtio_vsock_pkt *pkt;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_free_pkt(pkt);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}
/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}
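
/* queued_replies counts reply packets that are still queued locally (on
 * send_pkt_list or in the TX vring).  The rx worker stops once this count
 * reaches the rx vring size and is restarted from send_pkt_work() or
 * cancel_pkt() when enough replies have drained, bounding the number of
 * replies generated from unprocessed rx buffers.
 */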
/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}
/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}
static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}
static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}
/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(&virtio_transport.transport,
						virtio_vsock_reset_sock);
		break;
	}
}
static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}
static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}
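
/* The three callbacks above run from virtqueue interrupt context, so they
 * only schedule the corresponding work items; the actual virtqueue
 * processing happens in the workqueue handlers, which may sleep.
 */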
static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = virtio_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = virtio_transport_cancel_pkt,

		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.seqpacket_dequeue        = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue        = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow          = virtio_transport_seqpacket_allow,
		.seqpacket_has_data       = virtio_transport_seqpacket_has_data,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
	},

	.send_pkt = virtio_transport_send_pkt,
};
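
/* For illustration only (not part of the driver): guest userspace reaches
 * this transport through the AF_VSOCK socket API.  A minimal sketch, using
 * an arbitrary example port (1234) and the well-known host CID:
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm addr = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid    = VMADDR_CID_HOST,
 *		.svm_port   = 1234,
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * connect() ends up in virtio_transport_connect() via the ops table above,
 * and the resulting packets flow through virtio_transport_send_pkt().
 */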
static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}
static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct virtio_vsock_pkt *pkt;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies.  Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			pkt = virtqueue_get_buf(vq, &len);
			if (!pkt)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(pkt->hdr) ||
				     len > sizeof(pkt->hdr) + pkt->len)) {
				virtio_transport_free_pkt(pkt);
				continue;
			}

			pkt->len = len - sizeof(pkt->hdr);
			virtio_transport_deliver_tap_pkt(pkt);
			virtio_transport_recv_pkt(&virtio_transport, pkt);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}
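
/* The rx virtqueue is refilled only once the number of posted buffers has
 * dropped below half of the high-water mark (rx_buf_max_nr), batching
 * allocations instead of refilling after every received packet.
 */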
static int virtio_vsock_probe(struct virtio_device *vdev)
{
	vq_callback_t *callbacks[] = {
		virtio_vsock_rx_done,
		virtio_vsock_tx_done,
		virtio_vsock_event_done,
	};
	static const char * const names[] = {
		"rx",
		"tx",
		"event",
	};
	struct virtio_vsock *vsock = NULL;
	int ret;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX,
			      vsock->vqs, callbacks, names,
			      NULL);
	if (ret < 0)
		goto out;

	virtio_vsock_update_guest_cid(vsock);

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;

	virtio_device_ready(vdev);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	rcu_assign_pointer(the_virtio_vsock, vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
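
/* Ordering in probe matters: the device is made ready and the rx/event
 * queues are filled before rcu_assign_pointer() publishes the_virtio_vsock,
 * so transport callbacks never observe a half-initialised device.
 */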
static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	struct virtio_vsock_pkt *pkt;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	/* Reset all connected sockets when the device disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
					virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call vdev->config->reset().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	vdev->config->reset(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->tx_lock);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);

	/* Other works can be queued before 'config->del_vqs()', so we flush
	 * all works before to free the vsock object to avoid use after free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_VSOCK_F_SEQPACKET
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
};

static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);