/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
        VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[2];

        /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
        struct list_head list;

        struct vhost_work send_pkt_work;
        spinlock_t send_pkt_list_lock;
        struct list_head send_pkt_list; /* host->guest pending packets */

        atomic_t queued_replies;

        u32 guest_cid;
};

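/* Callback for the AF_VSOCK core: the host side of vhost-vsock always
 * uses the fixed, well-known host CID.
 */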
static u32 vhost_transport_get_local_cid(void)
{
        return VHOST_VSOCK_DEFAULT_HOST_CID;
}

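/* Look up the vhost_vsock instance serving @guest_cid.  Callers must hold
 * vhost_vsock_lock; the double-underscore prefix marks this locked-context
 * variant.
 */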
static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
        struct vhost_vsock *vsock;

        list_for_each_entry(vsock, &vhost_vsock_list, list) {
                u32 other_cid = vsock->guest_cid;

                /* Skip instances that have no CID yet */
                if (other_cid == 0)
                        continue;

                if (other_cid == guest_cid)
                        return vsock;
        }

        return NULL;
}

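/* Locked wrapper around __vhost_vsock_get() for callers that do not
 * already hold vhost_vsock_lock.
 */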
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
        struct vhost_vsock *vsock;

        spin_lock_bh(&vhost_vsock_lock);
        vsock = __vhost_vsock_get(guest_cid);
        spin_unlock_bh(&vhost_vsock_lock);

        return vsock;
}

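/* Drain the host->guest send_pkt_list into the given virtqueue (the
 * guest's RX queue), copying each packet header and payload into the
 * guest-provided buffers.  Runs with the virtqueue mutex held and stops
 * when either the list or the virtqueue is exhausted.
 */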
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
{
        struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
        bool added = false;
        bool restart_tx = false;

        mutex_lock(&vq->mutex);

        if (!vq->private_data)
                goto out;

        /* Avoid further vmexits, we're already processing the virtqueue */
        vhost_disable_notify(&vsock->dev, vq);

        for (;;) {
                struct virtio_vsock_pkt *pkt;
                struct iov_iter iov_iter;
                unsigned out, in;
                size_t nbytes;
                size_t len;
                int head;

                spin_lock_bh(&vsock->send_pkt_list_lock);
                if (list_empty(&vsock->send_pkt_list)) {
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        vhost_enable_notify(&vsock->dev, vq);
                        break;
                }

                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                spin_unlock_bh(&vsock->send_pkt_list_lock);

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
                        break;
                }

                if (head == vq->num) {
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);

                        /* We cannot finish yet if more buffers snuck in while
                         * re-enabling notify.
                         */
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                if (out) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Expected 0 output buffers, got %u\n", out);
                        break;
                }

                len = iov_length(&vq->iov[out], in);
                iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

                nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
                if (nbytes != sizeof(pkt->hdr)) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Faulted on copying pkt hdr\n");
                        break;
                }

                nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
                if (nbytes != pkt->len) {
                        virtio_transport_free_pkt(pkt);
                        vq_err(vq, "Faulted on copying pkt buf\n");
                        break;
                }

                vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
                added = true;

                if (pkt->reply) {
                        int val;

                        val = atomic_dec_return(&vsock->queued_replies);

                        /* Do we have resources to resume tx processing? */
                        if (val + 1 == tx_vq->num)
                                restart_tx = true;
                }

                virtio_transport_free_pkt(pkt);
        }
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);

        if (restart_tx)
                vhost_poll_queue(&tx_vq->poll);
}

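/* vhost work function: kicked whenever new packets are queued on
 * send_pkt_list, it pushes them out through the guest's RX virtqueue.
 */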
static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
        struct vhost_virtqueue *vq;
        struct vhost_vsock *vsock;

        vsock = container_of(work, struct vhost_vsock, send_pkt_work);
        vq = &vsock->vqs[VSOCK_VQ_RX];

        vhost_transport_do_send_pkt(vsock, vq);
}

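/* Transport callback used by the virtio-vsock core to deliver a packet
 * to a guest: queue it on the matching instance's send_pkt_list and let
 * the worker do the actual virtqueue copy.
 */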
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
        struct vhost_vsock *vsock;
        int len = pkt->len;

        /* Find the vhost_vsock according to guest context id */
        vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
        if (!vsock) {
                virtio_transport_free_pkt(pkt);
                return -ENODEV;
        }

        if (pkt->reply)
                atomic_inc(&vsock->queued_replies);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_add_tail(&pkt->list, &vsock->send_pkt_list);
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
        return len;
}

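/* Build a virtio_vsock_pkt from the guest-filled descriptor chain most
 * recently returned by vhost_get_vq_desc(): copy the header, validate
 * the payload length, then copy the payload.  Returns NULL on any error.
 */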
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
                      unsigned int out, unsigned int in)
{
        struct virtio_vsock_pkt *pkt;
        struct iov_iter iov_iter;
        size_t nbytes;
        size_t len;

        if (in != 0) {
                vq_err(vq, "Expected 0 input buffers, got %u\n", in);
                return NULL;
        }

        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt)
                return NULL;

        len = iov_length(vq->iov, out);
        iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

        nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
        if (nbytes != sizeof(pkt->hdr)) {
                vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
                       sizeof(pkt->hdr), nbytes);
                kfree(pkt);
                return NULL;
        }

        if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
                pkt->len = le32_to_cpu(pkt->hdr.len);

        /* No payload */
        if (!pkt->len)
                return pkt;

        /* The pkt is too big */
        if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
                kfree(pkt);
                return NULL;
        }

        pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
        if (!pkt->buf) {
                kfree(pkt);
                return NULL;
        }

        nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
        if (nbytes != pkt->len) {
                vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
                       pkt->len, nbytes);
                virtio_transport_free_pkt(pkt);
                return NULL;
        }

        return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
        int val;

        smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
        val = atomic_read(&vsock->queued_replies);

        return val < vq->num;
}

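/* Guest->host path: invoked when the guest kicks the TX virtqueue.
 * Pops descriptor chains, turns each into a packet, and hands correctly
 * addressed ones to the virtio-vsock core.  Processing is throttled
 * while too many replies are still queued toward the guest.
 */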
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);
        struct virtio_vsock_pkt *pkt;
        int head;
        unsigned int out, in;
        bool added = false;

        mutex_lock(&vq->mutex);

        if (!vq->private_data)
                goto out;

        vhost_disable_notify(&vsock->dev, vq);
        for (;;) {
                u32 len;

                if (!vhost_vsock_more_replies(vsock)) {
                        /* Stop tx until the device processes already
                         * pending replies.  Leave tx virtqueue
                         * callbacks disabled.
                         */
                        goto no_more_replies;
                }

                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0)
                        break;

                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
                                vhost_disable_notify(&vsock->dev, vq);
                                continue;
                        }
                        break;
                }

                pkt = vhost_vsock_alloc_pkt(vq, out, in);
                if (!pkt) {
                        vq_err(vq, "Faulted on pkt\n");
                        continue;
                }

                len = pkt->len;

                /* Only accept correctly addressed packets */
                if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
                        virtio_transport_recv_pkt(pkt);
                else
                        virtio_transport_free_pkt(pkt);

                vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
                added = true;
        }

no_more_replies:
        if (added)
                vhost_signal(&vsock->dev, vq);

out:
        mutex_unlock(&vq->mutex);
}

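/* Host->guest path: invoked when the guest kicks the RX virtqueue,
 * i.e. it has posted fresh receive buffers, so retry any pending sends.
 */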
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
                                                 dev);

        vhost_transport_do_send_pkt(vsock, vq);
}

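/* VHOST_VSOCK_SET_RUNNING(1): mark both virtqueues as running by setting
 * their private_data, after validating ownership and memory access.  On
 * failure every virtqueue is rolled back to the stopped state.
 */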
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
        struct vhost_virtqueue *vq;
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);

                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
                        goto err_vq;
                }

                if (!vq->private_data) {
                        vq->private_data = vsock;
                        ret = vhost_vq_init_access(vq);
                        if (ret)
                                goto err_vq;
                }

                mutex_unlock(&vq->mutex);
        }

        mutex_unlock(&vsock->dev.mutex);
        return 0;

err_vq:
        vq->private_data = NULL;
        mutex_unlock(&vq->mutex);

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
                mutex_unlock(&vq->mutex);
        }
err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}

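/* VHOST_VSOCK_SET_RUNNING(0): clear private_data on both virtqueues so
 * the kick handlers become no-ops.
 */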
static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
        size_t i;
        int ret;

        mutex_lock(&vsock->dev.mutex);

        ret = vhost_dev_check_owner(&vsock->dev);
        if (ret)
                goto err;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                struct vhost_virtqueue *vq = &vsock->vqs[i];

                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
                mutex_unlock(&vq->mutex);
        }

err:
        mutex_unlock(&vsock->dev.mutex);
        return ret;
}

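/* The instance may have come from kzalloc() or the vmalloc() fallback in
 * vhost_vsock_dev_open(); kvfree() handles both.
 */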
static void vhost_vsock_free(struct vhost_vsock *vsock)
{
        kvfree(vsock);
}

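/* Allocate and initialize a vhost_vsock instance for each open of the
 * device node and register it on the global list.  The guest CID stays 0
 * (unassigned) until VHOST_VSOCK_SET_GUEST_CID is issued.
 */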
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
        struct vhost_virtqueue **vqs;
        struct vhost_vsock *vsock;
        int ret;

        /* This struct is large and allocation could fail, fall back to vmalloc
         * if there is no other way.
         */
        vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
        if (!vsock) {
                vsock = vmalloc(sizeof(*vsock));
                if (!vsock)
                        return -ENOMEM;
        }

        vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                ret = -ENOMEM;
                goto out;
        }

        atomic_set(&vsock->queued_replies, 0);

        vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
        vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
        vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
        vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

        vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

        file->private_data = vsock;
        spin_lock_init(&vsock->send_pkt_list_lock);
        INIT_LIST_HEAD(&vsock->send_pkt_list);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

        spin_lock_bh(&vhost_vsock_lock);
        list_add_tail(&vsock->list, &vhost_vsock_list);
        spin_unlock_bh(&vhost_vsock_lock);
        return 0;

out:
        vhost_vsock_free(vsock);
        return ret;
}

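/* Wait for all in-flight vhost work (virtqueue kick handlers and the
 * send worker) to complete.
 */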
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
                if (vsock->vqs[i].handle_kick)
                        vhost_poll_flush(&vsock->vqs[i].poll);
        vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

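/* Callback for vsock_for_each_connected_socket(): if the peer CID no
 * longer maps to a live vhost_vsock instance, mark the socket reset.
 */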
static void vhost_vsock_reset_orphans(struct sock *sk)
{
        struct vsock_sock *vsk = vsock_sk(sk);

        /* vmci_transport.c doesn't take sk_lock here either.  At least we're
         * under vsock_table_lock so the sock cannot disappear while we're
         * executing.
         */

        if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
                sock_set_flag(sk, SOCK_DONE);
                vsk->peer_shutdown = SHUTDOWN_MASK;
                sk->sk_state = SS_UNCONNECTED;
                sk->sk_err = ECONNRESET;
                sk->sk_error_report(sk);
        }
}

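/* Tear down on the final close of the device fd: unlink from the global
 * list, reset sockets orphaned by the disappearing CID, stop and flush
 * the virtqueues, and free any packets still queued toward the guest.
 */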
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
        struct vhost_vsock *vsock = file->private_data;

        spin_lock_bh(&vhost_vsock_lock);
        list_del(&vsock->list);
        spin_unlock_bh(&vhost_vsock_lock);

        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient.  Room for improvement here. */
        vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

        vhost_vsock_stop(vsock);
        vhost_vsock_flush(vsock);
        vhost_dev_stop(&vsock->dev);

        spin_lock_bh(&vsock->send_pkt_list_lock);
        while (!list_empty(&vsock->send_pkt_list)) {
                struct virtio_vsock_pkt *pkt;

                pkt = list_first_entry(&vsock->send_pkt_list,
                                       struct virtio_vsock_pkt, list);
                list_del_init(&pkt->list);
                virtio_transport_free_pkt(pkt);
        }
        spin_unlock_bh(&vsock->send_pkt_list_lock);

        vhost_dev_cleanup(&vsock->dev, false);
        kfree(vsock->dev.vqs);
        vhost_vsock_free(vsock);
        return 0;
}

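/* VHOST_VSOCK_SET_GUEST_CID: assign the guest's context ID.  Reserved
 * CIDs (hypervisor, host, VMADDR_CID_ANY) and CIDs above 32 bits are
 * rejected, as are CIDs already claimed by another instance.
 */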
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
        struct vhost_vsock *other;

        /* Refuse reserved CIDs */
        if (guest_cid <= VMADDR_CID_HOST ||
            guest_cid == U32_MAX)
                return -EINVAL;

        /* 64-bit CIDs are not yet supported */
        if (guest_cid > U32_MAX)
                return -EINVAL;

        /* Refuse if CID is already in use */
        spin_lock_bh(&vhost_vsock_lock);
        other = __vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
                spin_unlock_bh(&vhost_vsock_lock);
                return -EADDRINUSE;
        }
        vsock->guest_cid = guest_cid;
        spin_unlock_bh(&vhost_vsock_lock);

        return 0;
}

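/* VHOST_SET_FEATURES: accept the feature bits chosen by userspace and
 * propagate them to both virtqueues.
 */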
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
        struct vhost_virtqueue *vq;
        int i;

        if (features & ~VHOST_VSOCK_FEATURES)
                return -EOPNOTSUPP;

        mutex_lock(&vsock->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vsock->dev)) {
                mutex_unlock(&vsock->dev.mutex);
                return -EFAULT;
        }

        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
                vq = &vsock->vqs[i];
                mutex_lock(&vq->mutex);
                vq->acked_features = features;
                mutex_unlock(&vq->mutex);
        }
        mutex_unlock(&vsock->dev.mutex);
        return 0;
}

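/* ioctl entry point: handles the vsock-specific ioctls directly and
 * forwards everything else to the generic vhost ioctl handlers.
 */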
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
                                  unsigned long arg)
{
        struct vhost_vsock *vsock = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 guest_cid;
        u64 features;
        int start;
        int r;

        switch (ioctl) {
        case VHOST_VSOCK_SET_GUEST_CID:
                if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
                        return -EFAULT;
                return vhost_vsock_set_cid(vsock, guest_cid);
        case VHOST_VSOCK_SET_RUNNING:
                if (copy_from_user(&start, argp, sizeof(start)))
                        return -EFAULT;
                if (start)
                        return vhost_vsock_start(vsock);
                else
                        return vhost_vsock_stop(vsock);
        case VHOST_GET_FEATURES:
                features = VHOST_VSOCK_FEATURES;
                if (copy_to_user(argp, &features, sizeof(features)))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                return vhost_vsock_set_features(vsock, features);
        default:
                mutex_lock(&vsock->dev.mutex);
                r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
                else
                        vhost_vsock_flush(vsock);
                mutex_unlock(&vsock->dev.mutex);
                return r;
        }
}

static const struct file_operations vhost_vsock_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vsock_dev_open,
        .release        = vhost_vsock_dev_release,
        .llseek         = noop_llseek,
        .unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "vhost-vsock",
        .fops = &vhost_vsock_fops,
};

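/* Transport descriptor registered with the AF_VSOCK core.  All socket
 * operations are the generic virtio transport helpers; only the local
 * CID lookup and the packet send hook are vhost-specific.
 */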
static struct virtio_transport vhost_transport = {
        .transport = {
                .get_local_cid            = vhost_transport_get_local_cid,

                .init                     = virtio_transport_do_socket_init,
                .destruct                 = virtio_transport_destruct,
                .release                  = virtio_transport_release,
                .connect                  = virtio_transport_connect,
                .shutdown                 = virtio_transport_shutdown,

                .dgram_enqueue            = virtio_transport_dgram_enqueue,
                .dgram_dequeue            = virtio_transport_dgram_dequeue,
                .dgram_bind               = virtio_transport_dgram_bind,
                .dgram_allow              = virtio_transport_dgram_allow,

                .stream_enqueue           = virtio_transport_stream_enqueue,
                .stream_dequeue           = virtio_transport_stream_dequeue,
                .stream_has_data          = virtio_transport_stream_has_data,
                .stream_has_space         = virtio_transport_stream_has_space,
                .stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
                .stream_is_active         = virtio_transport_stream_is_active,
                .stream_allow             = virtio_transport_stream_allow,

                .notify_poll_in           = virtio_transport_notify_poll_in,
                .notify_poll_out          = virtio_transport_notify_poll_out,
                .notify_recv_init         = virtio_transport_notify_recv_init,
                .notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
                .notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
                .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
                .notify_send_init         = virtio_transport_notify_send_init,
                .notify_send_pre_block    = virtio_transport_notify_send_pre_block,
                .notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
                .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

                .set_buffer_size          = virtio_transport_set_buffer_size,
                .set_min_buffer_size      = virtio_transport_set_min_buffer_size,
                .set_max_buffer_size      = virtio_transport_set_max_buffer_size,
                .get_buffer_size          = virtio_transport_get_buffer_size,
                .get_min_buffer_size      = virtio_transport_get_min_buffer_size,
                .get_max_buffer_size      = virtio_transport_get_max_buffer_size,
        },

        .send_pkt = vhost_transport_send_pkt,
};

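/* Register the transport with the AF_VSOCK core first, then expose the
 * misc device; module exit tears down in the reverse order.
 */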
static int __init vhost_vsock_init(void)
{
        int ret;

        ret = vsock_core_init(&vhost_transport.transport);
        if (ret < 0)
                return ret;
        return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
        misc_deregister(&vhost_vsock_misc);
        vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");