/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

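/* The host side of the transport is always addressed by the well-known
 * host CID 2 (VHOST_VSOCK_DEFAULT_HOST_CID).
 */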
static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

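/* Look up a vhost_vsock instance by guest CID.  Callers must hold
 * vhost_vsock_lock while traversing the list and using the result.
 */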
static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	vsock = __vhost_vsock_get(guest_cid);
	spin_unlock_bh(&vhost_vsock_lock);

	return vsock;
}

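/* Host->guest path: move packets queued on send_pkt_list into buffers
 * the guest has made available on its RX virtqueue, then signal the
 * guest.  Runs from the send_pkt worker and from the RX kick handler.
 * If completing a reply packet frees up reply budget, the TX virtqueue
 * is kicked so guest->host processing can resume.
 */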
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

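/* Called by the core virtio transport to send a packet to the guest.
 * The packet is queued on send_pkt_list and delivered asynchronously by
 * the send worker; returns the packet length, or -ENODEV if no device
 * owns the destination CID.
 */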
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	int len = pkt->len;

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}

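/* Build a virtio_vsock_pkt from a guest-supplied descriptor chain on
 * the TX virtqueue: copy the header, validate the payload length, then
 * copy the payload.  Returns NULL on malformed input or allocation
 * failure.
 */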
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

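/* Guest->host path: drain the TX virtqueue, turning each descriptor
 * chain into a packet and handing correctly addressed packets to the
 * core virtio transport.  Processing stops while too many replies are
 * queued and resumes when vhost_transport_do_send_pkt() kicks the TX
 * virtqueue again.
 */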
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

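/* The guest kicked the RX virtqueue after adding buffers: try again to
 * flush any packets still pending on send_pkt_list.
 */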
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

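/* VHOST_VSOCK_SET_RUNNING(1): after verifying ownership and ring
 * accessibility, mark both virtqueues live by setting their
 * private_data.
 */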
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			mutex_unlock(&vq->mutex);
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			vhost_vq_init_access(vq);
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

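/* VHOST_VSOCK_SET_RUNNING(0): clear private_data on both virtqueues so
 * the kick handlers and the send worker become no-ops.
 */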
static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.  Use vzalloc() so the fallback is zeroed
	 * like the kzalloc() path: guest_cid must start out as 0 so that
	 * __vhost_vsock_get() skips instances with no CID assigned yet.
	 */
	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vsock) {
		vsock = vzalloc(sizeof(*vsock));
		if (!vsock)
			return -ENOMEM;
	}

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

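/* Wait until all queued virtqueue work and the send worker have run to
 * completion, so no packet processing is in flight.
 */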
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

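/* Invoked for each connected socket when a device is released: sockets
 * whose peer CID no longer has a backing vhost_vsock instance are
 * marked done and reset so userspace sees the connection drop.
 */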
static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here.
	 */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

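/* VHOST_VSOCK_SET_GUEST_CID: assign a guest CID, rejecting reserved
 * values (<= VMADDR_CID_HOST and U32_MAX), CIDs that do not fit in
 * 32 bits, and CIDs already claimed by another instance.
 */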
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	spin_lock_bh(&vhost_vsock_lock);
	other = __vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		spin_unlock_bh(&vhost_vsock_lock);
		return -EADDRINUSE;
	}
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

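/* VHOST_SET_FEATURES: accept only bits advertised in
 * VHOST_VSOCK_FEATURES and apply the acked set to both virtqueues.
 */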
static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek         = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");