// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */
/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them. - Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets and SOCK_DGRAM sockets will always remain in
 * that list. The bound table is used solely for lookup of sockets when packets
 * are received and that's not necessary for SOCK_DGRAM sockets since we create
 * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
 * sockets out of the bound hash buckets will reduce the chance of collisions
 * when looking for SOCK_STREAM sockets and prevents us from having to check
 * the socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we do
 * not support simultaneous connects (two "client" sockets connecting). A
 * minimal userspace sketch of the client side follows these notes.
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state. When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket. These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is from one that has an
 * existing pending connection. If it does, we process the packet for the
 * pending socket. When that socket reaches the connected state, it is removed
 * from the listener socket's pending list and enqueued in the listener
 * socket's accept queue. Callers of accept(2) will accept connected sockets
 * from the listener socket's accept queue. If the socket cannot be accepted
 * for some reason then it is marked rejected. Once the connection is
 * accepted, it is owned by the user process and the responsibility for cleanup
 * falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request. Because of this, we must schedule a cleanup function to run in the
 * future, after some amount of time passes where a connection should have been
 * established. This function ensures that the socket is off all lists so it
 * cannot be retrieved, then drops all references to the socket so it is
 * cleaned up (sock_put() -> sk_free() -> our sk_destruct implementation). Note
 * this function will also cleanup rejected sockets, those that reach the
 * connected state but leave it before they have been accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *     lock_sock(listener);
 *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called. Our release
 * implementation will perform some cleanup then drop the last reference so our
 * sk_destruct implementation is invoked. Our sk_destruct implementation will
 * perform additional cleanup that's common for both types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed. Each entry in a list (such as the "global" bound and connected tables
 * and the listener socket's pending list and connected queue) ensures a
 * reference. When we defer work until process context and pass a socket as our
 * argument, we must ensure the reference count is increased to ensure the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 * other address families and exposed to userspace tools like ss(8):
 *
 *   TCP_CLOSE - unconnected
 *   TCP_SYN_SENT - connecting
 *   TCP_ESTABLISHED - connected
 *   TCP_CLOSING - disconnecting
 *   TCP_LISTEN - listening
 */
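/* Minimal userspace sketch of the "client" flow described above (illustrative
 * only, not part of this file; the CID and port values are arbitrary
 * examples):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/vm_sockets.h>
 *
 *	int vsock_client_example(void)
 *	{
 *		struct sockaddr_vm addr;
 *		int fd = socket(AF_VSOCK, SOCK_STREAM, 0);	// sk_state: TCP_CLOSE
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&addr, 0, sizeof(addr));
 *		addr.svm_family = AF_VSOCK;
 *		addr.svm_cid = VMADDR_CID_HOST;	// connect to the host (CID 2)
 *		addr.svm_port = 1234;
 *
 *		// connect() takes the socket through TCP_SYN_SENT and, on
 *		// success, TCP_ESTABLISHED; the peer's listener accept()s it.
 *		if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *
 *		close(fd);			// back to TCP_CLOSE
 *		return 0;
 *	}
 */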
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* Protocol family. */
static struct proto vsock_proto = {
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
};
/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define VSOCK_DEFAULT_BUFFER_SIZE     (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128
/* Transport used for host->guest communication */
static const struct vsock_transport *transport_h2g;
/* Transport used for guest->host communication */
static const struct vsock_transport *transport_g2h;
/* Transport used for DGRAM communication */
static const struct vsock_transport *transport_dgram;
/* Transport used for local communication */
static const struct vsock_transport *transport_local;

static DEFINE_MUTEX(vsock_register_mutex);
/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the hash
 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
 * represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
 * mods with VSOCK_HASH_SIZE to ensure this.
 */
#define MAX_PORT_RETRIES        24

#define VSOCK_HASH(addr)        ((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets     (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst) \
	(((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst) \
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk) \
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)
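/* Worked example of the layout described above (illustrative only): a socket
 * bound to local port 1030 hangs off vsock_bind_table[1030 % VSOCK_HASH_SIZE],
 * i.e. one of the first VSOCK_HASH_SIZE buckets, while unbound sockets always
 * sit on the extra tail list:
 *
 *	struct list_head *bucket;
 *
 *	bucket = vsock_bound_sockets(&vsk->local_addr); // buckets 0..VSOCK_HASH_SIZE-1
 *	bucket = vsock_unbound_sockets;                 // always vsock_bind_table[VSOCK_HASH_SIZE]
 */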
struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);
/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	struct sockaddr_vm local_addr;

	if (vsock_addr_bound(&vsk->local_addr))
		return 0;
	vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __vsock_bind(sk, &local_addr);
}
static void vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
}
static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
	sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
	sock_put(&vsk->sk);
}
static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
		if (vsock_addr_equals_addr(addr, &vsk->local_addr))
			return sk_vsock(vsk);

		if (addr->svm_port == vsk->local_addr.svm_port &&
		    (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
		     addr->svm_cid == VMADDR_CID_ANY))
			return sk_vsock(vsk);
	}

	return NULL;
}
static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}
static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);
void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_bound_table(vsk))
		__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_connected_table(vsk))
		__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);
struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
					 struct sockaddr_vm *dst)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_connected_socket(src, dst);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
void vsock_remove_sock(struct vsock_sock *vsk)
{
	vsock_remove_bound(vsk);
	vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);
void vsock_for_each_connected_socket(struct vsock_transport *transport,
				     void (*fn)(struct sock *sk))
{
	int i;

	spin_lock_bh(&vsock_table_lock);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;
		list_for_each_entry(vsk, &vsock_connected_table[i],
				    connected_table) {
			if (vsk->transport != transport)
				continue;

			fn(sk_vsock(vsk));
		}
	}

	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);
void vsock_add_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;

	vlistener = vsock_sk(listener);
	vpending = vsock_sk(pending);

	sock_hold(pending);
	sock_hold(listener);
	list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vpending = vsock_sk(pending);

	list_del_init(&vpending->pending_links);
	sock_put(listener);
	sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);
void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);
	vconnected = vsock_sk(connected);

	sock_hold(connected);
	sock_hold(listener);
	list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
static bool vsock_use_local_transport(unsigned int remote_cid)
{
	if (!transport_local)
		return false;

	if (remote_cid == VMADDR_CID_LOCAL)
		return true;

	if (transport_g2h) {
		return remote_cid == transport_g2h->get_local_cid();
	} else {
		return remote_cid == VMADDR_CID_HOST;
	}
}
static void vsock_deassign_transport(struct vsock_sock *vsk)
{
	if (!vsk->transport)
		return;

	vsk->transport->destruct(vsk);
	module_put(vsk->transport->module);
	vsk->transport = NULL;
}
/* Assign a transport to a socket and call the .init transport callback.
 *
 * Note: for connection oriented socket this must be called when vsk->remote_addr
 * is set (e.g. during the connect() or when a connection request on a listener
 * socket is received).
 * The vsk->remote_addr is used to decide which transport to use:
 *  - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if
 *    g2h is not loaded, will use local transport;
 *  - remote CID <= VMADDR_CID_HOST or h2g is not loaded or remote flags field
 *    includes VMADDR_FLAG_TO_HOST flag value, will use guest->host transport;
 *  - remote CID > VMADDR_CID_HOST will use host->guest transport;
 */
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
{
	const struct vsock_transport *new_transport;
	struct sock *sk = sk_vsock(vsk);
	unsigned int remote_cid = vsk->remote_addr.svm_cid;
	__u8 remote_flags;
	int ret;

	/* If the packet is coming with the source and destination CIDs higher
	 * than VMADDR_CID_HOST, then a vsock channel where all the packets are
	 * forwarded to the host should be established. Then the host will
	 * need to forward the packets to the guest.
	 *
	 * The flag is set on the (listen) receive path (psk is not NULL). On
	 * the connect path the flag can be set by the user space application.
	 */
	if (psk && vsk->local_addr.svm_cid > VMADDR_CID_HOST &&
	    vsk->remote_addr.svm_cid > VMADDR_CID_HOST)
		vsk->remote_addr.svm_flags |= VMADDR_FLAG_TO_HOST;

	remote_flags = vsk->remote_addr.svm_flags;

	switch (sk->sk_type) {
	case SOCK_DGRAM:
		new_transport = transport_dgram;
		break;
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		if (vsock_use_local_transport(remote_cid))
			new_transport = transport_local;
		else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g ||
			 (remote_flags & VMADDR_FLAG_TO_HOST))
			new_transport = transport_g2h;
		else
			new_transport = transport_h2g;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if (vsk->transport) {
		if (vsk->transport == new_transport)
			return 0;

		/* transport->release() must be called with sock lock acquired.
		 * This path can only be taken during vsock_connect(), where we
		 * have already held the sock lock. In the other cases, this
		 * function is called on a new socket which is not assigned to
		 * any transport.
		 */
		vsk->transport->release(vsk);
		vsock_deassign_transport(vsk);
	}

	/* We increase the module refcnt to prevent the transport unloading
	 * while there are open sockets assigned to it.
	 */
	if (!new_transport || !try_module_get(new_transport->module))
		return -ENODEV;

	if (sk->sk_type == SOCK_SEQPACKET) {
		if (!new_transport->seqpacket_allow ||
		    !new_transport->seqpacket_allow(remote_cid)) {
			module_put(new_transport->module);
			return -ESOCKTNOSUPPORT;
		}
	}

	ret = new_transport->init(vsk, psk);
	if (ret) {
		module_put(new_transport->module);
		return ret;
	}

	vsk->transport = new_transport;

	return 0;
}
EXPORT_SYMBOL_GPL(vsock_assign_transport);
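/* Illustrative summary of the selection rules above (assuming g2h, h2g and
 * local transports are all registered and VMADDR_FLAG_TO_HOST is not set):
 *
 *	remote CID == VMADDR_CID_LOCAL (1)          -> transport_local
 *	remote CID == VMADDR_CID_HOST (2) or lower  -> transport_g2h
 *	remote CID >  VMADDR_CID_HOST (e.g. 3)      -> transport_h2g
 */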
bool vsock_find_cid(unsigned int cid)
{
	if (transport_g2h && cid == transport_g2h->get_local_cid())
		return true;

	if (transport_h2g && cid == VMADDR_CID_HOST)
		return true;

	if (transport_local && cid == VMADDR_CID_LOCAL)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(vsock_find_cid);
static struct sock *vsock_dequeue_accept(struct sock *listener)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);

	if (list_empty(&vlistener->accept_queue))
		return NULL;

	vconnected = list_entry(vlistener->accept_queue.next,
				struct vsock_sock, accept_queue);

	list_del_init(&vconnected->accept_queue);
	sock_put(listener);
	/* The caller will need a reference on the connected socket so we let
	 * it call sock_put().
	 */

	return sk_vsock(vconnected);
}
static bool vsock_is_accept_queue_empty(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return !list_empty(&vsk->pending_links);
}
static int vsock_send_shutdown(struct sock *sk, int mode)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (!vsk->transport)
		return -ENODEV;

	return vsk->transport->shutdown(vsk, mode);
}
572 static void vsock_pending_work(struct work_struct
*work
)
575 struct sock
*listener
;
576 struct vsock_sock
*vsk
;
579 vsk
= container_of(work
, struct vsock_sock
, pending_work
.work
);
581 listener
= vsk
->listener
;
585 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
587 if (vsock_is_pending(sk
)) {
588 vsock_remove_pending(listener
, sk
);
590 sk_acceptq_removed(listener
);
591 } else if (!vsk
->rejected
) {
592 /* We are not on the pending list and accept() did not reject
593 * us, so we must have been accepted by our user process. We
594 * just need to drop our references to the sockets and be on
601 /* We need to remove ourself from the global connected sockets list so
602 * incoming packets can't find this socket, and to reduce the reference
605 vsock_remove_connected(vsk
);
607 sk
->sk_state
= TCP_CLOSE
;
611 release_sock(listener
);
/**** SOCKET OPERATIONS ****/
621 static int __vsock_bind_connectible(struct vsock_sock
*vsk
,
622 struct sockaddr_vm
*addr
)
625 struct sockaddr_vm new_addr
;
628 port
= LAST_RESERVED_PORT
+ 1 +
629 prandom_u32_max(U32_MAX
- LAST_RESERVED_PORT
);
631 vsock_addr_init(&new_addr
, addr
->svm_cid
, addr
->svm_port
);
633 if (addr
->svm_port
== VMADDR_PORT_ANY
) {
637 for (i
= 0; i
< MAX_PORT_RETRIES
; i
++) {
638 if (port
<= LAST_RESERVED_PORT
)
639 port
= LAST_RESERVED_PORT
+ 1;
641 new_addr
.svm_port
= port
++;
643 if (!__vsock_find_bound_socket(&new_addr
)) {
650 return -EADDRNOTAVAIL
;
652 /* If port is in reserved range, ensure caller
653 * has necessary privileges.
655 if (addr
->svm_port
<= LAST_RESERVED_PORT
&&
656 !capable(CAP_NET_BIND_SERVICE
)) {
660 if (__vsock_find_bound_socket(&new_addr
))
664 vsock_addr_init(&vsk
->local_addr
, new_addr
.svm_cid
, new_addr
.svm_port
);
666 /* Remove connection oriented sockets from the unbound list and add them
667 * to the hash table for easy lookup by its address. The unbound list
668 * is simply an extra entry at the end of the hash table, a trick used
671 __vsock_remove_bound(vsk
);
672 __vsock_insert_bound(vsock_bound_sockets(&vsk
->local_addr
), vsk
);
677 static int __vsock_bind_dgram(struct vsock_sock
*vsk
,
678 struct sockaddr_vm
*addr
)
680 return vsk
->transport
->dgram_bind(vsk
, addr
);
683 static int __vsock_bind(struct sock
*sk
, struct sockaddr_vm
*addr
)
685 struct vsock_sock
*vsk
= vsock_sk(sk
);
688 /* First ensure this socket isn't already bound. */
689 if (vsock_addr_bound(&vsk
->local_addr
))
692 /* Now bind to the provided address or select appropriate values if
693 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY). Note that
694 * like AF_INET prevents binding to a non-local IP address (in most
695 * cases), we only allow binding to a local CID.
697 if (addr
->svm_cid
!= VMADDR_CID_ANY
&& !vsock_find_cid(addr
->svm_cid
))
698 return -EADDRNOTAVAIL
;
700 switch (sk
->sk_socket
->type
) {
703 spin_lock_bh(&vsock_table_lock
);
704 retval
= __vsock_bind_connectible(vsk
, addr
);
705 spin_unlock_bh(&vsock_table_lock
);
709 retval
= __vsock_bind_dgram(vsk
, addr
);
720 static void vsock_connect_timeout(struct work_struct
*work
);
722 static struct sock
*__vsock_create(struct net
*net
,
730 struct vsock_sock
*psk
;
731 struct vsock_sock
*vsk
;
733 sk
= sk_alloc(net
, AF_VSOCK
, priority
, &vsock_proto
, kern
);
737 sock_init_data(sock
, sk
);
739 /* sk->sk_type is normally set in sock_init_data, but only if sock is
740 * non-NULL. We make sure that our sockets always have a type by
741 * setting it here if needed.
747 vsock_addr_init(&vsk
->local_addr
, VMADDR_CID_ANY
, VMADDR_PORT_ANY
);
748 vsock_addr_init(&vsk
->remote_addr
, VMADDR_CID_ANY
, VMADDR_PORT_ANY
);
750 sk
->sk_destruct
= vsock_sk_destruct
;
751 sk
->sk_backlog_rcv
= vsock_queue_rcv_skb
;
752 sock_reset_flag(sk
, SOCK_DONE
);
754 INIT_LIST_HEAD(&vsk
->bound_table
);
755 INIT_LIST_HEAD(&vsk
->connected_table
);
756 vsk
->listener
= NULL
;
757 INIT_LIST_HEAD(&vsk
->pending_links
);
758 INIT_LIST_HEAD(&vsk
->accept_queue
);
759 vsk
->rejected
= false;
760 vsk
->sent_request
= false;
761 vsk
->ignore_connecting_rst
= false;
762 vsk
->peer_shutdown
= 0;
763 INIT_DELAYED_WORK(&vsk
->connect_work
, vsock_connect_timeout
);
764 INIT_DELAYED_WORK(&vsk
->pending_work
, vsock_pending_work
);
766 psk
= parent
? vsock_sk(parent
) : NULL
;
768 vsk
->trusted
= psk
->trusted
;
769 vsk
->owner
= get_cred(psk
->owner
);
770 vsk
->connect_timeout
= psk
->connect_timeout
;
771 vsk
->buffer_size
= psk
->buffer_size
;
772 vsk
->buffer_min_size
= psk
->buffer_min_size
;
773 vsk
->buffer_max_size
= psk
->buffer_max_size
;
774 security_sk_clone(parent
, sk
);
776 vsk
->trusted
= ns_capable_noaudit(&init_user_ns
, CAP_NET_ADMIN
);
777 vsk
->owner
= get_current_cred();
778 vsk
->connect_timeout
= VSOCK_DEFAULT_CONNECT_TIMEOUT
;
779 vsk
->buffer_size
= VSOCK_DEFAULT_BUFFER_SIZE
;
780 vsk
->buffer_min_size
= VSOCK_DEFAULT_BUFFER_MIN_SIZE
;
781 vsk
->buffer_max_size
= VSOCK_DEFAULT_BUFFER_MAX_SIZE
;
static bool sock_type_connectible(u16 type)
{
	return (type == SOCK_STREAM) || (type == SOCK_SEQPACKET);
}
792 static void __vsock_release(struct sock
*sk
, int level
)
795 struct sock
*pending
;
796 struct vsock_sock
*vsk
;
799 pending
= NULL
; /* Compiler warning. */
801 /* When "level" is SINGLE_DEPTH_NESTING, use the nested
802 * version to avoid the warning "possible recursive locking
803 * detected". When "level" is 0, lock_sock_nested(sk, level)
804 * is the same as lock_sock(sk).
806 lock_sock_nested(sk
, level
);
809 vsk
->transport
->release(vsk
);
810 else if (sock_type_connectible(sk
->sk_type
))
811 vsock_remove_sock(vsk
);
814 sk
->sk_shutdown
= SHUTDOWN_MASK
;
816 skb_queue_purge(&sk
->sk_receive_queue
);
818 /* Clean up any sockets that never were accepted. */
819 while ((pending
= vsock_dequeue_accept(sk
)) != NULL
) {
820 __vsock_release(pending
, SINGLE_DEPTH_NESTING
);
829 static void vsock_sk_destruct(struct sock
*sk
)
831 struct vsock_sock
*vsk
= vsock_sk(sk
);
833 vsock_deassign_transport(vsk
);
835 /* When clearing these addresses, there's no need to set the family and
836 * possibly register the address family with the kernel.
838 vsock_addr_init(&vsk
->local_addr
, VMADDR_CID_ANY
, VMADDR_PORT_ANY
);
839 vsock_addr_init(&vsk
->remote_addr
, VMADDR_CID_ANY
, VMADDR_PORT_ANY
);
841 put_cred(vsk
->owner
);
844 static int vsock_queue_rcv_skb(struct sock
*sk
, struct sk_buff
*skb
)
848 err
= sock_queue_rcv_skb(sk
, skb
);
855 struct sock
*vsock_create_connected(struct sock
*parent
)
857 return __vsock_create(sock_net(parent
), NULL
, parent
, GFP_KERNEL
,
860 EXPORT_SYMBOL_GPL(vsock_create_connected
);
862 s64
vsock_stream_has_data(struct vsock_sock
*vsk
)
864 return vsk
->transport
->stream_has_data(vsk
);
866 EXPORT_SYMBOL_GPL(vsock_stream_has_data
);
868 static s64
vsock_connectible_has_data(struct vsock_sock
*vsk
)
870 struct sock
*sk
= sk_vsock(vsk
);
872 if (sk
->sk_type
== SOCK_SEQPACKET
)
873 return vsk
->transport
->seqpacket_has_data(vsk
);
875 return vsock_stream_has_data(vsk
);
878 s64
vsock_stream_has_space(struct vsock_sock
*vsk
)
880 return vsk
->transport
->stream_has_space(vsk
);
882 EXPORT_SYMBOL_GPL(vsock_stream_has_space
);
884 static int vsock_release(struct socket
*sock
)
886 __vsock_release(sock
->sk
, 0);
888 sock
->state
= SS_FREE
;
894 vsock_bind(struct socket
*sock
, struct sockaddr
*addr
, int addr_len
)
898 struct sockaddr_vm
*vm_addr
;
902 if (vsock_addr_cast(addr
, addr_len
, &vm_addr
) != 0)
906 err
= __vsock_bind(sk
, vm_addr
);
912 static int vsock_getname(struct socket
*sock
,
913 struct sockaddr
*addr
, int peer
)
917 struct vsock_sock
*vsk
;
918 struct sockaddr_vm
*vm_addr
;
927 if (sock
->state
!= SS_CONNECTED
) {
931 vm_addr
= &vsk
->remote_addr
;
933 vm_addr
= &vsk
->local_addr
;
	/* sys_getsockname() and sys_getpeername() pass us a
	 * MAX_SOCK_ADDR-sized buffer and don't set addr_len. Unfortunately
	 * that macro is defined in socket.c instead of .h, so we hardcode its
	 * value here.
	 */
	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	err = sizeof(*vm_addr);
955 static int vsock_shutdown(struct socket
*sock
, int mode
)
	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do. Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
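	/* For example: SHUT_RD (0) + 1 == RCV_SHUTDOWN (1), SHUT_WR (1) + 1 ==
	 * SEND_SHUTDOWN (2), and SHUT_RDWR (2) + 1 == 3 ==
	 * RCV_SHUTDOWN | SEND_SHUTDOWN.
	 */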
968 if ((mode
& ~SHUTDOWN_MASK
) || !mode
)
971 /* If this is a connection oriented socket and it is not connected then
972 * bail out immediately. If it is a DGRAM socket then we must first
973 * kick the socket so that it wakes up from any sleeping calls, for
974 * example recv(), and then afterwards return the error.
980 if (sock
->state
== SS_UNCONNECTED
) {
982 if (sock_type_connectible(sk
->sk_type
))
985 sock
->state
= SS_DISCONNECTING
;
989 /* Receive and send shutdowns are treated alike. */
990 mode
= mode
& (RCV_SHUTDOWN
| SEND_SHUTDOWN
);
992 sk
->sk_shutdown
|= mode
;
993 sk
->sk_state_change(sk
);
995 if (sock_type_connectible(sk
->sk_type
)) {
996 sock_reset_flag(sk
, SOCK_DONE
);
997 vsock_send_shutdown(sk
, mode
);
1006 static __poll_t
vsock_poll(struct file
*file
, struct socket
*sock
,
1011 struct vsock_sock
*vsk
;
1016 poll_wait(file
, sk_sleep(sk
), wait
);
1020 /* Signify that there has been an error on this socket. */
1023 /* INET sockets treat local write shutdown and peer write shutdown as a
1024 * case of EPOLLHUP set.
1026 if ((sk
->sk_shutdown
== SHUTDOWN_MASK
) ||
1027 ((sk
->sk_shutdown
& SEND_SHUTDOWN
) &&
1028 (vsk
->peer_shutdown
& SEND_SHUTDOWN
))) {
1032 if (sk
->sk_shutdown
& RCV_SHUTDOWN
||
1033 vsk
->peer_shutdown
& SEND_SHUTDOWN
) {
1037 if (sock
->type
== SOCK_DGRAM
) {
1038 /* For datagram sockets we can read if there is something in
1039 * the queue and write as long as the socket isn't shutdown for
1042 if (!skb_queue_empty_lockless(&sk
->sk_receive_queue
) ||
1043 (sk
->sk_shutdown
& RCV_SHUTDOWN
)) {
1044 mask
|= EPOLLIN
| EPOLLRDNORM
;
1047 if (!(sk
->sk_shutdown
& SEND_SHUTDOWN
))
1048 mask
|= EPOLLOUT
| EPOLLWRNORM
| EPOLLWRBAND
;
1050 } else if (sock_type_connectible(sk
->sk_type
)) {
1051 const struct vsock_transport
*transport
;
1055 transport
= vsk
->transport
;
1057 /* Listening sockets that have connections in their accept
1058 * queue can be read.
1060 if (sk
->sk_state
== TCP_LISTEN
1061 && !vsock_is_accept_queue_empty(sk
))
1062 mask
|= EPOLLIN
| EPOLLRDNORM
;
1064 /* If there is something in the queue then we can read. */
1065 if (transport
&& transport
->stream_is_active(vsk
) &&
1066 !(sk
->sk_shutdown
& RCV_SHUTDOWN
)) {
1067 bool data_ready_now
= false;
1068 int ret
= transport
->notify_poll_in(
1069 vsk
, 1, &data_ready_now
);
1074 mask
|= EPOLLIN
| EPOLLRDNORM
;
1079 /* Sockets whose connections have been closed, reset, or
1080 * terminated should also be considered read, and we check the
1081 * shutdown flag for that.
1083 if (sk
->sk_shutdown
& RCV_SHUTDOWN
||
1084 vsk
->peer_shutdown
& SEND_SHUTDOWN
) {
1085 mask
|= EPOLLIN
| EPOLLRDNORM
;
1088 /* Connected sockets that can produce data can be written. */
1089 if (transport
&& sk
->sk_state
== TCP_ESTABLISHED
) {
1090 if (!(sk
->sk_shutdown
& SEND_SHUTDOWN
)) {
1091 bool space_avail_now
= false;
1092 int ret
= transport
->notify_poll_out(
1093 vsk
, 1, &space_avail_now
);
1097 if (space_avail_now
)
1098 /* Remove EPOLLWRBAND since INET
1099 * sockets are not setting it.
1101 mask
|= EPOLLOUT
| EPOLLWRNORM
;
1107 /* Simulate INET socket poll behaviors, which sets
1108 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
1109 * but local send is not shutdown.
1111 if (sk
->sk_state
== TCP_CLOSE
|| sk
->sk_state
== TCP_CLOSING
) {
1112 if (!(sk
->sk_shutdown
& SEND_SHUTDOWN
))
1113 mask
|= EPOLLOUT
| EPOLLWRNORM
;
1123 static int vsock_dgram_sendmsg(struct socket
*sock
, struct msghdr
*msg
,
1128 struct vsock_sock
*vsk
;
1129 struct sockaddr_vm
*remote_addr
;
1130 const struct vsock_transport
*transport
;
1132 if (msg
->msg_flags
& MSG_OOB
)
1135 /* For now, MSG_DONTWAIT is always assumed... */
1142 transport
= vsk
->transport
;
1144 err
= vsock_auto_bind(vsk
);
1149 /* If the provided message contains an address, use that. Otherwise
1150 * fall back on the socket's remote handle (if it has been connected).
1152 if (msg
->msg_name
&&
1153 vsock_addr_cast(msg
->msg_name
, msg
->msg_namelen
,
1154 &remote_addr
) == 0) {
1155 /* Ensure this address is of the right type and is a valid
1159 if (remote_addr
->svm_cid
== VMADDR_CID_ANY
)
1160 remote_addr
->svm_cid
= transport
->get_local_cid();
1162 if (!vsock_addr_bound(remote_addr
)) {
1166 } else if (sock
->state
== SS_CONNECTED
) {
1167 remote_addr
= &vsk
->remote_addr
;
1169 if (remote_addr
->svm_cid
== VMADDR_CID_ANY
)
1170 remote_addr
->svm_cid
= transport
->get_local_cid();
1172 /* XXX Should connect() or this function ensure remote_addr is
1175 if (!vsock_addr_bound(&vsk
->remote_addr
)) {
1184 if (!transport
->dgram_allow(remote_addr
->svm_cid
,
1185 remote_addr
->svm_port
)) {
1190 err
= transport
->dgram_enqueue(vsk
, remote_addr
, msg
, len
);
1197 static int vsock_dgram_connect(struct socket
*sock
,
1198 struct sockaddr
*addr
, int addr_len
, int flags
)
1202 struct vsock_sock
*vsk
;
1203 struct sockaddr_vm
*remote_addr
;
1208 err
= vsock_addr_cast(addr
, addr_len
, &remote_addr
);
1209 if (err
== -EAFNOSUPPORT
&& remote_addr
->svm_family
== AF_UNSPEC
) {
1211 vsock_addr_init(&vsk
->remote_addr
, VMADDR_CID_ANY
,
1213 sock
->state
= SS_UNCONNECTED
;
1216 } else if (err
!= 0)
1221 err
= vsock_auto_bind(vsk
);
1225 if (!vsk
->transport
->dgram_allow(remote_addr
->svm_cid
,
1226 remote_addr
->svm_port
)) {
1231 memcpy(&vsk
->remote_addr
, remote_addr
, sizeof(vsk
->remote_addr
));
1232 sock
->state
= SS_CONNECTED
;
1239 static int vsock_dgram_recvmsg(struct socket
*sock
, struct msghdr
*msg
,
1240 size_t len
, int flags
)
1242 struct vsock_sock
*vsk
= vsock_sk(sock
->sk
);
1244 return vsk
->transport
->dgram_dequeue(vsk
, msg
, len
, flags
);
1247 static const struct proto_ops vsock_dgram_ops
= {
1249 .owner
= THIS_MODULE
,
1250 .release
= vsock_release
,
1252 .connect
= vsock_dgram_connect
,
1253 .socketpair
= sock_no_socketpair
,
1254 .accept
= sock_no_accept
,
1255 .getname
= vsock_getname
,
1257 .ioctl
= sock_no_ioctl
,
1258 .listen
= sock_no_listen
,
1259 .shutdown
= vsock_shutdown
,
1260 .sendmsg
= vsock_dgram_sendmsg
,
1261 .recvmsg
= vsock_dgram_recvmsg
,
1262 .mmap
= sock_no_mmap
,
1263 .sendpage
= sock_no_sendpage
,
1266 static int vsock_transport_cancel_pkt(struct vsock_sock
*vsk
)
1268 const struct vsock_transport
*transport
= vsk
->transport
;
1270 if (!transport
|| !transport
->cancel_pkt
)
1273 return transport
->cancel_pkt(vsk
);
1276 static void vsock_connect_timeout(struct work_struct
*work
)
1279 struct vsock_sock
*vsk
;
1281 vsk
= container_of(work
, struct vsock_sock
, connect_work
.work
);
1285 if (sk
->sk_state
== TCP_SYN_SENT
&&
1286 (sk
->sk_shutdown
!= SHUTDOWN_MASK
)) {
1287 sk
->sk_state
= TCP_CLOSE
;
1288 sk
->sk_socket
->state
= SS_UNCONNECTED
;
1289 sk
->sk_err
= ETIMEDOUT
;
1290 sk_error_report(sk
);
1291 vsock_transport_cancel_pkt(vsk
);
1298 static int vsock_connect(struct socket
*sock
, struct sockaddr
*addr
,
1299 int addr_len
, int flags
)
1303 struct vsock_sock
*vsk
;
1304 const struct vsock_transport
*transport
;
1305 struct sockaddr_vm
*remote_addr
;
1315 /* XXX AF_UNSPEC should make us disconnect like AF_INET. */
1316 switch (sock
->state
) {
1320 case SS_DISCONNECTING
:
1324 /* This continues on so we can move sock into the SS_CONNECTED
1325 * state once the connection has completed (at which point err
1326 * will be set to zero also). Otherwise, we will either wait
1327 * for the connection or return -EALREADY should this be a
1328 * non-blocking call.
1331 if (flags
& O_NONBLOCK
)
1335 if ((sk
->sk_state
== TCP_LISTEN
) ||
1336 vsock_addr_cast(addr
, addr_len
, &remote_addr
) != 0) {
1341 /* Set the remote address that we are connecting to. */
1342 memcpy(&vsk
->remote_addr
, remote_addr
,
1343 sizeof(vsk
->remote_addr
));
1345 err
= vsock_assign_transport(vsk
, NULL
);
1349 transport
= vsk
->transport
;
1351 /* The hypervisor and well-known contexts do not have socket
1355 !transport
->stream_allow(remote_addr
->svm_cid
,
1356 remote_addr
->svm_port
)) {
1361 err
= vsock_auto_bind(vsk
);
1365 sk
->sk_state
= TCP_SYN_SENT
;
1367 err
= transport
->connect(vsk
);
1371 /* Mark sock as connecting and set the error code to in
1372 * progress in case this is a non-blocking connect.
1374 sock
->state
= SS_CONNECTING
;
1378 /* The receive path will handle all communication until we are able to
1379 * enter the connected state. Here we wait for the connection to be
1380 * completed or a notification of an error.
1382 timeout
= vsk
->connect_timeout
;
1383 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
1385 while (sk
->sk_state
!= TCP_ESTABLISHED
&& sk
->sk_err
== 0) {
1386 if (flags
& O_NONBLOCK
) {
1387 /* If we're not going to block, we schedule a timeout
1388 * function to generate a timeout on the connection
1389 * attempt, in case the peer doesn't respond in a
1390 * timely manner. We hold on to the socket until the
1395 /* If the timeout function is already scheduled,
1396 * reschedule it, then ungrab the socket refcount to
1399 if (mod_delayed_work(system_wq
, &vsk
->connect_work
,
1403 /* Skip ahead to preserve error code set above. */
1408 timeout
= schedule_timeout(timeout
);
1411 if (signal_pending(current
)) {
1412 err
= sock_intr_errno(timeout
);
1413 sk
->sk_state
= sk
->sk_state
== TCP_ESTABLISHED
? TCP_CLOSING
: TCP_CLOSE
;
1414 sock
->state
= SS_UNCONNECTED
;
1415 vsock_transport_cancel_pkt(vsk
);
1416 vsock_remove_connected(vsk
);
1418 } else if (timeout
== 0) {
1420 sk
->sk_state
= TCP_CLOSE
;
1421 sock
->state
= SS_UNCONNECTED
;
1422 vsock_transport_cancel_pkt(vsk
);
1426 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
1431 sk
->sk_state
= TCP_CLOSE
;
1432 sock
->state
= SS_UNCONNECTED
;
1438 finish_wait(sk_sleep(sk
), &wait
);
1444 static int vsock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
,
1447 struct sock
*listener
;
1449 struct sock
*connected
;
1450 struct vsock_sock
*vconnected
;
1455 listener
= sock
->sk
;
1457 lock_sock(listener
);
1459 if (!sock_type_connectible(sock
->type
)) {
1464 if (listener
->sk_state
!= TCP_LISTEN
) {
1469 /* Wait for children sockets to appear; these are the new sockets
1470 * created upon connection establishment.
1472 timeout
= sock_rcvtimeo(listener
, flags
& O_NONBLOCK
);
1473 prepare_to_wait(sk_sleep(listener
), &wait
, TASK_INTERRUPTIBLE
);
1475 while ((connected
= vsock_dequeue_accept(listener
)) == NULL
&&
1476 listener
->sk_err
== 0) {
1477 release_sock(listener
);
1478 timeout
= schedule_timeout(timeout
);
1479 finish_wait(sk_sleep(listener
), &wait
);
1480 lock_sock(listener
);
1482 if (signal_pending(current
)) {
1483 err
= sock_intr_errno(timeout
);
1485 } else if (timeout
== 0) {
1490 prepare_to_wait(sk_sleep(listener
), &wait
, TASK_INTERRUPTIBLE
);
1492 finish_wait(sk_sleep(listener
), &wait
);
1494 if (listener
->sk_err
)
1495 err
= -listener
->sk_err
;
1498 sk_acceptq_removed(listener
);
1500 lock_sock_nested(connected
, SINGLE_DEPTH_NESTING
);
1501 vconnected
= vsock_sk(connected
);
1503 /* If the listener socket has received an error, then we should
1504 * reject this socket and return. Note that we simply mark the
1505 * socket rejected, drop our reference, and let the cleanup
1506 * function handle the cleanup; the fact that we found it in
1507 * the listener's accept queue guarantees that the cleanup
1508 * function hasn't run yet.
1511 vconnected
->rejected
= true;
1513 newsock
->state
= SS_CONNECTED
;
1514 sock_graft(connected
, newsock
);
1517 release_sock(connected
);
1518 sock_put(connected
);
1522 release_sock(listener
);
1526 static int vsock_listen(struct socket
*sock
, int backlog
)
1530 struct vsock_sock
*vsk
;
1536 if (!sock_type_connectible(sk
->sk_type
)) {
1541 if (sock
->state
!= SS_UNCONNECTED
) {
1548 if (!vsock_addr_bound(&vsk
->local_addr
)) {
1553 sk
->sk_max_ack_backlog
= backlog
;
1554 sk
->sk_state
= TCP_LISTEN
;
1563 static void vsock_update_buffer_size(struct vsock_sock
*vsk
,
1564 const struct vsock_transport
*transport
,
1567 if (val
> vsk
->buffer_max_size
)
1568 val
= vsk
->buffer_max_size
;
1570 if (val
< vsk
->buffer_min_size
)
1571 val
= vsk
->buffer_min_size
;
1573 if (val
!= vsk
->buffer_size
&&
1574 transport
&& transport
->notify_buffer_size
)
1575 transport
->notify_buffer_size(vsk
, &val
);
1577 vsk
->buffer_size
= val
;
1580 static int vsock_connectible_setsockopt(struct socket
*sock
,
1584 unsigned int optlen
)
1588 struct vsock_sock
*vsk
;
1589 const struct vsock_transport
*transport
;
1592 if (level
!= AF_VSOCK
)
1593 return -ENOPROTOOPT
;
1595 #define COPY_IN(_v) \
1597 if (optlen < sizeof(_v)) { \
1601 if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) { \
1613 transport
= vsk
->transport
;
1616 case SO_VM_SOCKETS_BUFFER_SIZE
:
1618 vsock_update_buffer_size(vsk
, transport
, val
);
1621 case SO_VM_SOCKETS_BUFFER_MAX_SIZE
:
1623 vsk
->buffer_max_size
= val
;
1624 vsock_update_buffer_size(vsk
, transport
, vsk
->buffer_size
);
1627 case SO_VM_SOCKETS_BUFFER_MIN_SIZE
:
1629 vsk
->buffer_min_size
= val
;
1630 vsock_update_buffer_size(vsk
, transport
, vsk
->buffer_size
);
1633 case SO_VM_SOCKETS_CONNECT_TIMEOUT
: {
1634 struct __kernel_old_timeval tv
;
1636 if (tv
.tv_sec
>= 0 && tv
.tv_usec
< USEC_PER_SEC
&&
1637 tv
.tv_sec
< (MAX_SCHEDULE_TIMEOUT
/ HZ
- 1)) {
1638 vsk
->connect_timeout
= tv
.tv_sec
* HZ
+
1639 DIV_ROUND_UP(tv
.tv_usec
, (1000000 / HZ
));
1640 if (vsk
->connect_timeout
== 0)
1641 vsk
->connect_timeout
=
1642 VSOCK_DEFAULT_CONNECT_TIMEOUT
;
1662 static int vsock_connectible_getsockopt(struct socket
*sock
,
1663 int level
, int optname
,
1664 char __user
*optval
,
1670 struct vsock_sock
*vsk
;
1673 if (level
!= AF_VSOCK
)
1674 return -ENOPROTOOPT
;
1676 err
= get_user(len
, optlen
);
1680 #define COPY_OUT(_v) \
1682 if (len < sizeof(_v)) \
1686 if (copy_to_user(optval, &_v, len) != 0) \
1696 case SO_VM_SOCKETS_BUFFER_SIZE
:
1697 val
= vsk
->buffer_size
;
1701 case SO_VM_SOCKETS_BUFFER_MAX_SIZE
:
1702 val
= vsk
->buffer_max_size
;
1706 case SO_VM_SOCKETS_BUFFER_MIN_SIZE
:
1707 val
= vsk
->buffer_min_size
;
1711 case SO_VM_SOCKETS_CONNECT_TIMEOUT
: {
1712 struct __kernel_old_timeval tv
;
1713 tv
.tv_sec
= vsk
->connect_timeout
/ HZ
;
1715 (vsk
->connect_timeout
-
1716 tv
.tv_sec
* HZ
) * (1000000 / HZ
);
1721 return -ENOPROTOOPT
;
1724 err
= put_user(len
, optlen
);
1733 static int vsock_connectible_sendmsg(struct socket
*sock
, struct msghdr
*msg
,
1737 struct vsock_sock
*vsk
;
1738 const struct vsock_transport
*transport
;
1739 ssize_t total_written
;
1742 struct vsock_transport_send_notify_data send_data
;
1743 DEFINE_WAIT_FUNC(wait
, woken_wake_function
);
1750 if (msg
->msg_flags
& MSG_OOB
)
1755 transport
= vsk
->transport
;
1757 /* Callers should not provide a destination with connection oriented
1760 if (msg
->msg_namelen
) {
1761 err
= sk
->sk_state
== TCP_ESTABLISHED
? -EISCONN
: -EOPNOTSUPP
;
1765 /* Send data only if both sides are not shutdown in the direction. */
1766 if (sk
->sk_shutdown
& SEND_SHUTDOWN
||
1767 vsk
->peer_shutdown
& RCV_SHUTDOWN
) {
1772 if (!transport
|| sk
->sk_state
!= TCP_ESTABLISHED
||
1773 !vsock_addr_bound(&vsk
->local_addr
)) {
1778 if (!vsock_addr_bound(&vsk
->remote_addr
)) {
1779 err
= -EDESTADDRREQ
;
1783 /* Wait for room in the produce queue to enqueue our user's data. */
1784 timeout
= sock_sndtimeo(sk
, msg
->msg_flags
& MSG_DONTWAIT
);
1786 err
= transport
->notify_send_init(vsk
, &send_data
);
1790 while (total_written
< len
) {
1793 add_wait_queue(sk_sleep(sk
), &wait
);
1794 while (vsock_stream_has_space(vsk
) == 0 &&
1796 !(sk
->sk_shutdown
& SEND_SHUTDOWN
) &&
1797 !(vsk
->peer_shutdown
& RCV_SHUTDOWN
)) {
1799 /* Don't wait for non-blocking sockets. */
1802 remove_wait_queue(sk_sleep(sk
), &wait
);
1806 err
= transport
->notify_send_pre_block(vsk
, &send_data
);
1808 remove_wait_queue(sk_sleep(sk
), &wait
);
1813 timeout
= wait_woken(&wait
, TASK_INTERRUPTIBLE
, timeout
);
1815 if (signal_pending(current
)) {
1816 err
= sock_intr_errno(timeout
);
1817 remove_wait_queue(sk_sleep(sk
), &wait
);
1819 } else if (timeout
== 0) {
1821 remove_wait_queue(sk_sleep(sk
), &wait
);
1825 remove_wait_queue(sk_sleep(sk
), &wait
);
1827 /* These checks occur both as part of and after the loop
1828 * conditional since we need to check before and after
1834 } else if ((sk
->sk_shutdown
& SEND_SHUTDOWN
) ||
1835 (vsk
->peer_shutdown
& RCV_SHUTDOWN
)) {
1840 err
= transport
->notify_send_pre_enqueue(vsk
, &send_data
);
1844 /* Note that enqueue will only write as many bytes as are free
1845 * in the produce queue, so we don't need to ensure len is
1846 * smaller than the queue size. It is the caller's
1847 * responsibility to check how many bytes we were able to send.
1850 if (sk
->sk_type
== SOCK_SEQPACKET
) {
1851 written
= transport
->seqpacket_enqueue(vsk
,
1852 msg
, len
- total_written
);
1854 written
= transport
->stream_enqueue(vsk
,
1855 msg
, len
- total_written
);
1862 total_written
+= written
;
1864 err
= transport
->notify_send_post_enqueue(
1865 vsk
, written
, &send_data
);
1872 if (total_written
> 0) {
1873 /* Return number of written bytes only if:
1874 * 1) SOCK_STREAM socket.
1875 * 2) SOCK_SEQPACKET socket when whole buffer is sent.
1877 if (sk
->sk_type
== SOCK_STREAM
|| total_written
== len
)
1878 err
= total_written
;
1885 static int vsock_connectible_wait_data(struct sock
*sk
,
1886 struct wait_queue_entry
*wait
,
1888 struct vsock_transport_recv_notify_data
*recv_data
,
1891 const struct vsock_transport
*transport
;
1892 struct vsock_sock
*vsk
;
1898 transport
= vsk
->transport
;
1900 while ((data
= vsock_connectible_has_data(vsk
)) == 0) {
1901 prepare_to_wait(sk_sleep(sk
), wait
, TASK_INTERRUPTIBLE
);
1903 if (sk
->sk_err
!= 0 ||
1904 (sk
->sk_shutdown
& RCV_SHUTDOWN
) ||
1905 (vsk
->peer_shutdown
& SEND_SHUTDOWN
)) {
1909 /* Don't wait for non-blocking sockets. */
1916 err
= transport
->notify_recv_pre_block(vsk
, target
, recv_data
);
1922 timeout
= schedule_timeout(timeout
);
1925 if (signal_pending(current
)) {
1926 err
= sock_intr_errno(timeout
);
1928 } else if (timeout
== 0) {
1934 finish_wait(sk_sleep(sk
), wait
);
1939 /* Internal transport error when checking for available
1940 * data. XXX This should be changed to a connection
1941 * reset in a later change.
1949 static int __vsock_stream_recvmsg(struct sock
*sk
, struct msghdr
*msg
,
1950 size_t len
, int flags
)
1952 struct vsock_transport_recv_notify_data recv_data
;
1953 const struct vsock_transport
*transport
;
1954 struct vsock_sock
*vsk
;
1963 transport
= vsk
->transport
;
1965 /* We must not copy less than target bytes into the user's buffer
1966 * before returning successfully, so we wait for the consume queue to
1967 * have that much data to consume before dequeueing. Note that this
1968 * makes it impossible to handle cases where target is greater than the
1971 target
= sock_rcvlowat(sk
, flags
& MSG_WAITALL
, len
);
1972 if (target
>= transport
->stream_rcvhiwat(vsk
)) {
1976 timeout
= sock_rcvtimeo(sk
, flags
& MSG_DONTWAIT
);
1979 err
= transport
->notify_recv_init(vsk
, target
, &recv_data
);
1987 err
= vsock_connectible_wait_data(sk
, &wait
, timeout
,
1988 &recv_data
, target
);
1992 err
= transport
->notify_recv_pre_dequeue(vsk
, target
,
1997 read
= transport
->stream_dequeue(vsk
, msg
, len
- copied
, flags
);
2005 err
= transport
->notify_recv_post_dequeue(vsk
, target
, read
,
2006 !(flags
& MSG_PEEK
), &recv_data
);
2010 if (read
>= target
|| flags
& MSG_PEEK
)
2018 else if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
2028 static int __vsock_seqpacket_recvmsg(struct sock
*sk
, struct msghdr
*msg
,
2029 size_t len
, int flags
)
2031 const struct vsock_transport
*transport
;
2032 struct vsock_sock
*vsk
;
2039 transport
= vsk
->transport
;
2041 timeout
= sock_rcvtimeo(sk
, flags
& MSG_DONTWAIT
);
2043 err
= vsock_connectible_wait_data(sk
, &wait
, timeout
, NULL
, 0);
2047 msg_len
= transport
->seqpacket_dequeue(vsk
, msg
, flags
);
2056 } else if (sk
->sk_shutdown
& RCV_SHUTDOWN
) {
2059 /* User sets MSG_TRUNC, so return real length of
2062 if (flags
& MSG_TRUNC
)
2065 err
= len
- msg_data_left(msg
);
2067 /* Always set MSG_TRUNC if real length of packet is
2068 * bigger than user's buffer.
2071 msg
->msg_flags
|= MSG_TRUNC
;
2079 vsock_connectible_recvmsg(struct socket
*sock
, struct msghdr
*msg
, size_t len
,
2083 struct vsock_sock
*vsk
;
2084 const struct vsock_transport
*transport
;
2095 transport
= vsk
->transport
;
2097 if (!transport
|| sk
->sk_state
!= TCP_ESTABLISHED
) {
2098 /* Recvmsg is supposed to return 0 if a peer performs an
2099 * orderly shutdown. Differentiate between that case and when a
2100 * peer has not connected or a local shutdown occurred with the
2103 if (sock_flag(sk
, SOCK_DONE
))
2111 if (flags
& MSG_OOB
) {
2116 /* We don't check peer_shutdown flag here since peer may actually shut
2117 * down, but there can be data in the queue that a local socket can
2120 if (sk
->sk_shutdown
& RCV_SHUTDOWN
) {
2125 /* It is valid on Linux to pass in a zero-length receive buffer. This
2126 * is not an error. We may as well bail out now.
2133 if (sk
->sk_type
== SOCK_STREAM
)
2134 err
= __vsock_stream_recvmsg(sk
, msg
, len
, flags
);
2136 err
= __vsock_seqpacket_recvmsg(sk
, msg
, len
, flags
);
2143 static const struct proto_ops vsock_stream_ops
= {
2145 .owner
= THIS_MODULE
,
2146 .release
= vsock_release
,
2148 .connect
= vsock_connect
,
2149 .socketpair
= sock_no_socketpair
,
2150 .accept
= vsock_accept
,
2151 .getname
= vsock_getname
,
2153 .ioctl
= sock_no_ioctl
,
2154 .listen
= vsock_listen
,
2155 .shutdown
= vsock_shutdown
,
2156 .setsockopt
= vsock_connectible_setsockopt
,
2157 .getsockopt
= vsock_connectible_getsockopt
,
2158 .sendmsg
= vsock_connectible_sendmsg
,
2159 .recvmsg
= vsock_connectible_recvmsg
,
2160 .mmap
= sock_no_mmap
,
2161 .sendpage
= sock_no_sendpage
,
2164 static const struct proto_ops vsock_seqpacket_ops
= {
2166 .owner
= THIS_MODULE
,
2167 .release
= vsock_release
,
2169 .connect
= vsock_connect
,
2170 .socketpair
= sock_no_socketpair
,
2171 .accept
= vsock_accept
,
2172 .getname
= vsock_getname
,
2174 .ioctl
= sock_no_ioctl
,
2175 .listen
= vsock_listen
,
2176 .shutdown
= vsock_shutdown
,
2177 .setsockopt
= vsock_connectible_setsockopt
,
2178 .getsockopt
= vsock_connectible_getsockopt
,
2179 .sendmsg
= vsock_connectible_sendmsg
,
2180 .recvmsg
= vsock_connectible_recvmsg
,
2181 .mmap
= sock_no_mmap
,
2182 .sendpage
= sock_no_sendpage
,
2185 static int vsock_create(struct net
*net
, struct socket
*sock
,
2186 int protocol
, int kern
)
2188 struct vsock_sock
*vsk
;
2195 if (protocol
&& protocol
!= PF_VSOCK
)
2196 return -EPROTONOSUPPORT
;
2198 switch (sock
->type
) {
2200 sock
->ops
= &vsock_dgram_ops
;
2203 sock
->ops
= &vsock_stream_ops
;
2205 case SOCK_SEQPACKET
:
2206 sock
->ops
= &vsock_seqpacket_ops
;
2209 return -ESOCKTNOSUPPORT
;
2212 sock
->state
= SS_UNCONNECTED
;
2214 sk
= __vsock_create(net
, sock
, NULL
, GFP_KERNEL
, 0, kern
);
2220 if (sock
->type
== SOCK_DGRAM
) {
2221 ret
= vsock_assign_transport(vsk
, NULL
);
2228 vsock_insert_unbound(vsk
);
2233 static const struct net_proto_family vsock_family_ops
= {
2235 .create
= vsock_create
,
2236 .owner
= THIS_MODULE
,
2239 static long vsock_dev_do_ioctl(struct file
*filp
,
2240 unsigned int cmd
, void __user
*ptr
)
2242 u32 __user
*p
= ptr
;
2243 u32 cid
= VMADDR_CID_ANY
;
	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
		/* To be compatible with the VMCI behavior, we prioritize the
		 * guest CID instead of well-known host CID (VMADDR_CID_HOST).
		 */
		if (transport_g2h)
			cid = transport_g2h->get_local_cid();
		else if (transport_h2g)
			cid = transport_h2g->get_local_cid();
2256 if (put_user(cid
, p
) != 0)
2261 retval
= -ENOIOCTLCMD
;
2267 static long vsock_dev_ioctl(struct file
*filp
,
2268 unsigned int cmd
, unsigned long arg
)
2270 return vsock_dev_do_ioctl(filp
, cmd
, (void __user
*)arg
);
2273 #ifdef CONFIG_COMPAT
2274 static long vsock_dev_compat_ioctl(struct file
*filp
,
2275 unsigned int cmd
, unsigned long arg
)
2277 return vsock_dev_do_ioctl(filp
, cmd
, compat_ptr(arg
));
2281 static const struct file_operations vsock_device_ops
= {
2282 .owner
= THIS_MODULE
,
2283 .unlocked_ioctl
= vsock_dev_ioctl
,
2284 #ifdef CONFIG_COMPAT
2285 .compat_ioctl
= vsock_dev_compat_ioctl
,
2287 .open
= nonseekable_open
,
2290 static struct miscdevice vsock_device
= {
2292 .fops
= &vsock_device_ops
,
2295 static int __init
vsock_init(void)
2299 vsock_init_tables();
2301 vsock_proto
.owner
= THIS_MODULE
;
2302 vsock_device
.minor
= MISC_DYNAMIC_MINOR
;
2303 err
= misc_register(&vsock_device
);
2305 pr_err("Failed to register misc device\n");
2306 goto err_reset_transport
;
2309 err
= proto_register(&vsock_proto
, 1); /* we want our slab */
2311 pr_err("Cannot register vsock protocol\n");
2312 goto err_deregister_misc
;
2315 err
= sock_register(&vsock_family_ops
);
2317 pr_err("could not register af_vsock (%d) address family: %d\n",
2319 goto err_unregister_proto
;
2324 err_unregister_proto
:
2325 proto_unregister(&vsock_proto
);
2326 err_deregister_misc
:
2327 misc_deregister(&vsock_device
);
2328 err_reset_transport
:
2332 static void __exit
vsock_exit(void)
2334 misc_deregister(&vsock_device
);
2335 sock_unregister(AF_VSOCK
);
2336 proto_unregister(&vsock_proto
);
2339 const struct vsock_transport
*vsock_core_get_transport(struct vsock_sock
*vsk
)
2341 return vsk
->transport
;
2343 EXPORT_SYMBOL_GPL(vsock_core_get_transport
);
2345 int vsock_core_register(const struct vsock_transport
*t
, int features
)
2347 const struct vsock_transport
*t_h2g
, *t_g2h
, *t_dgram
, *t_local
;
2348 int err
= mutex_lock_interruptible(&vsock_register_mutex
);
2353 t_h2g
= transport_h2g
;
2354 t_g2h
= transport_g2h
;
2355 t_dgram
= transport_dgram
;
2356 t_local
= transport_local
;
2358 if (features
& VSOCK_TRANSPORT_F_H2G
) {
2366 if (features
& VSOCK_TRANSPORT_F_G2H
) {
2374 if (features
& VSOCK_TRANSPORT_F_DGRAM
) {
2382 if (features
& VSOCK_TRANSPORT_F_LOCAL
) {
2390 transport_h2g
= t_h2g
;
2391 transport_g2h
= t_g2h
;
2392 transport_dgram
= t_dgram
;
2393 transport_local
= t_local
;
2396 mutex_unlock(&vsock_register_mutex
);
2399 EXPORT_SYMBOL_GPL(vsock_core_register
);
2401 void vsock_core_unregister(const struct vsock_transport
*t
)
2403 mutex_lock(&vsock_register_mutex
);
2405 if (transport_h2g
== t
)
2406 transport_h2g
= NULL
;
2408 if (transport_g2h
== t
)
2409 transport_g2h
= NULL
;
2411 if (transport_dgram
== t
)
2412 transport_dgram
= NULL
;
2414 if (transport_local
== t
)
2415 transport_local
= NULL
;
2417 mutex_unlock(&vsock_register_mutex
);
2419 EXPORT_SYMBOL_GPL(vsock_core_unregister
);
module_init(vsock_init);
module_exit(vsock_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.2.0-k");
MODULE_LICENSE("GPL v2");