]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blob - net/tipc/socket.c
tipc: change socket buffer overflow control to respect sk_rcvbuf
[mirror_ubuntu-eoan-kernel.git] / net / tipc / socket.c
1 /*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2007, 2012 Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2012, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "port.h"
39
40 #include <linux/export.h>
41 #include <net/sock.h>
42
#define SS_LISTENING	-1	/* socket is listening */
#define SS_READY	-2	/* socket is connectionless */

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */

/* TIPC-specific socket state. The generic struct sock must be the first
 * member so that a struct sock pointer can be cast to a struct tipc_sock
 * pointer (see the tipc_sk() macro below).
 */
struct tipc_sock {
	struct sock sk;			/* generic socket (must be first) */
	struct tipc_port *p;		/* underlying TIPC port */
	struct tipc_portid peer_name;	/* id of connected peer, if any */
	unsigned int conn_timeout;	/* connect timeout, in ms */
};

#define tipc_sk(sk) ((struct tipc_sock *)(sk))
#define tipc_sk_port(sk) (tipc_sk(sk)->p)

/* True when a receive can proceed: data is queued, or the connection is
 * being torn down (so the receiver must wake up and see the error).
 */
#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
			(sock->state == SS_DISCONNECTING))

static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
static void wakeupdispatch(struct tipc_port *tport);
static void tipc_data_ready(struct sock *sk, int len);
static void tipc_write_space(struct sock *sk);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;

static struct proto tipc_proto;

static int sockets_enabled;
75 /*
76 * Revised TIPC socket locking policy:
77 *
78 * Most socket operations take the standard socket lock when they start
79 * and hold it until they finish (or until they need to sleep). Acquiring
80 * this lock grants the owner exclusive access to the fields of the socket
81 * data structures, with the exception of the backlog queue. A few socket
82 * operations can be done without taking the socket lock because they only
83 * read socket information that never changes during the life of the socket.
84 *
85 * Socket operations may acquire the lock for the associated TIPC port if they
86 * need to perform an operation on the port. If any routine needs to acquire
87 * both the socket lock and the port lock it must take the socket lock first
88 * to avoid the risk of deadlock.
89 *
90 * The dispatcher handling incoming messages cannot grab the socket lock in
91  * the standard fashion, since it is invoked at the BH level and cannot block.
92 * Instead, it checks to see if the socket lock is currently owned by someone,
93 * and either handles the message itself or adds it to the socket's backlog
94 * queue; in the latter case the queued message is processed once the process
95 * owning the socket lock releases it.
96 *
97 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
98 * the problem of a blocked socket operation preventing any other operations
99 * from occurring. However, applications must be careful if they have
100 * multiple threads trying to send (or receive) on the same socket, as these
101 * operations might interfere with each other. For example, doing a connect
102 * and a receive at the same time might allow the receive to consume the
103 * ACK message meant for the connect. While additional work could be done
104 * to try and overcome this, it doesn't seem to be worthwhile at the present.
105 *
106 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
107 * that another operation that must be performed in a non-blocking manner is
108 * not delayed for very long because the lock has already been taken.
109 *
110 * NOTE: This code assumes that certain fields of a port/socket pair are
111 * constant over its lifetime; such fields can be examined without taking
112 * the socket lock and/or port lock, and do not need to be re-read even
113 * after resuming processing after waiting. These fields include:
114 * - socket type
115 * - pointer to socket sk structure (aka tipc_sock structure)
116 * - pointer to port structure
117 * - port reference
118 */
119
120 /**
121 * advance_rx_queue - discard first buffer in socket receive queue
122 *
123 * Caller must hold socket lock
124 */
125 static void advance_rx_queue(struct sock *sk)
126 {
127 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
128 }
129
130 /**
131 * reject_rx_queue - reject all buffers in socket receive queue
132 *
133 * Caller must hold socket lock
134 */
135 static void reject_rx_queue(struct sock *sk)
136 {
137 struct sk_buff *buf;
138
139 while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
140 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
141 }
142
/**
 * tipc_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	const struct proto_ops *ops;
	socket_state state;
	struct sock *sk;
	struct tipc_port *tp_ptr;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	/* Select the ops table and initial state for the socket type */
	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		state = SS_READY;	/* connectionless sockets start "ready" */
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
	if (sk == NULL)
		return -ENOMEM;

	/* Allocate TIPC port for socket to use */
	tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
				     TIPC_LOW_IMPORTANCE);
	if (unlikely(!tp_ptr)) {
		sk_free(sk);
		return -ENOMEM;
	}

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock->state = state;

	sock_init_data(sock, sk);
	sk->sk_backlog_rcv = backlog_rcv;
	/* Default receive buffer limit from the tipc_rmem sysctl (middle
	 * value) rather than the generic sysctl_rmem default
	 */
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	tipc_sk(sk)->p = tp_ptr;
	tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;

	/* NOTE(review): tipc_createport_raw appears to return with the port
	 * lock held (no lock is taken in this function) -- released here
	 * once the socket and port are fully linked. Confirm against the
	 * definition in port.c.
	 */
	spin_unlock_bh(tp_ptr->lock);

	if (sock->state == SS_READY) {
		/* Connectionless sockets never return undeliverable msgs */
		tipc_set_portunreturnable(tp_ptr->ref, 1);
		if (sock->type == SOCK_DGRAM)
			tipc_set_portunreliable(tp_ptr->ref, 1);
	}

	return 0;
}
220
/**
 * release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport;
	struct sk_buff *buf;
	int res;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tport = tipc_sk_port(sk);
	lock_sock(sk);

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */
	while (sock->state != SS_DISCONNECTING) {
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf == NULL)
			break;
		/* A non-zero 'handle' marks a partially-read buffer (set by
		 * recv_stream); per the note above it is discarded, not
		 * rejected back to the sender
		 */
		if (TIPC_SKB_CB(buf)->handle != 0)
			kfree_skb(buf);
		else {
			/* First reject on a live connection tears it down,
			 * so subsequent loop iterations stop rejecting
			 */
			if ((sock->state == SS_CONNECTING) ||
			    (sock->state == SS_CONNECTED)) {
				sock->state = SS_DISCONNECTING;
				tipc_disconnect(tport->ref);
			}
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		}
	}

	/*
	 * Delete TIPC port; this ensures no more messages are queued
	 * (also disconnects an active connection & sends a 'FIN-' to peer)
	 */
	res = tipc_deleteport(tport->ref);

	/* Discard any remaining (connection-based) messages in receive queue */
	__skb_queue_purge(&sk->sk_receive_queue);

	/* Reject any messages that accumulated in backlog queue */
	sock->state = SS_DISCONNECTING;
	release_sock(sk);

	sock_put(sk);
	sock->sk = NULL;

	return res;
}
292
/**
 * bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	u32 portref = tipc_sk_port(sock->sk)->ref;

	/* Zero-length address means "withdraw all published names" */
	if (unlikely(!uaddr_len))
		return tipc_withdraw(portref, 0, NULL);

	if (uaddr_len < sizeof(struct sockaddr_tipc))
		return -EINVAL;
	if (addr->family != AF_TIPC)
		return -EAFNOSUPPORT;

	/* A single name is handled as a name sequence of length one */
	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
		return -EAFNOSUPPORT;

	/* Name types below TIPC_RESERVED_TYPES are reserved for TIPC itself */
	if (addr->addr.nameseq.type < TIPC_RESERVED_TYPES)
		return -EACCES;

	/* Positive scope publishes the name(s); negative scope withdraws */
	return (addr->scope > 0) ?
		tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
		tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
}
333
334 /**
335 * get_name - get port ID of socket or peer socket
336 * @sock: socket structure
337 * @uaddr: area for returned socket address
338 * @uaddr_len: area for returned length of socket address
339 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
340 *
341 * Returns 0 on success, errno otherwise
342 *
343 * NOTE: This routine doesn't need to take the socket lock since it only
344 * accesses socket information that is unchanging (or which changes in
345 * a completely predictable manner).
346 */
347 static int get_name(struct socket *sock, struct sockaddr *uaddr,
348 int *uaddr_len, int peer)
349 {
350 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
351 struct tipc_sock *tsock = tipc_sk(sock->sk);
352
353 memset(addr, 0, sizeof(*addr));
354 if (peer) {
355 if ((sock->state != SS_CONNECTED) &&
356 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
357 return -ENOTCONN;
358 addr->addr.id.ref = tsock->peer_name.ref;
359 addr->addr.id.node = tsock->peer_name.node;
360 } else {
361 addr->addr.id.ref = tsock->p->ref;
362 addr->addr.id.node = tipc_own_addr;
363 }
364
365 *uaddr_len = sizeof(*addr);
366 addr->addrtype = TIPC_ADDR_ID;
367 addr->family = AF_TIPC;
368 addr->scope = 0;
369 addr->addr.name.domain = 0;
370
371 return 0;
372 }
373
/**
 * poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: ???
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * TIPC sets the returned events as follows:
 *
 * socket state		flags set
 * ------------		---------
 * unconnected		no read flags
 *			POLLOUT if port is not congested
 *
 * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
 *			no write flags
 *
 * connected		POLLIN/POLLRDNORM if data in rx queue
 *			POLLOUT if port is not congested
 *
 * disconnecting	POLLIN/POLLRDNORM/POLLHUP
 *			no write flags
 *
 * listening		POLLIN if SYN in rx queue
 *			no write flags
 *
 * ready		POLLIN/POLLRDNORM if data in rx queue
 * [connectionless]	POLLOUT (since port cannot be congested)
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static unsigned int poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	u32 mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	/* Cast to int because TIPC's private states (SS_LISTENING/SS_READY)
	 * are negative values outside the socket_state enum
	 */
	switch ((int)sock->state) {
	case SS_UNCONNECTED:
		if (!tipc_sk_port(sk)->congested)
			mask |= POLLOUT;
		break;
	case SS_READY:
	case SS_CONNECTED:
		if (!tipc_sk_port(sk)->congested)
			mask |= POLLOUT;
		/* fall thru' */
	case SS_CONNECTING:
	case SS_LISTENING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case SS_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
	}

	return mask;
}
444
445 /**
446 * dest_name_check - verify user is permitted to send to specified port name
447 * @dest: destination address
448 * @m: descriptor for message to be sent
449 *
450 * Prevents restricted configuration commands from being issued by
451 * unauthorized users.
452 *
453 * Returns 0 if permission is granted, otherwise errno
454 */
455 static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
456 {
457 struct tipc_cfg_msg_hdr hdr;
458
459 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
460 return 0;
461 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
462 return 0;
463 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
464 return -EACCES;
465
466 if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
467 return -EMSGSIZE;
468 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
469 return -EFAULT;
470 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
471 return -EACCES;
472
473 return 0;
474 }
475
/**
 * send_msg - send message in connectionless manner
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: message to send
 * @total_len: length of message
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int send_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
	int needs_conn;
	long timeout_val;
	int res = -EINVAL;

	if (unlikely(!dest))
		return -EDESTADDRREQ;
	if (unlikely((m->msg_namelen < sizeof(*dest)) ||
		     (dest->family != AF_TIPC)))
		return -EINVAL;
	if (total_len > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	/* A non-READY socket is implicitly starting a connection ('SYN') */
	needs_conn = (sock->state != SS_READY);
	if (unlikely(needs_conn)) {
		if (sock->state == SS_LISTENING) {
			res = -EPIPE;
			goto exit;
		}
		if (sock->state != SS_UNCONNECTED) {
			res = -EISCONN;
			goto exit;
		}
		if ((tport->published) ||
		    ((sock->type == SOCK_STREAM) && (total_len != 0))) {
			res = -EOPNOTSUPP;
			goto exit;
		}
		/* Remember destination name for later ancillary-data replies */
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tport->conn_type = dest->addr.name.name.type;
			tport->conn_instance = dest->addr.name.name.instance;
		}

		/* Abort any pending connection attempts (very unlikely) */
		reject_rx_queue(sk);
	}

	timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	/* Send, retrying after a sleep whenever the link is congested */
	do {
		if (dest->addrtype == TIPC_ADDR_NAME) {
			res = dest_name_check(dest, m);
			if (res)
				break;
			res = tipc_send2name(tport->ref,
					     &dest->addr.name.name,
					     dest->addr.name.domain,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		} else if (dest->addrtype == TIPC_ADDR_ID) {
			res = tipc_send2port(tport->ref,
					     &dest->addr.id,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		} else if (dest->addrtype == TIPC_ADDR_MCAST) {
			/* Multicast is only valid on connectionless sockets */
			if (needs_conn) {
				res = -EOPNOTSUPP;
				break;
			}
			res = dest_name_check(dest, m);
			if (res)
				break;
			res = tipc_multicast(tport->ref,
					     &dest->addr.nameseq,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		}
		if (likely(res != -ELINKCONG)) {
			if (needs_conn && (res >= 0))
				sock->state = SS_CONNECTING;
			break;
		}
		if (timeout_val <= 0L) {
			res = timeout_val ? timeout_val : -EWOULDBLOCK;
			break;
		}
		/* Congested: sleep with the socket lock dropped, then retry */
		release_sock(sk);
		timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
			!tport->congested, timeout_val);
		lock_sock(sk);
	} while (1);

exit:
	if (iocb)
		release_sock(sk);
	return res;
}
588
/**
 * send_packet - send a connection-oriented message
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: message to send
 * @total_len: length of message
 *
 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int send_packet(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
	long timeout_val;
	int res;

	/* Handle implied connection establishment */
	if (unlikely(dest))
		return send_msg(iocb, sock, m, total_len);

	if (total_len > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	do {
		/* Re-check state each pass: it may change while we sleep */
		if (unlikely(sock->state != SS_CONNECTED)) {
			if (sock->state == SS_DISCONNECTING)
				res = -EPIPE;
			else
				res = -ENOTCONN;
			break;
		}

		res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov,
				total_len);
		/* Anything but link congestion terminates the retry loop */
		if (likely(res != -ELINKCONG))
			break;
		if (timeout_val <= 0L) {
			res = timeout_val ? timeout_val : -EWOULDBLOCK;
			break;
		}
		/* Congested: sleep (lock dropped) until congestion clears
		 * or the connection goes away, then retry
		 */
		release_sock(sk);
		timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
			(!tport->congested || !tport->connected), timeout_val);
		lock_sock(sk);
	} while (1);

	if (iocb)
		release_sock(sk);
	return res;
}
648
/**
 * send_stream - send stream-oriented data
 * @iocb: (unused)
 * @sock: socket structure
 * @m: data to send
 * @total_len: total length of data to be sent
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int send_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct msghdr my_msg;
	struct iovec my_iov;
	struct iovec *curr_iov;
	int curr_iovlen;
	char __user *curr_start;
	u32 hdr_size;
	int curr_left;
	int bytes_to_send;
	int bytes_sent;
	int res;

	lock_sock(sk);

	/* Handle special cases where there is no connection */
	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sock->state == SS_UNCONNECTED) {
			/* Treat as an implied connect ('SYN'); iocb==NULL
			 * tells send_packet the lock is already held
			 */
			res = send_packet(NULL, sock, m, total_len);
			goto exit;
		} else if (sock->state == SS_DISCONNECTING) {
			res = -EPIPE;
			goto exit;
		} else {
			res = -ENOTCONN;
			goto exit;
		}
	}

	if (unlikely(m->msg_name)) {
		res = -EISCONN;
		goto exit;
	}

	if (total_len > (unsigned int)INT_MAX) {
		res = -EMSGSIZE;
		goto exit;
	}

	/*
	 * Send each iovec entry using one or more messages
	 *
	 * Note: This algorithm is good for the most likely case
	 * (i.e. one large iovec entry), but could be improved to pass sets
	 * of small iovec entries into send_packet().
	 */
	curr_iov = m->msg_iov;
	curr_iovlen = m->msg_iovlen;
	my_msg.msg_iov = &my_iov;
	my_msg.msg_iovlen = 1;
	my_msg.msg_flags = m->msg_flags;
	my_msg.msg_name = NULL;
	bytes_sent = 0;

	hdr_size = msg_hdr_sz(&tport->phdr);

	while (curr_iovlen--) {
		curr_start = curr_iov->iov_base;
		curr_left = curr_iov->iov_len;

		while (curr_left) {
			/* Chunk size = link MTU minus header, capped at the
			 * maximum user message size
			 */
			bytes_to_send = tport->max_pkt - hdr_size;
			if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
				bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
			if (curr_left < bytes_to_send)
				bytes_to_send = curr_left;
			my_iov.iov_base = curr_start;
			my_iov.iov_len = bytes_to_send;
			res = send_packet(NULL, sock, &my_msg, bytes_to_send);
			if (res < 0) {
				/* Partial success: report bytes already sent
				 * instead of the error
				 */
				if (bytes_sent)
					res = bytes_sent;
				goto exit;
			}
			curr_left -= bytes_to_send;
			curr_start += bytes_to_send;
			bytes_sent += bytes_to_send;
		}

		curr_iov++;
	}
	res = bytes_sent;
exit:
	release_sock(sk);
	return res;
}
750
751 /**
752 * auto_connect - complete connection setup to a remote port
753 * @sock: socket structure
754 * @msg: peer's response message
755 *
756 * Returns 0 on success, errno otherwise
757 */
758 static int auto_connect(struct socket *sock, struct tipc_msg *msg)
759 {
760 struct tipc_sock *tsock = tipc_sk(sock->sk);
761 struct tipc_port *p_ptr;
762
763 tsock->peer_name.ref = msg_origport(msg);
764 tsock->peer_name.node = msg_orignode(msg);
765 p_ptr = tipc_port_deref(tsock->p->ref);
766 if (!p_ptr)
767 return -EINVAL;
768
769 __tipc_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
770
771 if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
772 return -EINVAL;
773 msg_set_importance(&p_ptr->phdr, (u32)msg_importance(msg));
774 sock->state = SS_CONNECTED;
775 return 0;
776 }
777
778 /**
779 * set_orig_addr - capture sender's address for received message
780 * @m: descriptor for message info
781 * @msg: received message header
782 *
783 * Note: Address is not captured if not requested by receiver.
784 */
785 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
786 {
787 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
788
789 if (addr) {
790 addr->family = AF_TIPC;
791 addr->addrtype = TIPC_ADDR_ID;
792 memset(&addr->addr, 0, sizeof(addr->addr));
793 addr->addr.id.ref = msg_origport(msg);
794 addr->addr.id.node = msg_orignode(msg);
795 addr->addr.name.domain = 0; /* could leave uninitialized */
796 addr->scope = 0; /* could leave uninitialized */
797 m->msg_namelen = sizeof(struct sockaddr_tipc);
798 }
799 }
800
/**
 * anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tport: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
			 struct tipc_port *tport)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		/* TIPC_ERRINFO payload is two u32s (8 bytes):
		 * error code + size of returned data
		 */
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);	/* single instance */
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);	/* instance range */
		break;
	case TIPC_CONN_MSG:
		/* Name recorded at connection setup, if any (see send_msg) */
		has_name = (tport->conn_type != 0);
		anc_data[0] = tport->conn_type;
		anc_data[1] = tport->conn_instance;
		anc_data[2] = tport->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		/* TIPC_DESTNAME payload is three u32s (12 bytes):
		 * name type + lower instance + upper instance
		 */
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
871
/**
 * recv_msg - receive packet-oriented message
 * @iocb: (unused)
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int recv_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	u32 err;
	int res;

	/* Catch invalid receive requests */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	/* will be updated in set_orig_addr() if needed */
	m->msg_namelen = 0;

	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		/* Sleep with socket lock dropped so senders can progress */
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
	res = anc_data_recv(m, msg, tport);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
					      m->msg_iov, sz);
		if (res)
			goto exit;
		res = sz;
	} else {
		/* Errored message: report 0 (EOF-like) on shutdown or when
		 * the caller asked for the error via ancillary data
		 */
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		/* Connection-based sockets ack consumed messages in batches
		 * for flow control
		 */
		if ((sock->state != SS_READY) &&
		    (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}
exit:
	release_sock(sk);
	return res;
}
980
/**
 * recv_stream - receive stream-oriented data
 * @iocb: (unused)
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int recv_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	int sz_to_copy, target, needed;
	int sz_copied = 0;
	u32 err;
	int res = 0;

	/* Catch invalid receive attempts */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely((sock->state == SS_UNCONNECTED) ||
		     (sock->state == SS_CONNECTING))) {
		res = -ENOTCONN;
		goto exit;
	}

	/* will be updated in set_orig_addr() if needed */
	m->msg_namelen = 0;

	/* target = minimum byte count before returning (SO_RCVLOWAT) */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

restart:
	/* Look for a message in receive queue; wait if necessary */
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		/* Sleep with socket lock dropped so senders can progress */
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = anc_data_recv(m, msg, tport);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		/* 'handle' records how much of this buffer was consumed by
		 * earlier partial reads
		 */
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);

		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;

		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
					      m->msg_iov, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			/* User buffer full: remember new offset so the rest
			 * can be read next time (unless just peeking)
			 */
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle =
				(void *)(unsigned long)(offset + sz_to_copy);
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		/* Ack consumed messages in batches for flow control */
		if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}

	/* Loop around if more data is required */
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}
1113
1114 /**
1115 * tipc_write_space - wake up thread if port congestion is released
1116 * @sk: socket
1117 */
1118 static void tipc_write_space(struct sock *sk)
1119 {
1120 struct socket_wq *wq;
1121
1122 rcu_read_lock();
1123 wq = rcu_dereference(sk->sk_wq);
1124 if (wq_has_sleeper(wq))
1125 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1126 POLLWRNORM | POLLWRBAND);
1127 rcu_read_unlock();
1128 }
1129
1130 /**
1131 * tipc_data_ready - wake up threads to indicate messages have been received
1132 * @sk: socket
1133 * @len: the length of messages
1134 */
1135 static void tipc_data_ready(struct sock *sk, int len)
1136 {
1137 struct socket_wq *wq;
1138
1139 rcu_read_lock();
1140 wq = rcu_dereference(sk->sk_wq);
1141 if (wq_has_sleeper(wq))
1142 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1143 POLLRDNORM | POLLRDBAND);
1144 rcu_read_unlock();
1145 }
1146
/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @tsock: TIPC socket
 * @buf: message; may be freed and set to NULL when a pure 'ACK-' is consumed
 *
 * Returns TIPC error status code and socket error status code
 * once it encounters some errors
 */
static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
{
	struct socket *sock = tsock->sk.sk_socket;
	struct tipc_msg *msg = buf_msg(*buf);
	struct sock *sk = &tsock->sk;
	u32 retval = TIPC_ERR_NO_PORT;
	int res;

	if (msg_mcast(msg))
		return retval;

	switch ((int)sock->state) {
	case SS_CONNECTED:
		/* Accept only connection-based messages sent by peer */
		if (msg_connected(msg) && tipc_port_peer_msg(tsock->p, msg)) {
			if (unlikely(msg_errcode(msg))) {
				sock->state = SS_DISCONNECTING;
				__tipc_disconnect(tsock->p);
			}
			retval = TIPC_OK;
		}
		break;
	case SS_CONNECTING:
		/* Accept only ACK or NACK message */
		if (unlikely(msg_errcode(msg))) {
			sock->state = SS_DISCONNECTING;
			/* sk_err holds a positive errno by convention;
			 * sock_error() negates it for the caller
			 */
			sk->sk_err = ECONNREFUSED;
			retval = TIPC_OK;
			break;
		}

		if (unlikely(!msg_connected(msg)))
			break;

		res = auto_connect(sock, msg);
		if (res) {
			sock->state = SS_DISCONNECTING;
			/* auto_connect() failed with a negative errno;
			 * store the positive value in sk_err
			 */
			sk->sk_err = -res;
			retval = TIPC_OK;
			break;
		}

		/* If an incoming message is an 'ACK-', it should be
		 * discarded here because it doesn't contain useful
		 * data. In addition, we should try to wake up
		 * connect() routine if sleeping.
		 */
		if (msg_data_sz(msg) == 0) {
			kfree_skb(*buf);
			*buf = NULL;
			if (waitqueue_active(sk_sleep(sk)))
				wake_up_interruptible(sk_sleep(sk));
		}
		retval = TIPC_OK;
		break;
	case SS_LISTENING:
	case SS_UNCONNECTED:
		/* Accept only SYN message */
		if (!msg_connected(msg) && !(msg_errcode(msg)))
			retval = TIPC_OK;
		break;
	case SS_DISCONNECTING:
		break;
	default:
		pr_err("Unknown socket state %u\n", sock->state);
	}
	return retval;
}
1223
1224 /**
1225 * rcvbuf_limit - get proper overload limit of socket receive queue
1226 * @sk: socket
1227 * @buf: message
1228 *
1229 * For all connection oriented messages, irrespective of importance,
1230 * the default overload value (i.e. 67MB) is set as limit.
1231 *
1232 * For all connectionless messages, by default new queue limits are
1233 * as belows:
1234 *
1235 * TIPC_LOW_IMPORTANCE (4 MB)
1236 * TIPC_MEDIUM_IMPORTANCE (8 MB)
1237 * TIPC_HIGH_IMPORTANCE (16 MB)
1238 * TIPC_CRITICAL_IMPORTANCE (32 MB)
1239 *
1240 * Returns overload limit according to corresponding message importance
1241 */
1242 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1243 {
1244 struct tipc_msg *msg = buf_msg(buf);
1245 unsigned int limit;
1246
1247 if (msg_connected(msg))
1248 limit = sysctl_tipc_rmem[2];
1249 else
1250 limit = sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
1251 msg_importance(msg);
1252 return limit;
1253 }
1254
/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @buf: message
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken; port lock may also be taken.
 *
 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
 */
static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
{
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *msg = buf_msg(buf);
	unsigned int limit = rcvbuf_limit(sk, buf);
	u32 res = TIPC_OK;

	/* Reject message if it is wrong sort of message for socket */
	if (msg_type(msg) > TIPC_DIRECT_MSG)
		return TIPC_ERR_NO_PORT;

	if (sock->state == SS_READY) {
		/* Connectionless socket: refuse connection-based messages */
		if (msg_connected(msg))
			return TIPC_ERR_NO_PORT;
	} else {
		/* filter_connect() may consume the skb and NULL out buf */
		res = filter_connect(tipc_sk(sk), &buf);
		if (res != TIPC_OK || buf == NULL)
			return res;
	}

	/* Reject message if there isn't room to queue it */
	if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
		return TIPC_ERR_OVERLOAD;

	/* Enqueue message; handle==0 means no bytes consumed yet
	 * (recv_stream() uses handle as its partial-read offset)
	 */
	TIPC_SKB_CB(buf)->handle = 0;
	__skb_queue_tail(&sk->sk_receive_queue, buf);
	skb_set_owner_r(buf, sk);

	sk->sk_data_ready(sk, 0);
	return TIPC_OK;
}
1299
1300 /**
1301 * backlog_rcv - handle incoming message from backlog queue
1302 * @sk: socket
1303 * @buf: message
1304 *
1305 * Caller must hold socket lock, but not port lock.
1306 *
1307 * Returns 0
1308 */
1309 static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
1310 {
1311 u32 res;
1312
1313 res = filter_rcv(sk, buf);
1314 if (res)
1315 tipc_reject_msg(buf, res);
1316 return 0;
1317 }
1318
1319 /**
1320 * dispatch - handle incoming message
1321 * @tport: TIPC port that received message
1322 * @buf: message
1323 *
1324 * Called with port lock already taken.
1325 *
1326 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1327 */
1328 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1329 {
1330 struct sock *sk = (struct sock *)tport->usr_handle;
1331 u32 res;
1332
1333 /*
1334 * Process message if socket is unlocked; otherwise add to backlog queue
1335 *
1336 * This code is based on sk_receive_skb(), but must be distinct from it
1337 * since a TIPC-specific filter/reject mechanism is utilized
1338 */
1339 bh_lock_sock(sk);
1340 if (!sock_owned_by_user(sk)) {
1341 res = filter_rcv(sk, buf);
1342 } else {
1343 if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
1344 res = TIPC_ERR_OVERLOAD;
1345 else
1346 res = TIPC_OK;
1347 }
1348 bh_unlock_sock(sk);
1349
1350 return res;
1351 }
1352
1353 /**
1354 * wakeupdispatch - wake up port after congestion
1355 * @tport: port to wakeup
1356 *
1357 * Called with port lock already taken.
1358 */
1359 static void wakeupdispatch(struct tipc_port *tport)
1360 {
1361 struct sock *sk = (struct sock *)tport->usr_handle;
1362
1363 sk->sk_write_space(sk);
1364 }
1365
/**
 * connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
		   int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	unsigned int timeout;
	int res;

	lock_sock(sk);

	/* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
	if (sock->state == SS_READY) {
		res = -EOPNOTSUPP;
		goto exit;
	}

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 * so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

	/* timeout == 0 means non-blocking connect */
	timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;

	switch (sock->state) {
	case SS_UNCONNECTED:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = send_msg(NULL, sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		break;
	case SS_CONNECTING:
		res = -EALREADY;
		break;
	case SS_CONNECTED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
		goto exit;
	}

	if (sock->state == SS_CONNECTING) {
		/* Non-blocking: report EINPROGRESS/EALREADY immediately */
		if (!timeout)
			goto exit;

		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		release_sock(sk);
		res = wait_event_interruptible_timeout(*sk_sleep(sk),
				sock->state != SS_CONNECTING,
				timeout ? (long)msecs_to_jiffies(timeout)
					: MAX_SCHEDULE_TIMEOUT);
		lock_sock(sk);
		if (res <= 0) {
			/* 0 = timed out; negative = interrupted by signal */
			if (res == 0)
				res = -ETIMEDOUT;
			else
				; /* leave "res" unchanged */
			goto exit;
		}
	}

	/* State changed while waiting: report the recorded socket error */
	if (unlikely(sock->state == SS_DISCONNECTING))
		res = sock_error(sk);
	else
		res = 0;

exit:
	release_sock(sk);
	return res;
}
1467
1468 /**
1469 * listen - allow socket to listen for incoming connections
1470 * @sock: socket structure
1471 * @len: (unused)
1472 *
1473 * Returns 0 on success, errno otherwise
1474 */
1475 static int listen(struct socket *sock, int len)
1476 {
1477 struct sock *sk = sock->sk;
1478 int res;
1479
1480 lock_sock(sk);
1481
1482 if (sock->state != SS_UNCONNECTED)
1483 res = -EINVAL;
1484 else {
1485 sock->state = SS_LISTENING;
1486 res = 0;
1487 }
1488
1489 release_sock(sk);
1490 return res;
1491 }
1492
/**
 * accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_port *new_tport;
	struct tipc_msg *msg;
	u32 new_ref;

	int res;

	lock_sock(sk);

	if (sock->state != SS_LISTENING) {
		res = -EINVAL;
		goto exit;
	}

	/* Wait for an incoming 'SYN' (or fail fast when non-blocking) */
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (flags & O_NONBLOCK) {
			res = -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		res = wait_event_interruptible(*sk_sleep(sk),
				(!skb_queue_empty(&sk->sk_receive_queue)));
		lock_sock(sk);
		if (res)
			goto exit;
	}

	buf = skb_peek(&sk->sk_receive_queue);

	/* Create the socket that will carry the new connection */
	res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
	if (res)
		goto exit;

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	new_tport = new_tsock->p;
	new_ref = new_tport->ref;
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	reject_rx_queue(new_sk);

	/* Connect new socket to it's peer */
	new_tsock->peer_name.ref = msg_origport(msg);
	new_tsock->peer_name.node = msg_orignode(msg);
	tipc_connect(new_ref, &new_tsock->peer_name);
	new_sock->state = SS_CONNECTED;

	/* Inherit importance and (for named msgs) connection type/instance */
	tipc_set_portimportance(new_ref, msg_importance(msg));
	if (msg_named(msg)) {
		new_tport->conn_type = msg_nametype(msg);
		new_tport->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		advance_rx_queue(sk);
		send_packet(NULL, new_sock, &m, 0);
	} else {
		/* Move the SYN's data from listener to the new socket */
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);

exit:
	release_sock(sk);
	return res;
}
1585
/**
 * shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	int res;

	/* TIPC only supports full-duplex shutdown */
	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	switch (sock->state) {
	case SS_CONNECTING:
	case SS_CONNECTED:

restart:
		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf) {
			/* Non-zero handle means recv_stream() already
			 * consumed part of this skb; just free it and
			 * look for a fully unread message to reject
			 */
			if (TIPC_SKB_CB(buf)->handle != 0) {
				kfree_skb(buf);
				goto restart;
			}
			tipc_disconnect(tport->ref);
			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
		} else {
			tipc_shutdown(tport->ref);
		}

		sock->state = SS_DISCONNECTING;

		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}
1646
1647 /**
1648 * setsockopt - set socket option
1649 * @sock: socket structure
1650 * @lvl: option level
1651 * @opt: option identifier
1652 * @ov: pointer to new option value
1653 * @ol: length of option value
1654 *
1655 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
1656 * (to ease compatibility).
1657 *
1658 * Returns 0 on success, errno otherwise
1659 */
1660 static int setsockopt(struct socket *sock,
1661 int lvl, int opt, char __user *ov, unsigned int ol)
1662 {
1663 struct sock *sk = sock->sk;
1664 struct tipc_port *tport = tipc_sk_port(sk);
1665 u32 value;
1666 int res;
1667
1668 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1669 return 0;
1670 if (lvl != SOL_TIPC)
1671 return -ENOPROTOOPT;
1672 if (ol < sizeof(value))
1673 return -EINVAL;
1674 res = get_user(value, (u32 __user *)ov);
1675 if (res)
1676 return res;
1677
1678 lock_sock(sk);
1679
1680 switch (opt) {
1681 case TIPC_IMPORTANCE:
1682 res = tipc_set_portimportance(tport->ref, value);
1683 break;
1684 case TIPC_SRC_DROPPABLE:
1685 if (sock->type != SOCK_STREAM)
1686 res = tipc_set_portunreliable(tport->ref, value);
1687 else
1688 res = -ENOPROTOOPT;
1689 break;
1690 case TIPC_DEST_DROPPABLE:
1691 res = tipc_set_portunreturnable(tport->ref, value);
1692 break;
1693 case TIPC_CONN_TIMEOUT:
1694 tipc_sk(sk)->conn_timeout = value;
1695 /* no need to set "res", since already 0 at this point */
1696 break;
1697 default:
1698 res = -EINVAL;
1699 }
1700
1701 release_sock(sk);
1702
1703 return res;
1704 }
1705
/**
 * getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int getsockopt(struct socket *sock,
		      int lvl, int opt, char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	int len;
	u32 value;
	int res;

	/* Stream sockets report a zero-length result for TCP options */
	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tipc_portimportance(tport->ref, &value);
		break;
	case TIPC_SRC_DROPPABLE:
		res = tipc_portunreliable(tport->ref, &value);
		break;
	case TIPC_DEST_DROPPABLE:
		res = tipc_portunreturnable(tport->ref, &value);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tipc_sk(sk)->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	/* Note: the length check is deliberately deferred until after the
	 * "get" so that a lookup error takes precedence over -EINVAL
	 */
	if (res)
		return res; /* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}
1775
/* Protocol switches for the various types of TIPC sockets */

/* Connectionless sockets: no listen/accept; datagram send/receive.
 * NOTE(review): presumably used for SOCK_RDM/SOCK_DGRAM - confirm in
 * tipc_create()
 */
static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= release,
	.bind		= bind,
	.connect	= connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= get_name,
	.poll		= poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= shutdown,
	.setsockopt	= setsockopt,
	.getsockopt	= getsockopt,
	.sendmsg	= send_msg,
	.recvmsg	= recv_msg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* Connection-oriented, message-preserving sockets: full listen/accept,
 * packet-based send (send_packet)
 */
static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= release,
	.bind		= bind,
	.connect	= connect,
	.socketpair	= sock_no_socketpair,
	.accept		= accept,
	.getname	= get_name,
	.poll		= poll,
	.ioctl		= sock_no_ioctl,
	.listen		= listen,
	.shutdown	= shutdown,
	.setsockopt	= setsockopt,
	.getsockopt	= getsockopt,
	.sendmsg	= send_packet,
	.recvmsg	= recv_msg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* Byte-stream sockets: stream send/receive (send_stream/recv_stream) */
static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= release,
	.bind		= bind,
	.connect	= connect,
	.socketpair	= sock_no_socketpair,
	.accept		= accept,
	.getname	= get_name,
	.poll		= poll,
	.ioctl		= sock_no_ioctl,
	.listen		= listen,
	.shutdown	= shutdown,
	.setsockopt	= setsockopt,
	.getsockopt	= getsockopt,
	.sendmsg	= send_stream,
	.recvmsg	= recv_stream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* AF_TIPC address-family registration: socket(AF_TIPC, ...) -> tipc_create */
static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_create
};

/* Protocol descriptor; receive-buffer sysctl wired to sysctl_tipc_rmem */
static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};
1853
1854 /**
1855 * tipc_socket_init - initialize TIPC socket interface
1856 *
1857 * Returns 0 on success, errno otherwise
1858 */
1859 int tipc_socket_init(void)
1860 {
1861 int res;
1862
1863 res = proto_register(&tipc_proto, 1);
1864 if (res) {
1865 pr_err("Failed to register TIPC protocol type\n");
1866 goto out;
1867 }
1868
1869 res = sock_register(&tipc_family_ops);
1870 if (res) {
1871 pr_err("Failed to register TIPC socket type\n");
1872 proto_unregister(&tipc_proto);
1873 goto out;
1874 }
1875
1876 sockets_enabled = 1;
1877 out:
1878 return res;
1879 }
1880
1881 /**
1882 * tipc_socket_stop - stop TIPC socket interface
1883 */
1884 void tipc_socket_stop(void)
1885 {
1886 if (!sockets_enabled)
1887 return;
1888
1889 sockets_enabled = 0;
1890 sock_unregister(tipc_family_ops.family);
1891 proto_unregister(&tipc_proto);
1892 }