1 /*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2007, 2012-2014, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "port.h"
39 #include "name_table.h"
40 #include "node.h"
41 #include "link.h"
42 #include <linux/export.h>
43 #include "link.h"
44
45 #define SS_LISTENING -1 /* socket is listening */
46 #define SS_READY -2 /* socket is connectionless */
47
48 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
49 #define TIPC_FWD_MSG 1
50
51 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
52 static void tipc_data_ready(struct sock *sk);
53 static void tipc_write_space(struct sock *sk);
54 static int tipc_release(struct socket *sock);
55 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
56 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
57
58 static const struct proto_ops packet_ops;
59 static const struct proto_ops stream_ops;
60 static const struct proto_ops msg_ops;
61
62 static struct proto tipc_proto;
63 static struct proto tipc_proto_kern;
64
65 /*
66 * Revised TIPC socket locking policy:
67 *
68 * Most socket operations take the standard socket lock when they start
69 * and hold it until they finish (or until they need to sleep). Acquiring
70 * this lock grants the owner exclusive access to the fields of the socket
71 * data structures, with the exception of the backlog queue. A few socket
72 * operations can be done without taking the socket lock because they only
73 * read socket information that never changes during the life of the socket.
74 *
75 * Socket operations may acquire the lock for the associated TIPC port if they
76 * need to perform an operation on the port. If any routine needs to acquire
77 * both the socket lock and the port lock it must take the socket lock first
78 * to avoid the risk of deadlock.
79 *
80 * The dispatcher handling incoming messages cannot grab the socket lock in
81 * the standard fashion, since, when invoked, it runs at BH level and cannot block.
82 * Instead, it checks to see if the socket lock is currently owned by someone,
83 * and either handles the message itself or adds it to the socket's backlog
84 * queue; in the latter case the queued message is processed once the process
85 * owning the socket lock releases it.
86 *
87 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
88 * the problem of a blocked socket operation preventing any other operations
89 * from occurring. However, applications must be careful if they have
90 * multiple threads trying to send (or receive) on the same socket, as these
91 * operations might interfere with each other. For example, doing a connect
92 * and a receive at the same time might allow the receive to consume the
93 * ACK message meant for the connect. While additional work could be done
94 * to try to overcome this, it doesn't seem worthwhile at present.
95 *
96 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
97 * that another operation that must be performed in a non-blocking manner is
98 * not delayed for very long because the lock has already been taken.
99 *
100 * NOTE: This code assumes that certain fields of a port/socket pair are
101 * constant over its lifetime; such fields can be examined without taking
102 * the socket lock and/or port lock, and do not need to be re-read even
103 * after resuming processing after waiting. These fields include:
104 * - socket type
105 * - pointer to socket sk structure (aka tipc_sock structure)
106 * - pointer to port structure
107 * - port reference
108 */
109
110 #include "socket.h"
111
112 /**
113 * advance_rx_queue - discard first buffer in socket receive queue
114 *
115 * Caller must hold socket lock
116 */
117 static void advance_rx_queue(struct sock *sk)
118 {
119 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
120 }
121
122 /**
123 * reject_rx_queue - reject all buffers in socket receive queue
124 *
125 * Caller must hold socket lock
126 */
127 static void reject_rx_queue(struct sock *sk)
128 {
129 struct sk_buff *buf;
130 u32 dnode;
131
132 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
133 if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
134 tipc_link_xmit(buf, dnode, 0);
135 }
136 }
137
138 /**
139 * tipc_sk_create - create a TIPC socket
140 * @net: network namespace (must be default network)
141 * @sock: pre-allocated socket structure
142 * @protocol: protocol indicator (must be 0)
143 * @kern: caused by kernel or by userspace?
144 *
145 * This routine creates additional data structures used by the TIPC socket,
146 * initializes them, and links them together.
147 *
148 * Returns 0 on success, errno otherwise
149 */
150 static int tipc_sk_create(struct net *net, struct socket *sock,
151 int protocol, int kern)
152 {
153 const struct proto_ops *ops;
154 socket_state state;
155 struct sock *sk;
156 struct tipc_sock *tsk;
157 struct tipc_port *port;
158 u32 ref;
159
160 /* Validate arguments */
161 if (unlikely(protocol != 0))
162 return -EPROTONOSUPPORT;
163
164 switch (sock->type) {
165 case SOCK_STREAM:
166 ops = &stream_ops;
167 state = SS_UNCONNECTED;
168 break;
169 case SOCK_SEQPACKET:
170 ops = &packet_ops;
171 state = SS_UNCONNECTED;
172 break;
173 case SOCK_DGRAM:
174 case SOCK_RDM:
175 ops = &msg_ops;
176 state = SS_READY;
177 break;
178 default:
179 return -EPROTOTYPE;
180 }
181
182 /* Allocate socket's protocol area */
183 if (!kern)
184 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
185 else
186 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern);
187
188 if (sk == NULL)
189 return -ENOMEM;
190
191 tsk = tipc_sk(sk);
192 port = &tsk->port;
193
194 ref = tipc_port_init(port, TIPC_LOW_IMPORTANCE);
195 if (!ref) {
196 pr_warn("Socket registration failed, ref. table exhausted\n");
197 sk_free(sk);
198 return -ENOMEM;
199 }
200
201 /* Finish initializing socket data structures */
202 sock->ops = ops;
203 sock->state = state;
204
205 sock_init_data(sock, sk);
206 sk->sk_backlog_rcv = tipc_backlog_rcv;
207 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
208 sk->sk_data_ready = tipc_data_ready;
209 sk->sk_write_space = tipc_write_space;
210 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
211 tsk->sent_unacked = 0;
212 atomic_set(&tsk->dupl_rcvcnt, 0);
213 tipc_port_unlock(port);
214
215 if (sock->state == SS_READY) {
216 tipc_port_set_unreturnable(port, true);
217 if (sock->type == SOCK_DGRAM)
218 tipc_port_set_unreliable(port, true);
219 }
220 return 0;
221 }
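
/*
 * Illustrative userspace sketch (not part of this kernel file): the socket
 * flavours dispatched by tipc_sk_create() above, as seen from user space.
 * Error handling is omitted for brevity.
 */
#include <sys/socket.h>
#include <unistd.h>

static void example_create_tipc_sockets(void)
{
	int rdm    = socket(AF_TIPC, SOCK_RDM, 0);       /* connectionless, reliable */
	int dgram  = socket(AF_TIPC, SOCK_DGRAM, 0);     /* connectionless, unreliable */
	int stream = socket(AF_TIPC, SOCK_STREAM, 0);    /* connection-oriented byte stream */
	int packet = socket(AF_TIPC, SOCK_SEQPACKET, 0); /* connection-oriented messages */

	close(rdm);
	close(dgram);
	close(stream);
	close(packet);
}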
222
223 /**
224 * tipc_sock_create_local - create TIPC socket from inside TIPC module
225 * @type: socket type - SOCK_RDM or SOCK_SEQPACKET
226 *
227 * We cannot use sock_create_kern here because it bumps the module user count.
228 * Since the socket owner and creator are the same module, we must make sure
229 * that the module count remains zero for module-local sockets; otherwise
230 * we cannot do rmmod.
231 *
232 * Returns 0 on success, errno otherwise
233 */
234 int tipc_sock_create_local(int type, struct socket **res)
235 {
236 int rc;
237
238 rc = sock_create_lite(AF_TIPC, type, 0, res);
239 if (rc < 0) {
240 pr_err("Failed to create kernel socket\n");
241 return rc;
242 }
243 tipc_sk_create(&init_net, *res, 0, 1);
244
245 return 0;
246 }
247
248 /**
249 * tipc_sock_release_local - release socket created by tipc_sock_create_local
250 * @sock: the socket to be released.
251 *
252 * Module reference count is not incremented when such sockets are created,
253 * so we must keep it from being decremented when they are released.
254 */
255 void tipc_sock_release_local(struct socket *sock)
256 {
257 tipc_release(sock);
258 sock->ops = NULL;
259 sock_release(sock);
260 }
261
262 /**
263 * tipc_sock_accept_local - accept a connection on a socket created
264 * with tipc_sock_create_local. Use this function to avoid having the
265 * module reference count inadvertently incremented.
266 *
267 * @sock: the accepting socket
268 * @newsock: reference to the new socket to be created
269 * @flags: socket flags
270 */
271
272 int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
273 int flags)
274 {
275 struct sock *sk = sock->sk;
276 int ret;
277
278 ret = sock_create_lite(sk->sk_family, sk->sk_type,
279 sk->sk_protocol, newsock);
280 if (ret < 0)
281 return ret;
282
283 ret = tipc_accept(sock, *newsock, flags);
284 if (ret < 0) {
285 sock_release(*newsock);
286 return ret;
287 }
288 (*newsock)->ops = sock->ops;
289 return ret;
290 }
291
292 /**
293 * tipc_release - destroy a TIPC socket
294 * @sock: socket to destroy
295 *
296 * This routine cleans up any messages that are still queued on the socket.
297 * For DGRAM and RDM socket types, all queued messages are rejected.
298 * For SEQPACKET and STREAM socket types, the first message is rejected
299 * and any others are discarded. (If the first message on a STREAM socket
300 * is partially-read, it is discarded and the next one is rejected instead.)
301 *
302 * NOTE: Rejected messages are not necessarily returned to the sender! They
303 * are returned or discarded according to the "destination droppable" setting
304 * specified for the message by the sender.
305 *
306 * Returns 0 on success, errno otherwise
307 */
308 static int tipc_release(struct socket *sock)
309 {
310 struct sock *sk = sock->sk;
311 struct tipc_sock *tsk;
312 struct tipc_port *port;
313 struct sk_buff *buf;
314 u32 dnode;
315
316 /*
317 * Exit if socket isn't fully initialized (occurs when a failed accept()
318 * releases a pre-allocated child socket that was never used)
319 */
320 if (sk == NULL)
321 return 0;
322
323 tsk = tipc_sk(sk);
324 port = &tsk->port;
325 lock_sock(sk);
326
327 /*
328 * Reject all unreceived messages, except on an active connection
329 * (which disconnects locally & sends a 'FIN+' to peer)
330 */
331 while (sock->state != SS_DISCONNECTING) {
332 buf = __skb_dequeue(&sk->sk_receive_queue);
333 if (buf == NULL)
334 break;
335 if (TIPC_SKB_CB(buf)->handle != NULL)
336 kfree_skb(buf);
337 else {
338 if ((sock->state == SS_CONNECTING) ||
339 (sock->state == SS_CONNECTED)) {
340 sock->state = SS_DISCONNECTING;
341 tipc_port_disconnect(port->ref);
342 }
343 if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
344 tipc_link_xmit(buf, dnode, 0);
345 }
346 }
347
348 /* Destroy TIPC port; also disconnects an active connection and
349 * sends a 'FIN-' to peer.
350 */
351 tipc_port_destroy(port);
352
353 /* Discard any remaining (connection-based) messages in receive queue */
354 __skb_queue_purge(&sk->sk_receive_queue);
355
356 /* Reject any messages that accumulated in backlog queue */
357 sock->state = SS_DISCONNECTING;
358 release_sock(sk);
359
360 sock_put(sk);
361 sock->sk = NULL;
362
363 return 0;
364 }
365
366 /**
367 * tipc_bind - associate or disassociate TIPC name(s) with a socket
368 * @sock: socket structure
369 * @uaddr: socket address describing name(s) and desired operation
370 * @uaddr_len: size of socket address data structure
371 *
372 * Name and name sequence binding is indicated using a positive scope value;
373 * a negative scope value unbinds the specified name. Specifying no name
374 * (i.e. a socket address length of 0) unbinds all names from the socket.
375 *
376 * Returns 0 on success, errno otherwise
377 *
378 * NOTE: This routine doesn't need to take the socket lock since it doesn't
379 * access any non-constant socket information.
380 */
381 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
382 int uaddr_len)
383 {
384 struct sock *sk = sock->sk;
385 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
386 struct tipc_sock *tsk = tipc_sk(sk);
387 int res = -EINVAL;
388
389 lock_sock(sk);
390 if (unlikely(!uaddr_len)) {
391 res = tipc_withdraw(&tsk->port, 0, NULL);
392 goto exit;
393 }
394
395 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
396 res = -EINVAL;
397 goto exit;
398 }
399 if (addr->family != AF_TIPC) {
400 res = -EAFNOSUPPORT;
401 goto exit;
402 }
403
404 if (addr->addrtype == TIPC_ADDR_NAME)
405 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
406 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
407 res = -EAFNOSUPPORT;
408 goto exit;
409 }
410
411 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
412 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
413 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
414 res = -EACCES;
415 goto exit;
416 }
417
418 res = (addr->scope > 0) ?
419 tipc_publish(&tsk->port, addr->scope, &addr->addr.nameseq) :
420 tipc_withdraw(&tsk->port, -addr->scope, &addr->addr.nameseq);
421 exit:
422 release_sock(sk);
423 return res;
424 }
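
/*
 * Illustrative userspace sketch (not part of this kernel file): publishing
 * and withdrawing a name sequence through tipc_bind() above. A positive
 * scope publishes, a negative scope withdraws, and a zero-length address
 * withdraws everything. Service type 18888 is an arbitrary example value.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_bind_name_seq(int sd)
{
	struct sockaddr_tipc addr;

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_TIPC;
	addr.addrtype = TIPC_ADDR_NAMESEQ;
	addr.scope = TIPC_CLUSTER_SCOPE;          /* positive => publish */
	addr.addr.nameseq.type = 18888;           /* must be >= TIPC_RESERVED_TYPES */
	addr.addr.nameseq.lower = 0;
	addr.addr.nameseq.upper = 99;

	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;

	addr.scope = -TIPC_CLUSTER_SCOPE;         /* negative => withdraw */
	return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
}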
425
426 /**
427 * tipc_getname - get port ID of socket or peer socket
428 * @sock: socket structure
429 * @uaddr: area for returned socket address
430 * @uaddr_len: area for returned length of socket address
431 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
432 *
433 * Returns 0 on success, errno otherwise
434 *
435 * NOTE: This routine doesn't need to take the socket lock since it only
436 * accesses socket information that is unchanging (or which changes in
437 * a completely predictable manner).
438 */
439 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
440 int *uaddr_len, int peer)
441 {
442 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
443 struct tipc_sock *tsk = tipc_sk(sock->sk);
444
445 memset(addr, 0, sizeof(*addr));
446 if (peer) {
447 if ((sock->state != SS_CONNECTED) &&
448 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
449 return -ENOTCONN;
450 addr->addr.id.ref = tipc_port_peerport(&tsk->port);
451 addr->addr.id.node = tipc_port_peernode(&tsk->port);
452 } else {
453 addr->addr.id.ref = tsk->port.ref;
454 addr->addr.id.node = tipc_own_addr;
455 }
456
457 *uaddr_len = sizeof(*addr);
458 addr->addrtype = TIPC_ADDR_ID;
459 addr->family = AF_TIPC;
460 addr->scope = 0;
461 addr->addr.name.domain = 0;
462
463 return 0;
464 }
465
466 /**
467 * tipc_poll - read and possibly block on pollmask
468 * @file: file structure associated with the socket
469 * @sock: socket for which to calculate the poll bits
470 * @wait: ???
471 *
472 * Returns pollmask value
473 *
474 * COMMENTARY:
475 * It appears that the usual socket locking mechanisms are not useful here
476 * since the pollmask info is potentially out-of-date the moment this routine
477 * exits. TCP and other protocols seem to rely on higher level poll routines
478 * to handle any preventable race conditions, so TIPC will do the same ...
479 *
480 * TIPC sets the returned events as follows:
481 *
482 * socket state flags set
483 * ------------ ---------
484 * unconnected no read flags
485 * POLLOUT if port is not congested
486 *
487 * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
488 * no write flags
489 *
490 * connected POLLIN/POLLRDNORM if data in rx queue
491 * POLLOUT if port is not congested
492 *
493 * disconnecting POLLIN/POLLRDNORM/POLLHUP
494 * no write flags
495 *
496 * listening POLLIN if SYN in rx queue
497 * no write flags
498 *
499 * ready POLLIN/POLLRDNORM if data in rx queue
500 * [connectionless] POLLOUT (since port cannot be congested)
501 *
502 * IMPORTANT: The fact that a read or write operation is indicated does NOT
503 * imply that the operation will succeed, merely that it should be performed
504 * and will not block.
505 */
506 static unsigned int tipc_poll(struct file *file, struct socket *sock,
507 poll_table *wait)
508 {
509 struct sock *sk = sock->sk;
510 struct tipc_sock *tsk = tipc_sk(sk);
511 u32 mask = 0;
512
513 sock_poll_wait(file, sk_sleep(sk), wait);
514
515 switch ((int)sock->state) {
516 case SS_UNCONNECTED:
517 if (!tsk->link_cong)
518 mask |= POLLOUT;
519 break;
520 case SS_READY:
521 case SS_CONNECTED:
522 if (!tsk->link_cong && !tipc_sk_conn_cong(tsk))
523 mask |= POLLOUT;
524 /* fall thru' */
525 case SS_CONNECTING:
526 case SS_LISTENING:
527 if (!skb_queue_empty(&sk->sk_receive_queue))
528 mask |= (POLLIN | POLLRDNORM);
529 break;
530 case SS_DISCONNECTING:
531 mask = (POLLIN | POLLRDNORM | POLLHUP);
532 break;
533 }
534
535 return mask;
536 }
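
/*
 * Illustrative userspace sketch (not part of this kernel file): waiting on a
 * connected TIPC socket with poll(), using the event mapping documented in
 * the table above.
 */
#include <poll.h>

static int example_wait_readable(int sd, int timeout_ms)
{
	struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLOUT };
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return n;                          /* 0 = timeout, <0 = error */
	if (pfd.revents & POLLHUP)
		return -1;                         /* peer gone (SS_DISCONNECTING) */
	return (pfd.revents & POLLIN) ? 1 : 0;     /* 1 = data ready, 0 = only writable */
}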
537
538 /**
539 * tipc_sendmcast - send multicast message
540 * @sock: socket structure
541 * @seq: destination address
542 * @iov: message data to send
543 * @dsz: total length of message data
544 * @timeo: timeout to wait for wakeup
545 *
546 * Called from function tipc_sendmsg(), which has done all sanity checks
547 * Returns the number of bytes sent on success, or errno
548 */
549 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
550 struct iovec *iov, size_t dsz, long timeo)
551 {
552 struct sock *sk = sock->sk;
553 struct tipc_msg *mhdr = &tipc_sk(sk)->port.phdr;
554 struct sk_buff *buf;
555 uint mtu;
556 int rc;
557
558 msg_set_type(mhdr, TIPC_MCAST_MSG);
559 msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
560 msg_set_destport(mhdr, 0);
561 msg_set_destnode(mhdr, 0);
562 msg_set_nametype(mhdr, seq->type);
563 msg_set_namelower(mhdr, seq->lower);
564 msg_set_nameupper(mhdr, seq->upper);
565 msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
566
567 new_mtu:
568 mtu = tipc_bclink_get_mtu();
569 rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
570 if (unlikely(rc < 0))
571 return rc;
572
573 do {
574 rc = tipc_bclink_xmit(buf);
575 if (likely(rc >= 0)) {
576 rc = dsz;
577 break;
578 }
579 if (rc == -EMSGSIZE)
580 goto new_mtu;
581 if (rc != -ELINKCONG)
582 break;
583 rc = tipc_wait_for_sndmsg(sock, &timeo);
584 if (rc)
585 kfree_skb_list(buf);
586 } while (!rc);
587 return rc;
588 }
589
590 /* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets
591 */
592 void tipc_sk_mcast_rcv(struct sk_buff *buf)
593 {
594 struct tipc_msg *msg = buf_msg(buf);
595 struct tipc_port_list dports = {0, NULL, };
596 struct tipc_port_list *item;
597 struct sk_buff *b;
598 uint i, last, dst = 0;
599 u32 scope = TIPC_CLUSTER_SCOPE;
600
601 if (in_own_node(msg_orignode(msg)))
602 scope = TIPC_NODE_SCOPE;
603
604 /* Create destination port list: */
605 tipc_nametbl_mc_translate(msg_nametype(msg),
606 msg_namelower(msg),
607 msg_nameupper(msg),
608 scope,
609 &dports);
610 last = dports.count;
611 if (!last) {
612 kfree_skb(buf);
613 return;
614 }
615
616 for (item = &dports; item; item = item->next) {
617 for (i = 0; i < PLSIZE && ++dst <= last; i++) {
618 b = (dst != last) ? skb_clone(buf, GFP_ATOMIC) : buf;
619 if (!b) {
620 pr_warn("Failed do clone mcast rcv buffer\n");
621 continue;
622 }
623 msg_set_destport(msg, item->ports[i]);
624 tipc_sk_rcv(b);
625 }
626 }
627 tipc_port_list_free(&dports);
628 }
629
630 /**
631 * tipc_sk_proto_rcv - receive a connection manager protocol message
632 * @tsk: receiving socket
633 * @dnode: node to send response message to, if any
634 * @buf: buffer containing protocol message
635 * Returns 0 (TIPC_OK) if message was consumed, 1 (TIPC_FWD_MSG) if
636 * (CONN_PROBE_REPLY) message should be forwarded.
637 */
638 static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
639 struct sk_buff *buf)
640 {
641 struct tipc_msg *msg = buf_msg(buf);
642 struct tipc_port *port = &tsk->port;
643 int conn_cong;
644
645 /* Ignore if connection cannot be validated: */
646 if (!port->connected || !tipc_port_peer_msg(port, msg))
647 goto exit;
648
649 port->probing_state = TIPC_CONN_OK;
650
651 if (msg_type(msg) == CONN_ACK) {
652 conn_cong = tipc_sk_conn_cong(tsk);
653 tsk->sent_unacked -= msg_msgcnt(msg);
654 if (conn_cong)
655 tipc_sock_wakeup(tsk);
656 } else if (msg_type(msg) == CONN_PROBE) {
657 if (!tipc_msg_reverse(buf, dnode, TIPC_OK))
658 return TIPC_OK;
659 msg_set_type(msg, CONN_PROBE_REPLY);
660 return TIPC_FWD_MSG;
661 }
662 /* Do nothing if msg_type() == CONN_PROBE_REPLY */
663 exit:
664 kfree_skb(buf);
665 return TIPC_OK;
666 }
667
668 /**
669 * dest_name_check - verify user is permitted to send to specified port name
670 * @dest: destination address
671 * @m: descriptor for message to be sent
672 *
673 * Prevents restricted configuration commands from being issued by
674 * unauthorized users.
675 *
676 * Returns 0 if permission is granted, otherwise errno
677 */
678 static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
679 {
680 struct tipc_cfg_msg_hdr hdr;
681
682 if (unlikely(dest->addrtype == TIPC_ADDR_ID))
683 return 0;
684 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
685 return 0;
686 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
687 return 0;
688 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
689 return -EACCES;
690
691 if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
692 return -EMSGSIZE;
693 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
694 return -EFAULT;
695 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
696 return -EACCES;
697
698 return 0;
699 }
700
701 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
702 {
703 struct sock *sk = sock->sk;
704 struct tipc_sock *tsk = tipc_sk(sk);
705 DEFINE_WAIT(wait);
706 int done;
707
708 do {
709 int err = sock_error(sk);
710 if (err)
711 return err;
712 if (sock->state == SS_DISCONNECTING)
713 return -EPIPE;
714 if (!*timeo_p)
715 return -EAGAIN;
716 if (signal_pending(current))
717 return sock_intr_errno(*timeo_p);
718
719 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
720 done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
721 finish_wait(sk_sleep(sk), &wait);
722 } while (!done);
723 return 0;
724 }
725
726 /**
727 * tipc_sendmsg - send message in connectionless manner
728 * @iocb: if NULL, indicates that socket lock is already held
729 * @sock: socket structure
730 * @m: message to send
731 * @dsz: amount of user data to be sent
732 *
733 * Message must have a destination specified explicitly.
734 * Used for SOCK_RDM and SOCK_DGRAM messages,
735 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
736 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
737 *
738 * Returns the number of bytes sent on success, or errno otherwise
739 */
740 static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
741 struct msghdr *m, size_t dsz)
742 {
743 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
744 struct sock *sk = sock->sk;
745 struct tipc_sock *tsk = tipc_sk(sk);
746 struct tipc_port *port = &tsk->port;
747 struct tipc_msg *mhdr = &port->phdr;
748 struct iovec *iov = m->msg_iov;
749 u32 dnode, dport;
750 struct sk_buff *buf;
751 struct tipc_name_seq *seq = &dest->addr.nameseq;
752 u32 mtu;
753 long timeo;
754 int rc = -EINVAL;
755
756 if (unlikely(!dest))
757 return -EDESTADDRREQ;
758
759 if (unlikely((m->msg_namelen < sizeof(*dest)) ||
760 (dest->family != AF_TIPC)))
761 return -EINVAL;
762
763 if (dsz > TIPC_MAX_USER_MSG_SIZE)
764 return -EMSGSIZE;
765
766 if (iocb)
767 lock_sock(sk);
768
769 if (unlikely(sock->state != SS_READY)) {
770 if (sock->state == SS_LISTENING) {
771 rc = -EPIPE;
772 goto exit;
773 }
774 if (sock->state != SS_UNCONNECTED) {
775 rc = -EISCONN;
776 goto exit;
777 }
778 if (tsk->port.published) {
779 rc = -EOPNOTSUPP;
780 goto exit;
781 }
782 if (dest->addrtype == TIPC_ADDR_NAME) {
783 tsk->port.conn_type = dest->addr.name.name.type;
784 tsk->port.conn_instance = dest->addr.name.name.instance;
785 }
786 }
787 rc = dest_name_check(dest, m);
788 if (rc)
789 goto exit;
790
791 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
792
793 if (dest->addrtype == TIPC_ADDR_MCAST) {
794 rc = tipc_sendmcast(sock, seq, iov, dsz, timeo);
795 goto exit;
796 } else if (dest->addrtype == TIPC_ADDR_NAME) {
797 u32 type = dest->addr.name.name.type;
798 u32 inst = dest->addr.name.name.instance;
799 u32 domain = dest->addr.name.domain;
800
801 dnode = domain;
802 msg_set_type(mhdr, TIPC_NAMED_MSG);
803 msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
804 msg_set_nametype(mhdr, type);
805 msg_set_nameinst(mhdr, inst);
806 msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
807 dport = tipc_nametbl_translate(type, inst, &dnode);
808 msg_set_destnode(mhdr, dnode);
809 msg_set_destport(mhdr, dport);
810 if (unlikely(!dport && !dnode)) {
811 rc = -EHOSTUNREACH;
812 goto exit;
813 }
814 } else if (dest->addrtype == TIPC_ADDR_ID) {
815 dnode = dest->addr.id.node;
816 msg_set_type(mhdr, TIPC_DIRECT_MSG);
817 msg_set_lookup_scope(mhdr, 0);
818 msg_set_destnode(mhdr, dnode);
819 msg_set_destport(mhdr, dest->addr.id.ref);
820 msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
821 }
822
823 new_mtu:
824 mtu = tipc_node_get_mtu(dnode, tsk->port.ref);
825 rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
826 if (rc < 0)
827 goto exit;
828
829 do {
830 rc = tipc_link_xmit(buf, dnode, tsk->port.ref);
831 if (likely(rc >= 0)) {
832 if (sock->state != SS_READY)
833 sock->state = SS_CONNECTING;
834 rc = dsz;
835 break;
836 }
837 if (rc == -EMSGSIZE)
838 goto new_mtu;
839
840 if (rc != -ELINKCONG)
841 break;
842
843 rc = tipc_wait_for_sndmsg(sock, &timeo);
844 if (rc)
845 kfree_skb_list(buf);
846 } while (!rc);
847 exit:
848 if (iocb)
849 release_sock(sk);
850
851 return rc;
852 }
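
/*
 * Illustrative userspace sketch (not part of this kernel file): a
 * connectionless sendto() using name addressing, which exercises the
 * TIPC_ADDR_NAME branch of tipc_sendmsg() above. Service type 18888 and
 * instance 17 are arbitrary example values.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static ssize_t example_sendto_named(int sd, const void *data, size_t len)
{
	struct sockaddr_tipc dst;

	memset(&dst, 0, sizeof(dst));
	dst.family = AF_TIPC;
	dst.addrtype = TIPC_ADDR_NAME;
	dst.addr.name.name.type = 18888;
	dst.addr.name.name.instance = 17;
	dst.addr.name.domain = 0;          /* 0 = look the name up anywhere */

	return sendto(sd, data, len, 0, (struct sockaddr *)&dst, sizeof(dst));
}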
853
854 static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
855 {
856 struct sock *sk = sock->sk;
857 struct tipc_sock *tsk = tipc_sk(sk);
858 DEFINE_WAIT(wait);
859 int done;
860
861 do {
862 int err = sock_error(sk);
863 if (err)
864 return err;
865 if (sock->state == SS_DISCONNECTING)
866 return -EPIPE;
867 else if (sock->state != SS_CONNECTED)
868 return -ENOTCONN;
869 if (!*timeo_p)
870 return -EAGAIN;
871 if (signal_pending(current))
872 return sock_intr_errno(*timeo_p);
873
874 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
875 done = sk_wait_event(sk, timeo_p,
876 (!tsk->link_cong &&
877 !tipc_sk_conn_cong(tsk)) ||
878 !tsk->port.connected);
879 finish_wait(sk_sleep(sk), &wait);
880 } while (!done);
881 return 0;
882 }
883
884 /**
885 * tipc_send_stream - send stream-oriented data
886 * @iocb: (unused)
887 * @sock: socket structure
888 * @m: data to send
889 * @dsz: total length of data to be transmitted
890 *
891 * Used for SOCK_STREAM data.
892 *
893 * Returns the number of bytes sent on success (or partial success),
894 * or errno if no data sent
895 */
896 static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
897 struct msghdr *m, size_t dsz)
898 {
899 struct sock *sk = sock->sk;
900 struct tipc_sock *tsk = tipc_sk(sk);
901 struct tipc_port *port = &tsk->port;
902 struct tipc_msg *mhdr = &port->phdr;
903 struct sk_buff *buf;
904 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
905 u32 ref = port->ref;
906 int rc = -EINVAL;
907 long timeo;
908 u32 dnode;
909 uint mtu, send, sent = 0;
910
911 /* Handle implied connection establishment */
912 if (unlikely(dest)) {
913 rc = tipc_sendmsg(iocb, sock, m, dsz);
914 if (dsz && (dsz == rc))
915 tsk->sent_unacked = 1;
916 return rc;
917 }
918 if (dsz > (uint)INT_MAX)
919 return -EMSGSIZE;
920
921 if (iocb)
922 lock_sock(sk);
923
924 if (unlikely(sock->state != SS_CONNECTED)) {
925 if (sock->state == SS_DISCONNECTING)
926 rc = -EPIPE;
927 else
928 rc = -ENOTCONN;
929 goto exit;
930 }
931
932 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
933 dnode = tipc_port_peernode(port);
934
935 next:
936 mtu = port->max_pkt;
937 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
938 rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf);
939 if (unlikely(rc < 0))
940 goto exit;
941 do {
942 if (likely(!tipc_sk_conn_cong(tsk))) {
943 rc = tipc_link_xmit(buf, dnode, ref);
944 if (likely(!rc)) {
945 tsk->sent_unacked++;
946 sent += send;
947 if (sent == dsz)
948 break;
949 goto next;
950 }
951 if (rc == -EMSGSIZE) {
952 port->max_pkt = tipc_node_get_mtu(dnode, ref);
953 goto next;
954 }
955 if (rc != -ELINKCONG)
956 break;
957 }
958 rc = tipc_wait_for_sndpkt(sock, &timeo);
959 if (rc)
960 kfree_skb_list(buf);
961 } while (!rc);
962 exit:
963 if (iocb)
964 release_sock(sk);
965 return sent ? sent : rc;
966 }
967
968 /**
969 * tipc_send_packet - send a connection-oriented message
970 * @iocb: if NULL, indicates that socket lock is already held
971 * @sock: socket structure
972 * @m: message to send
973 * @dsz: length of data to be transmitted
974 *
975 * Used for SOCK_SEQPACKET messages.
976 *
977 * Returns the number of bytes sent on success, or errno otherwise
978 */
979 static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
980 struct msghdr *m, size_t dsz)
981 {
982 if (dsz > TIPC_MAX_USER_MSG_SIZE)
983 return -EMSGSIZE;
984
985 return tipc_send_stream(iocb, sock, m, dsz);
986 }
987
988 /**
989 * auto_connect - complete connection setup to a remote port
990 * @tsk: tipc socket structure
991 * @msg: peer's response message
992 *
993 * Returns 0 on success, errno otherwise
994 */
995 static int auto_connect(struct tipc_sock *tsk, struct tipc_msg *msg)
996 {
997 struct tipc_port *port = &tsk->port;
998 struct socket *sock = tsk->sk.sk_socket;
999 struct tipc_portid peer;
1000
1001 peer.ref = msg_origport(msg);
1002 peer.node = msg_orignode(msg);
1003
1004 __tipc_port_connect(port->ref, port, &peer);
1005
1006 if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
1007 return -EINVAL;
1008 msg_set_importance(&port->phdr, (u32)msg_importance(msg));
1009 sock->state = SS_CONNECTED;
1010 return 0;
1011 }
1012
1013 /**
1014 * set_orig_addr - capture sender's address for received message
1015 * @m: descriptor for message info
1016 * @msg: received message header
1017 *
1018 * Note: Address is not captured if not requested by receiver.
1019 */
1020 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
1021 {
1022 DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
1023
1024 if (addr) {
1025 addr->family = AF_TIPC;
1026 addr->addrtype = TIPC_ADDR_ID;
1027 memset(&addr->addr, 0, sizeof(addr->addr));
1028 addr->addr.id.ref = msg_origport(msg);
1029 addr->addr.id.node = msg_orignode(msg);
1030 addr->addr.name.domain = 0; /* could leave uninitialized */
1031 addr->scope = 0; /* could leave uninitialized */
1032 m->msg_namelen = sizeof(struct sockaddr_tipc);
1033 }
1034 }
1035
1036 /**
1037 * anc_data_recv - optionally capture ancillary data for received message
1038 * @m: descriptor for message info
1039 * @msg: received message header
1040 * @tport: TIPC port associated with message
1041 *
1042 * Note: Ancillary data is not captured if not requested by receiver.
1043 *
1044 * Returns 0 if successful, otherwise errno
1045 */
1046 static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1047 struct tipc_port *tport)
1048 {
1049 u32 anc_data[3];
1050 u32 err;
1051 u32 dest_type;
1052 int has_name;
1053 int res;
1054
1055 if (likely(m->msg_controllen == 0))
1056 return 0;
1057
1058 /* Optionally capture errored message object(s) */
1059 err = msg ? msg_errcode(msg) : 0;
1060 if (unlikely(err)) {
1061 anc_data[0] = err;
1062 anc_data[1] = msg_data_sz(msg);
1063 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1064 if (res)
1065 return res;
1066 if (anc_data[1]) {
1067 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1068 msg_data(msg));
1069 if (res)
1070 return res;
1071 }
1072 }
1073
1074 /* Optionally capture message destination object */
1075 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1076 switch (dest_type) {
1077 case TIPC_NAMED_MSG:
1078 has_name = 1;
1079 anc_data[0] = msg_nametype(msg);
1080 anc_data[1] = msg_namelower(msg);
1081 anc_data[2] = msg_namelower(msg);
1082 break;
1083 case TIPC_MCAST_MSG:
1084 has_name = 1;
1085 anc_data[0] = msg_nametype(msg);
1086 anc_data[1] = msg_namelower(msg);
1087 anc_data[2] = msg_nameupper(msg);
1088 break;
1089 case TIPC_CONN_MSG:
1090 has_name = (tport->conn_type != 0);
1091 anc_data[0] = tport->conn_type;
1092 anc_data[1] = tport->conn_instance;
1093 anc_data[2] = tport->conn_instance;
1094 break;
1095 default:
1096 has_name = 0;
1097 }
1098 if (has_name) {
1099 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1100 if (res)
1101 return res;
1102 }
1103
1104 return 0;
1105 }
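
/*
 * Illustrative userspace sketch (not part of this kernel file): receiving a
 * message and reading the TIPC_DESTNAME ancillary data that anc_data_recv()
 * above attaches when the caller supplies a control buffer. The control
 * buffer here is sized for the 12-byte destination name only.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static ssize_t example_recv_with_destname(int sd, void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(12)];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr m = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	ssize_t n = recvmsg(sd, &m, 0);

	if (n < 0)
		return n;
	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
		if (cm->cmsg_level == SOL_TIPC && cm->cmsg_type == TIPC_DESTNAME) {
			__u32 name[3];

			memcpy(name, CMSG_DATA(cm), sizeof(name));
			printf("dest name %u {%u,%u}\n", name[0], name[1], name[2]);
		}
	}
	return n;
}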
1106
1107 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1108 {
1109 struct sock *sk = sock->sk;
1110 DEFINE_WAIT(wait);
1111 long timeo = *timeop;
1112 int err;
1113
1114 for (;;) {
1115 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1116 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1117 if (sock->state == SS_DISCONNECTING) {
1118 err = -ENOTCONN;
1119 break;
1120 }
1121 release_sock(sk);
1122 timeo = schedule_timeout(timeo);
1123 lock_sock(sk);
1124 }
1125 err = 0;
1126 if (!skb_queue_empty(&sk->sk_receive_queue))
1127 break;
1128 err = sock_intr_errno(timeo);
1129 if (signal_pending(current))
1130 break;
1131 err = -EAGAIN;
1132 if (!timeo)
1133 break;
1134 }
1135 finish_wait(sk_sleep(sk), &wait);
1136 *timeop = timeo;
1137 return err;
1138 }
1139
1140 /**
1141 * tipc_recvmsg - receive packet-oriented message
1142 * @iocb: (unused)
1143 * @m: descriptor for message info
1144 * @buf_len: total size of user buffer area
1145 * @flags: receive flags
1146 *
1147 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1148 * If the complete message doesn't fit in user area, truncate it.
1149 *
1150 * Returns size of returned message data, errno otherwise
1151 */
1152 static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
1153 struct msghdr *m, size_t buf_len, int flags)
1154 {
1155 struct sock *sk = sock->sk;
1156 struct tipc_sock *tsk = tipc_sk(sk);
1157 struct tipc_port *port = &tsk->port;
1158 struct sk_buff *buf;
1159 struct tipc_msg *msg;
1160 long timeo;
1161 unsigned int sz;
1162 u32 err;
1163 int res;
1164
1165 /* Catch invalid receive requests */
1166 if (unlikely(!buf_len))
1167 return -EINVAL;
1168
1169 lock_sock(sk);
1170
1171 if (unlikely(sock->state == SS_UNCONNECTED)) {
1172 res = -ENOTCONN;
1173 goto exit;
1174 }
1175
1176 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1177 restart:
1178
1179 /* Look for a message in receive queue; wait if necessary */
1180 res = tipc_wait_for_rcvmsg(sock, &timeo);
1181 if (res)
1182 goto exit;
1183
1184 /* Look at first message in receive queue */
1185 buf = skb_peek(&sk->sk_receive_queue);
1186 msg = buf_msg(buf);
1187 sz = msg_data_sz(msg);
1188 err = msg_errcode(msg);
1189
1190 /* Discard an empty non-errored message & try again */
1191 if ((!sz) && (!err)) {
1192 advance_rx_queue(sk);
1193 goto restart;
1194 }
1195
1196 /* Capture sender's address (optional) */
1197 set_orig_addr(m, msg);
1198
1199 /* Capture ancillary data (optional) */
1200 res = anc_data_recv(m, msg, port);
1201 if (res)
1202 goto exit;
1203
1204 /* Capture message data (if valid) & compute return value (always) */
1205 if (!err) {
1206 if (unlikely(buf_len < sz)) {
1207 sz = buf_len;
1208 m->msg_flags |= MSG_TRUNC;
1209 }
1210 res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
1211 m->msg_iov, sz);
1212 if (res)
1213 goto exit;
1214 res = sz;
1215 } else {
1216 if ((sock->state == SS_READY) ||
1217 ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
1218 res = 0;
1219 else
1220 res = -ECONNRESET;
1221 }
1222
1223 /* Consume received message (optional) */
1224 if (likely(!(flags & MSG_PEEK))) {
1225 if ((sock->state != SS_READY) &&
1226 (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1227 tipc_acknowledge(port->ref, tsk->rcv_unacked);
1228 tsk->rcv_unacked = 0;
1229 }
1230 advance_rx_queue(sk);
1231 }
1232 exit:
1233 release_sock(sk);
1234 return res;
1235 }
1236
1237 /**
1238 * tipc_recv_stream - receive stream-oriented data
1239 * @iocb: (unused)
1240 * @m: descriptor for message info
1241 * @buf_len: total size of user buffer area
1242 * @flags: receive flags
1243 *
1244 * Used for SOCK_STREAM messages only. If not enough data is available,
1245 * it will optionally wait for more; never truncates data.
1246 *
1247 * Returns size of returned message data, errno otherwise
1248 */
1249 static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
1250 struct msghdr *m, size_t buf_len, int flags)
1251 {
1252 struct sock *sk = sock->sk;
1253 struct tipc_sock *tsk = tipc_sk(sk);
1254 struct tipc_port *port = &tsk->port;
1255 struct sk_buff *buf;
1256 struct tipc_msg *msg;
1257 long timeo;
1258 unsigned int sz;
1259 int sz_to_copy, target, needed;
1260 int sz_copied = 0;
1261 u32 err;
1262 int res = 0;
1263
1264 /* Catch invalid receive attempts */
1265 if (unlikely(!buf_len))
1266 return -EINVAL;
1267
1268 lock_sock(sk);
1269
1270 if (unlikely(sock->state == SS_UNCONNECTED)) {
1271 res = -ENOTCONN;
1272 goto exit;
1273 }
1274
1275 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1276 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1277
1278 restart:
1279 /* Look for a message in receive queue; wait if necessary */
1280 res = tipc_wait_for_rcvmsg(sock, &timeo);
1281 if (res)
1282 goto exit;
1283
1284 /* Look at first message in receive queue */
1285 buf = skb_peek(&sk->sk_receive_queue);
1286 msg = buf_msg(buf);
1287 sz = msg_data_sz(msg);
1288 err = msg_errcode(msg);
1289
1290 /* Discard an empty non-errored message & try again */
1291 if ((!sz) && (!err)) {
1292 advance_rx_queue(sk);
1293 goto restart;
1294 }
1295
1296 /* Optionally capture sender's address & ancillary data of first msg */
1297 if (sz_copied == 0) {
1298 set_orig_addr(m, msg);
1299 res = anc_data_recv(m, msg, port);
1300 if (res)
1301 goto exit;
1302 }
1303
1304 /* Capture message data (if valid) & compute return value (always) */
1305 if (!err) {
1306 u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
1307
1308 sz -= offset;
1309 needed = (buf_len - sz_copied);
1310 sz_to_copy = (sz <= needed) ? sz : needed;
1311
1312 res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
1313 m->msg_iov, sz_to_copy);
1314 if (res)
1315 goto exit;
1316
1317 sz_copied += sz_to_copy;
1318
1319 if (sz_to_copy < sz) {
1320 if (!(flags & MSG_PEEK))
1321 TIPC_SKB_CB(buf)->handle =
1322 (void *)(unsigned long)(offset + sz_to_copy);
1323 goto exit;
1324 }
1325 } else {
1326 if (sz_copied != 0)
1327 goto exit; /* can't add error msg to valid data */
1328
1329 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
1330 res = 0;
1331 else
1332 res = -ECONNRESET;
1333 }
1334
1335 /* Consume received message (optional) */
1336 if (likely(!(flags & MSG_PEEK))) {
1337 if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1338 tipc_acknowledge(port->ref, tsk->rcv_unacked);
1339 tsk->rcv_unacked = 0;
1340 }
1341 advance_rx_queue(sk);
1342 }
1343
1344 /* Loop around if more data is required */
1345 if ((sz_copied < buf_len) && /* didn't get all requested data */
1346 (!skb_queue_empty(&sk->sk_receive_queue) ||
1347 (sz_copied < target)) && /* and more is ready or required */
1348 (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */
1349 (!err)) /* and haven't reached a FIN */
1350 goto restart;
1351
1352 exit:
1353 release_sock(sk);
1354 return sz_copied ? sz_copied : res;
1355 }
1356
1357 /**
1358 * tipc_write_space - wake up thread if port congestion is released
1359 * @sk: socket
1360 */
1361 static void tipc_write_space(struct sock *sk)
1362 {
1363 struct socket_wq *wq;
1364
1365 rcu_read_lock();
1366 wq = rcu_dereference(sk->sk_wq);
1367 if (wq_has_sleeper(wq))
1368 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1369 POLLWRNORM | POLLWRBAND);
1370 rcu_read_unlock();
1371 }
1372
1373 /**
1374 * tipc_data_ready - wake up threads to indicate messages have been received
1375 * @sk: socket
1377 */
1378 static void tipc_data_ready(struct sock *sk)
1379 {
1380 struct socket_wq *wq;
1381
1382 rcu_read_lock();
1383 wq = rcu_dereference(sk->sk_wq);
1384 if (wq_has_sleeper(wq))
1385 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1386 POLLRDNORM | POLLRDBAND);
1387 rcu_read_unlock();
1388 }
1389
1390 /**
1391 * filter_connect - Handle all incoming messages for a connection-based socket
1392 * @tsk: TIPC socket
1393 * @msg: message
1394 *
1395 * Returns 0 (TIPC_OK) if everything is ok, -TIPC_ERR_NO_PORT otherwise
1396 */
1397 static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
1398 {
1399 struct sock *sk = &tsk->sk;
1400 struct tipc_port *port = &tsk->port;
1401 struct socket *sock = sk->sk_socket;
1402 struct tipc_msg *msg = buf_msg(*buf);
1403
1404 int retval = -TIPC_ERR_NO_PORT;
1405 int res;
1406
1407 if (msg_mcast(msg))
1408 return retval;
1409
1410 switch ((int)sock->state) {
1411 case SS_CONNECTED:
1412 /* Accept only connection-based messages sent by peer */
1413 if (msg_connected(msg) && tipc_port_peer_msg(port, msg)) {
1414 if (unlikely(msg_errcode(msg))) {
1415 sock->state = SS_DISCONNECTING;
1416 __tipc_port_disconnect(port);
1417 }
1418 retval = TIPC_OK;
1419 }
1420 break;
1421 case SS_CONNECTING:
1422 /* Accept only ACK or NACK message */
1423 if (unlikely(msg_errcode(msg))) {
1424 sock->state = SS_DISCONNECTING;
1425 sk->sk_err = ECONNREFUSED;
1426 retval = TIPC_OK;
1427 break;
1428 }
1429
1430 if (unlikely(!msg_connected(msg)))
1431 break;
1432
1433 res = auto_connect(tsk, msg);
1434 if (res) {
1435 sock->state = SS_DISCONNECTING;
1436 sk->sk_err = -res;
1437 retval = TIPC_OK;
1438 break;
1439 }
1440
1441 /* If an incoming message is an 'ACK-', it should be
1442 * discarded here because it doesn't contain useful
1443 * data. In addition, we should try to wake up
1444 * connect() routine if sleeping.
1445 */
1446 if (msg_data_sz(msg) == 0) {
1447 kfree_skb(*buf);
1448 *buf = NULL;
1449 if (waitqueue_active(sk_sleep(sk)))
1450 wake_up_interruptible(sk_sleep(sk));
1451 }
1452 retval = TIPC_OK;
1453 break;
1454 case SS_LISTENING:
1455 case SS_UNCONNECTED:
1456 /* Accept only SYN message */
1457 if (!msg_connected(msg) && !(msg_errcode(msg)))
1458 retval = TIPC_OK;
1459 break;
1460 case SS_DISCONNECTING:
1461 break;
1462 default:
1463 pr_err("Unknown socket state %u\n", sock->state);
1464 }
1465 return retval;
1466 }
1467
1468 /**
1469 * rcvbuf_limit - get proper overload limit of socket receive queue
1470 * @sk: socket
1471 * @buf: message
1472 *
1473 * For all connection-oriented messages, irrespective of importance,
1474 * the default overload value (i.e. 67MB) is set as the limit.
1475 *
1476 * For all connectionless messages, by default the new queue limits are
1477 * as below:
1478 *
1479 * TIPC_LOW_IMPORTANCE (4 MB)
1480 * TIPC_MEDIUM_IMPORTANCE (8 MB)
1481 * TIPC_HIGH_IMPORTANCE (16 MB)
1482 * TIPC_CRITICAL_IMPORTANCE (32 MB)
1483 *
1484 * Returns overload limit according to corresponding message importance
1485 */
1486 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1487 {
1488 struct tipc_msg *msg = buf_msg(buf);
1489
1490 if (msg_connected(msg))
1491 return sysctl_tipc_rmem[2];
1492
1493 return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
1494 msg_importance(msg);
1495 }
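
/*
 * Worked example for the expression above (illustrative only): with
 * TIPC_CRITICAL_IMPORTANCE == 3, a connectionless message of importance i is
 * limited to (sk_rcvbuf >> 3) << i. Assuming sk_rcvbuf is 32 MB (the value
 * implied by the table above), that yields 4 MB for TIPC_LOW_IMPORTANCE (0),
 * 8 MB for MEDIUM (1), 16 MB for HIGH (2) and 32 MB for CRITICAL (3).
 */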
1496
1497 /**
1498 * filter_rcv - validate incoming message
1499 * @sk: socket
1500 * @buf: message
1501 *
1502 * Enqueues message on receive queue if acceptable; optionally handles
1503 * disconnect indication for a connected socket.
1504 *
1505 * Called with socket lock already taken; port lock may also be taken.
1506 *
1507 * Returns 0 (TIPC_OK) if message was consumed, -TIPC error code if message
1508 * to be rejected, 1 (TIPC_FWD_MSG) if (CONN_MANAGER) message to be forwarded
1509 */
1510 static int filter_rcv(struct sock *sk, struct sk_buff *buf)
1511 {
1512 struct socket *sock = sk->sk_socket;
1513 struct tipc_sock *tsk = tipc_sk(sk);
1514 struct tipc_msg *msg = buf_msg(buf);
1515 unsigned int limit = rcvbuf_limit(sk, buf);
1516 u32 onode;
1517 int rc = TIPC_OK;
1518
1519 if (unlikely(msg_user(msg) == CONN_MANAGER))
1520 return tipc_sk_proto_rcv(tsk, &onode, buf);
1521
1522 /* Reject message if it is wrong sort of message for socket */
1523 if (msg_type(msg) > TIPC_DIRECT_MSG)
1524 return -TIPC_ERR_NO_PORT;
1525
1526 if (sock->state == SS_READY) {
1527 if (msg_connected(msg))
1528 return -TIPC_ERR_NO_PORT;
1529 } else {
1530 rc = filter_connect(tsk, &buf);
1531 if (rc != TIPC_OK || buf == NULL)
1532 return rc;
1533 }
1534
1535 /* Reject message if there isn't room to queue it */
1536 if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
1537 return -TIPC_ERR_OVERLOAD;
1538
1539 /* Enqueue message */
1540 TIPC_SKB_CB(buf)->handle = NULL;
1541 __skb_queue_tail(&sk->sk_receive_queue, buf);
1542 skb_set_owner_r(buf, sk);
1543
1544 sk->sk_data_ready(sk);
1545 return TIPC_OK;
1546 }
1547
1548 /**
1549 * tipc_backlog_rcv - handle incoming message from backlog queue
1550 * @sk: socket
1551 * @buf: message
1552 *
1553 * Caller must hold socket lock, but not port lock.
1554 *
1555 * Returns 0
1556 */
1557 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
1558 {
1559 int rc;
1560 u32 onode;
1561 struct tipc_sock *tsk = tipc_sk(sk);
1562 uint truesize = buf->truesize;
1563
1564 rc = filter_rcv(sk, buf);
1565
1566 if (likely(!rc)) {
1567 if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
1568 atomic_add(truesize, &tsk->dupl_rcvcnt);
1569 return 0;
1570 }
1571
1572 if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc))
1573 return 0;
1574
1575 tipc_link_xmit(buf, onode, 0);
1576
1577 return 0;
1578 }
1579
1580 /**
1581 * tipc_sk_rcv - handle incoming message
1582 * @buf: buffer containing arriving message
1583 * Consumes buffer
1584 * Returns 0 if success, or errno: -EHOSTUNREACH
1585 */
1586 int tipc_sk_rcv(struct sk_buff *buf)
1587 {
1588 struct tipc_sock *tsk;
1589 struct tipc_port *port;
1590 struct sock *sk;
1591 u32 dport = msg_destport(buf_msg(buf));
1592 int rc = TIPC_OK;
1593 uint limit;
1594 u32 dnode;
1595
1596 /* Validate destination and message */
1597 port = tipc_port_lock(dport);
1598 if (unlikely(!port)) {
1599 rc = tipc_msg_eval(buf, &dnode);
1600 goto exit;
1601 }
1602
1603 tsk = tipc_port_to_sock(port);
1604 sk = &tsk->sk;
1605
1606 /* Queue message */
1607 bh_lock_sock(sk);
1608
1609 if (!sock_owned_by_user(sk)) {
1610 rc = filter_rcv(sk, buf);
1611 } else {
1612 if (sk->sk_backlog.len == 0)
1613 atomic_set(&tsk->dupl_rcvcnt, 0);
1614 limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
1615 if (sk_add_backlog(sk, buf, limit))
1616 rc = -TIPC_ERR_OVERLOAD;
1617 }
1618 bh_unlock_sock(sk);
1619 tipc_port_unlock(port);
1620
1621 if (likely(!rc))
1622 return 0;
1623 exit:
1624 if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc))
1625 return -EHOSTUNREACH;
1626
1627 tipc_link_xmit(buf, dnode, 0);
1628 return (rc < 0) ? -EHOSTUNREACH : 0;
1629 }
1630
1631 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
1632 {
1633 struct sock *sk = sock->sk;
1634 DEFINE_WAIT(wait);
1635 int done;
1636
1637 do {
1638 int err = sock_error(sk);
1639 if (err)
1640 return err;
1641 if (!*timeo_p)
1642 return -ETIMEDOUT;
1643 if (signal_pending(current))
1644 return sock_intr_errno(*timeo_p);
1645
1646 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1647 done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
1648 finish_wait(sk_sleep(sk), &wait);
1649 } while (!done);
1650 return 0;
1651 }
1652
1653 /**
1654 * tipc_connect - establish a connection to another TIPC port
1655 * @sock: socket structure
1656 * @dest: socket address for destination port
1657 * @destlen: size of socket address data structure
1658 * @flags: file-related flags associated with socket
1659 *
1660 * Returns 0 on success, errno otherwise
1661 */
1662 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
1663 int destlen, int flags)
1664 {
1665 struct sock *sk = sock->sk;
1666 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1667 struct msghdr m = {NULL,};
1668 long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
1669 socket_state previous;
1670 int res;
1671
1672 lock_sock(sk);
1673
1674 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
1675 if (sock->state == SS_READY) {
1676 res = -EOPNOTSUPP;
1677 goto exit;
1678 }
1679
1680 /*
1681 * Reject connection attempt using multicast address
1682 *
1683 * Note: send_msg() validates the rest of the address fields,
1684 * so there's no need to do it here
1685 */
1686 if (dst->addrtype == TIPC_ADDR_MCAST) {
1687 res = -EINVAL;
1688 goto exit;
1689 }
1690
1691 previous = sock->state;
1692 switch (sock->state) {
1693 case SS_UNCONNECTED:
1694 /* Send a 'SYN-' to destination */
1695 m.msg_name = dest;
1696 m.msg_namelen = destlen;
1697
1698 /* If connect is in non-blocking case, set MSG_DONTWAIT to
1699 * indicate send_msg() is never blocked.
1700 */
1701 if (!timeout)
1702 m.msg_flags = MSG_DONTWAIT;
1703
1704 res = tipc_sendmsg(NULL, sock, &m, 0);
1705 if ((res < 0) && (res != -EWOULDBLOCK))
1706 goto exit;
1707
1708 /* Just entered SS_CONNECTING state; the only
1709 * difference is that return value in non-blocking
1710 * case is EINPROGRESS, rather than EALREADY.
1711 */
1712 res = -EINPROGRESS;
1713 case SS_CONNECTING:
1714 if (previous == SS_CONNECTING)
1715 res = -EALREADY;
1716 if (!timeout)
1717 goto exit;
1718 timeout = msecs_to_jiffies(timeout);
1719 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1720 res = tipc_wait_for_connect(sock, &timeout);
1721 break;
1722 case SS_CONNECTED:
1723 res = -EISCONN;
1724 break;
1725 default:
1726 res = -EINVAL;
1727 break;
1728 }
1729 exit:
1730 release_sock(sk);
1731 return res;
1732 }
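
/*
 * Illustrative userspace sketch (not part of this kernel file): a
 * non-blocking connect() to a named TIPC service. As described above, the
 * first attempt returns -EINPROGRESS, a repeat while still in SS_CONNECTING
 * returns -EALREADY, and completion can be detected by polling for POLLOUT.
 * Service type 18888 and instance 17 are arbitrary example values.
 */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_connect_nonblock(int sd)
{
	struct sockaddr_tipc srv;
	struct pollfd pfd = { .fd = sd, .events = POLLOUT };

	memset(&srv, 0, sizeof(srv));
	srv.family = AF_TIPC;
	srv.addrtype = TIPC_ADDR_NAME;
	srv.addr.name.name.type = 18888;
	srv.addr.name.name.instance = 17;

	fcntl(sd, F_SETFL, fcntl(sd, F_GETFL) | O_NONBLOCK);
	if (connect(sd, (struct sockaddr *)&srv, sizeof(srv)) == 0)
		return 0;
	if (errno != EINPROGRESS)
		return -1;

	return (poll(&pfd, 1, 8000) == 1) ? 0 : -1;  /* mirror the 8s default timeout */
}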
1733
1734 /**
1735 * tipc_listen - allow socket to listen for incoming connections
1736 * @sock: socket structure
1737 * @len: (unused)
1738 *
1739 * Returns 0 on success, errno otherwise
1740 */
1741 static int tipc_listen(struct socket *sock, int len)
1742 {
1743 struct sock *sk = sock->sk;
1744 int res;
1745
1746 lock_sock(sk);
1747
1748 if (sock->state != SS_UNCONNECTED)
1749 res = -EINVAL;
1750 else {
1751 sock->state = SS_LISTENING;
1752 res = 0;
1753 }
1754
1755 release_sock(sk);
1756 return res;
1757 }
1758
1759 static int tipc_wait_for_accept(struct socket *sock, long timeo)
1760 {
1761 struct sock *sk = sock->sk;
1762 DEFINE_WAIT(wait);
1763 int err;
1764
1765 /* True wake-one mechanism for incoming connections: only
1766 * one process gets woken up, not the 'whole herd'.
1767 * Since we do not 'race & poll' for established sockets
1768 * anymore, the common case will execute the loop only once.
1769 */
1770 for (;;) {
1771 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
1772 TASK_INTERRUPTIBLE);
1773 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1774 release_sock(sk);
1775 timeo = schedule_timeout(timeo);
1776 lock_sock(sk);
1777 }
1778 err = 0;
1779 if (!skb_queue_empty(&sk->sk_receive_queue))
1780 break;
1781 err = -EINVAL;
1782 if (sock->state != SS_LISTENING)
1783 break;
1784 err = sock_intr_errno(timeo);
1785 if (signal_pending(current))
1786 break;
1787 err = -EAGAIN;
1788 if (!timeo)
1789 break;
1790 }
1791 finish_wait(sk_sleep(sk), &wait);
1792 return err;
1793 }
1794
1795 /**
1796 * tipc_accept - wait for connection request
1797 * @sock: listening socket
1798 * @newsock: new socket that is to be connected
1799 * @flags: file-related flags associated with socket
1800 *
1801 * Returns 0 on success, errno otherwise
1802 */
1803 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
1804 {
1805 struct sock *new_sk, *sk = sock->sk;
1806 struct sk_buff *buf;
1807 struct tipc_port *new_port;
1808 struct tipc_msg *msg;
1809 struct tipc_portid peer;
1810 u32 new_ref;
1811 long timeo;
1812 int res;
1813
1814 lock_sock(sk);
1815
1816 if (sock->state != SS_LISTENING) {
1817 res = -EINVAL;
1818 goto exit;
1819 }
1820 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1821 res = tipc_wait_for_accept(sock, timeo);
1822 if (res)
1823 goto exit;
1824
1825 buf = skb_peek(&sk->sk_receive_queue);
1826
1827 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
1828 if (res)
1829 goto exit;
1830
1831 new_sk = new_sock->sk;
1832 new_port = &tipc_sk(new_sk)->port;
1833 new_ref = new_port->ref;
1834 msg = buf_msg(buf);
1835
1836 /* we lock on new_sk; but lockdep sees the lock on sk */
1837 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
1838
1839 /*
1840 * Reject any stray messages received by new socket
1841 * before the socket lock was taken (very, very unlikely)
1842 */
1843 reject_rx_queue(new_sk);
1844
1845 /* Connect new socket to its peer */
1846 peer.ref = msg_origport(msg);
1847 peer.node = msg_orignode(msg);
1848 tipc_port_connect(new_ref, &peer);
1849 new_sock->state = SS_CONNECTED;
1850
1851 tipc_port_set_importance(new_port, msg_importance(msg));
1852 if (msg_named(msg)) {
1853 new_port->conn_type = msg_nametype(msg);
1854 new_port->conn_instance = msg_nameinst(msg);
1855 }
1856
1857 /*
1858 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
1859 * Respond to 'SYN+' by queuing it on new socket.
1860 */
1861 if (!msg_data_sz(msg)) {
1862 struct msghdr m = {NULL,};
1863
1864 advance_rx_queue(sk);
1865 tipc_send_packet(NULL, new_sock, &m, 0);
1866 } else {
1867 __skb_dequeue(&sk->sk_receive_queue);
1868 __skb_queue_head(&new_sk->sk_receive_queue, buf);
1869 skb_set_owner_r(buf, new_sk);
1870 }
1871 release_sock(new_sk);
1872 exit:
1873 release_sock(sk);
1874 return res;
1875 }
1876
1877 /**
1878 * tipc_shutdown - shutdown socket connection
1879 * @sock: socket structure
1880 * @how: direction to close (must be SHUT_RDWR)
1881 *
1882 * Terminates connection (if necessary), then purges socket's receive queue.
1883 *
1884 * Returns 0 on success, errno otherwise
1885 */
1886 static int tipc_shutdown(struct socket *sock, int how)
1887 {
1888 struct sock *sk = sock->sk;
1889 struct tipc_sock *tsk = tipc_sk(sk);
1890 struct tipc_port *port = &tsk->port;
1891 struct sk_buff *buf;
1892 u32 peer;
1893 int res;
1894
1895 if (how != SHUT_RDWR)
1896 return -EINVAL;
1897
1898 lock_sock(sk);
1899
1900 switch (sock->state) {
1901 case SS_CONNECTING:
1902 case SS_CONNECTED:
1903
1904 restart:
1905 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1906 buf = __skb_dequeue(&sk->sk_receive_queue);
1907 if (buf) {
1908 if (TIPC_SKB_CB(buf)->handle != NULL) {
1909 kfree_skb(buf);
1910 goto restart;
1911 }
1912 tipc_port_disconnect(port->ref);
1913 if (tipc_msg_reverse(buf, &peer, TIPC_CONN_SHUTDOWN))
1914 tipc_link_xmit(buf, peer, 0);
1915 } else {
1916 tipc_port_shutdown(port->ref);
1917 }
1918
1919 sock->state = SS_DISCONNECTING;
1920
1921 /* fall through */
1922
1923 case SS_DISCONNECTING:
1924
1925 /* Discard any unreceived messages */
1926 __skb_queue_purge(&sk->sk_receive_queue);
1927
1928 /* Wake up anyone sleeping in poll */
1929 sk->sk_state_change(sk);
1930 res = 0;
1931 break;
1932
1933 default:
1934 res = -ENOTCONN;
1935 }
1936
1937 release_sock(sk);
1938 return res;
1939 }
1940
1941 /**
1942 * tipc_setsockopt - set socket option
1943 * @sock: socket structure
1944 * @lvl: option level
1945 * @opt: option identifier
1946 * @ov: pointer to new option value
1947 * @ol: length of option value
1948 *
1949 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
1950 * (to ease compatibility).
1951 *
1952 * Returns 0 on success, errno otherwise
1953 */
1954 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
1955 char __user *ov, unsigned int ol)
1956 {
1957 struct sock *sk = sock->sk;
1958 struct tipc_sock *tsk = tipc_sk(sk);
1959 struct tipc_port *port = &tsk->port;
1960 u32 value;
1961 int res;
1962
1963 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1964 return 0;
1965 if (lvl != SOL_TIPC)
1966 return -ENOPROTOOPT;
1967 if (ol < sizeof(value))
1968 return -EINVAL;
1969 res = get_user(value, (u32 __user *)ov);
1970 if (res)
1971 return res;
1972
1973 lock_sock(sk);
1974
1975 switch (opt) {
1976 case TIPC_IMPORTANCE:
1977 tipc_port_set_importance(port, value);
1978 break;
1979 case TIPC_SRC_DROPPABLE:
1980 if (sock->type != SOCK_STREAM)
1981 tipc_port_set_unreliable(port, value);
1982 else
1983 res = -ENOPROTOOPT;
1984 break;
1985 case TIPC_DEST_DROPPABLE:
1986 tipc_port_set_unreturnable(port, value);
1987 break;
1988 case TIPC_CONN_TIMEOUT:
1989 tipc_sk(sk)->conn_timeout = value;
1990 /* no need to set "res", since already 0 at this point */
1991 break;
1992 default:
1993 res = -EINVAL;
1994 }
1995
1996 release_sock(sk);
1997
1998 return res;
1999 }
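
/*
 * Illustrative userspace sketch (not part of this kernel file): setting the
 * TIPC-level options handled above, in the units the code above expects
 * (an importance level and a timeout in milliseconds).
 */
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_set_options(int sd)
{
	__u32 imp = TIPC_HIGH_IMPORTANCE;
	__u32 timeout_ms = 10000;

	if (setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp)) < 0)
		return -1;
	return setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT,
			  &timeout_ms, sizeof(timeout_ms));
}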
2000
2001 /**
2002 * tipc_getsockopt - get socket option
2003 * @sock: socket structure
2004 * @lvl: option level
2005 * @opt: option identifier
2006 * @ov: receptacle for option value
2007 * @ol: receptacle for length of option value
2008 *
2009 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2010 * (to ease compatibility).
2011 *
2012 * Returns 0 on success, errno otherwise
2013 */
2014 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2015 char __user *ov, int __user *ol)
2016 {
2017 struct sock *sk = sock->sk;
2018 struct tipc_sock *tsk = tipc_sk(sk);
2019 struct tipc_port *port = &tsk->port;
2020 int len;
2021 u32 value;
2022 int res;
2023
2024 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2025 return put_user(0, ol);
2026 if (lvl != SOL_TIPC)
2027 return -ENOPROTOOPT;
2028 res = get_user(len, ol);
2029 if (res)
2030 return res;
2031
2032 lock_sock(sk);
2033
2034 switch (opt) {
2035 case TIPC_IMPORTANCE:
2036 value = tipc_port_importance(port);
2037 break;
2038 case TIPC_SRC_DROPPABLE:
2039 value = tipc_port_unreliable(port);
2040 break;
2041 case TIPC_DEST_DROPPABLE:
2042 value = tipc_port_unreturnable(port);
2043 break;
2044 case TIPC_CONN_TIMEOUT:
2045 value = tipc_sk(sk)->conn_timeout;
2046 /* no need to set "res", since already 0 at this point */
2047 break;
2048 case TIPC_NODE_RECVQ_DEPTH:
2049 value = 0; /* was tipc_queue_size, now obsolete */
2050 break;
2051 case TIPC_SOCK_RECVQ_DEPTH:
2052 value = skb_queue_len(&sk->sk_receive_queue);
2053 break;
2054 default:
2055 res = -EINVAL;
2056 }
2057
2058 release_sock(sk);
2059
2060 if (res)
2061 return res; /* "get" failed */
2062
2063 if (len < sizeof(value))
2064 return -EINVAL;
2065
2066 if (copy_to_user(ov, &value, sizeof(value)))
2067 return -EFAULT;
2068
2069 return put_user(sizeof(value), ol);
2070 }
2071
2072 static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
2073 {
2074 struct tipc_sioc_ln_req lnr;
2075 void __user *argp = (void __user *)arg;
2076
2077 switch (cmd) {
2078 case SIOCGETLINKNAME:
2079 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2080 return -EFAULT;
2081 if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer,
2082 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2083 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2084 return -EFAULT;
2085 return 0;
2086 }
2087 return -EADDRNOTAVAIL;
2088 default:
2089 return -ENOIOCTLCMD;
2090 }
2091 }
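
/*
 * Illustrative userspace sketch (not part of this kernel file): querying a
 * link name through the SIOCGETLINKNAME ioctl handled above. The peer
 * address and bearer id are caller-supplied values that must identify an
 * existing link.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>
#include <linux/tipc.h>

static int example_get_link_name(int sd, __u32 peer_addr, __u32 bearer_id)
{
	struct tipc_sioc_ln_req req;

	memset(&req, 0, sizeof(req));
	req.peer = peer_addr;
	req.bearer_id = bearer_id;

	if (ioctl(sd, SIOCGETLINKNAME, &req) < 0)
		return -1;
	printf("link: %s\n", req.linkname);
	return 0;
}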
2092
2093 /* Protocol switches for the various types of TIPC sockets */
2094
2095 static const struct proto_ops msg_ops = {
2096 .owner = THIS_MODULE,
2097 .family = AF_TIPC,
2098 .release = tipc_release,
2099 .bind = tipc_bind,
2100 .connect = tipc_connect,
2101 .socketpair = sock_no_socketpair,
2102 .accept = sock_no_accept,
2103 .getname = tipc_getname,
2104 .poll = tipc_poll,
2105 .ioctl = tipc_ioctl,
2106 .listen = sock_no_listen,
2107 .shutdown = tipc_shutdown,
2108 .setsockopt = tipc_setsockopt,
2109 .getsockopt = tipc_getsockopt,
2110 .sendmsg = tipc_sendmsg,
2111 .recvmsg = tipc_recvmsg,
2112 .mmap = sock_no_mmap,
2113 .sendpage = sock_no_sendpage
2114 };
2115
2116 static const struct proto_ops packet_ops = {
2117 .owner = THIS_MODULE,
2118 .family = AF_TIPC,
2119 .release = tipc_release,
2120 .bind = tipc_bind,
2121 .connect = tipc_connect,
2122 .socketpair = sock_no_socketpair,
2123 .accept = tipc_accept,
2124 .getname = tipc_getname,
2125 .poll = tipc_poll,
2126 .ioctl = tipc_ioctl,
2127 .listen = tipc_listen,
2128 .shutdown = tipc_shutdown,
2129 .setsockopt = tipc_setsockopt,
2130 .getsockopt = tipc_getsockopt,
2131 .sendmsg = tipc_send_packet,
2132 .recvmsg = tipc_recvmsg,
2133 .mmap = sock_no_mmap,
2134 .sendpage = sock_no_sendpage
2135 };
2136
2137 static const struct proto_ops stream_ops = {
2138 .owner = THIS_MODULE,
2139 .family = AF_TIPC,
2140 .release = tipc_release,
2141 .bind = tipc_bind,
2142 .connect = tipc_connect,
2143 .socketpair = sock_no_socketpair,
2144 .accept = tipc_accept,
2145 .getname = tipc_getname,
2146 .poll = tipc_poll,
2147 .ioctl = tipc_ioctl,
2148 .listen = tipc_listen,
2149 .shutdown = tipc_shutdown,
2150 .setsockopt = tipc_setsockopt,
2151 .getsockopt = tipc_getsockopt,
2152 .sendmsg = tipc_send_stream,
2153 .recvmsg = tipc_recv_stream,
2154 .mmap = sock_no_mmap,
2155 .sendpage = sock_no_sendpage
2156 };
2157
2158 static const struct net_proto_family tipc_family_ops = {
2159 .owner = THIS_MODULE,
2160 .family = AF_TIPC,
2161 .create = tipc_sk_create
2162 };
2163
2164 static struct proto tipc_proto = {
2165 .name = "TIPC",
2166 .owner = THIS_MODULE,
2167 .obj_size = sizeof(struct tipc_sock),
2168 .sysctl_rmem = sysctl_tipc_rmem
2169 };
2170
2171 static struct proto tipc_proto_kern = {
2172 .name = "TIPC",
2173 .obj_size = sizeof(struct tipc_sock),
2174 .sysctl_rmem = sysctl_tipc_rmem
2175 };
2176
2177 /**
2178 * tipc_socket_init - initialize TIPC socket interface
2179 *
2180 * Returns 0 on success, errno otherwise
2181 */
2182 int tipc_socket_init(void)
2183 {
2184 int res;
2185
2186 res = proto_register(&tipc_proto, 1);
2187 if (res) {
2188 pr_err("Failed to register TIPC protocol type\n");
2189 goto out;
2190 }
2191
2192 res = sock_register(&tipc_family_ops);
2193 if (res) {
2194 pr_err("Failed to register TIPC socket type\n");
2195 proto_unregister(&tipc_proto);
2196 goto out;
2197 }
2198 out:
2199 return res;
2200 }
2201
2202 /**
2203 * tipc_socket_stop - stop TIPC socket interface
2204 */
2205 void tipc_socket_stop(void)
2206 {
2207 sock_unregister(tipc_family_ops.family);
2208 proto_unregister(&tipc_proto);
2209 }