]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - net/tipc/socket.c
tipc: remove socket state SS_READY
[mirror_ubuntu-jammy-kernel.git] / net / tipc / socket.c
CommitLineData
b97bf3fd 1/*
02c00c2a 2 * net/tipc/socket.c: TIPC socket API
c4307285 3 *
3c724acd 4 * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
c5fa7b3c 5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
b97bf3fd
PL
6 * All rights reserved.
7 *
9ea1fd3c 8 * Redistribution and use in source and binary forms, with or without
b97bf3fd
PL
9 * modification, are permitted provided that the following conditions are met:
10 *
9ea1fd3c
PL
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
b97bf3fd 19 *
9ea1fd3c
PL
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
b97bf3fd
PL
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
07f6c4bc 37#include <linux/rhashtable.h>
b97bf3fd 38#include "core.h"
e2dafe87 39#include "name_table.h"
78acb1f9 40#include "node.h"
e2dafe87 41#include "link.h"
c637c103 42#include "name_distr.h"
2e84c60b 43#include "socket.h"
a6bf70f7 44#include "bcast.h"
49cc66ea 45#include "netlink.h"
2cf8aa19 46
07f6c4bc 47#define SS_LISTENING -1 /* socket is listening */
b97bf3fd 48
07f6c4bc 49#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
2f55c437 50#define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */
07f6c4bc
YX
51#define TIPC_FWD_MSG 1
52#define TIPC_CONN_OK 0
53#define TIPC_CONN_PROBING 1
54#define TIPC_MAX_PORT 0xffffffff
55#define TIPC_MIN_PORT 1
301bae56
JPM
56
57/**
58 * struct tipc_sock - TIPC socket structure
59 * @sk: socket - interacts with 'port' and with user via the socket API
301bae56
JPM
60 * @conn_type: TIPC type used when connection was established
61 * @conn_instance: TIPC instance used when connection was established
62 * @published: non-zero if port has one or more associated names
63 * @max_pkt: maximum packet size "hint" used when building messages sent by port
07f6c4bc 64 * @portid: unique port identity in TIPC socket hash table
301bae56 65 * @phdr: preformatted message header used when sending messages
301bae56
JPM
66 * @publications: list of publications for port
67 * @pub_count: total # of publications port has made during its lifetime
68 * @probing_state:
301bae56
JPM
69 * @conn_timeout: the time we can wait for an unresponded setup request
70 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
71 * @link_cong: non-zero if owner must sleep because of link congestion
72 * @sent_unacked: # messages sent by socket, and not yet acked by peer
73 * @rcv_unacked: # messages read by user, but not yet acked back to peer
aeda16b6 74 * @peer: 'connected' peer for dgram/rdm
07f6c4bc
YX
75 * @node: hash table node
76 * @rcu: rcu struct for tipc_sock
301bae56
JPM
77 */
78struct tipc_sock {
79 struct sock sk;
301bae56
JPM
80 u32 conn_type;
81 u32 conn_instance;
82 int published;
83 u32 max_pkt;
07f6c4bc 84 u32 portid;
301bae56
JPM
85 struct tipc_msg phdr;
86 struct list_head sock_list;
87 struct list_head publications;
88 u32 pub_count;
89 u32 probing_state;
301bae56
JPM
90 uint conn_timeout;
91 atomic_t dupl_rcvcnt;
92 bool link_cong;
10724cc7
JPM
93 u16 snt_unacked;
94 u16 snd_win;
60020e18 95 u16 peer_caps;
10724cc7
JPM
96 u16 rcv_unacked;
97 u16 rcv_win;
aeda16b6 98 struct sockaddr_tipc peer;
07f6c4bc
YX
99 struct rhash_head node;
100 struct rcu_head rcu;
301bae56 101};
b97bf3fd 102
4f4482dc 103static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
676d2369 104static void tipc_data_ready(struct sock *sk);
f288bef4 105static void tipc_write_space(struct sock *sk);
f4195d1e 106static void tipc_sock_destruct(struct sock *sk);
247f0f3c
YX
107static int tipc_release(struct socket *sock);
108static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
0abd8ff2 109static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
f2f2a96a 110static void tipc_sk_timeout(unsigned long data);
301bae56 111static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
0fc87aae 112 struct tipc_name_seq const *seq);
301bae56 113static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
0fc87aae 114 struct tipc_name_seq const *seq);
e05b31f4 115static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
07f6c4bc
YX
116static int tipc_sk_insert(struct tipc_sock *tsk);
117static void tipc_sk_remove(struct tipc_sock *tsk);
39a0295f
YX
118static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
119 size_t dsz);
120static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
b97bf3fd 121
bca65eae
FW
122static const struct proto_ops packet_ops;
123static const struct proto_ops stream_ops;
124static const struct proto_ops msg_ops;
b97bf3fd
PL
125static struct proto tipc_proto;
126
6cca7289
HX
127static const struct rhashtable_params tsk_rht_params;
128
c4307285 129/*
0c3141e9
AS
130 * Revised TIPC socket locking policy:
131 *
132 * Most socket operations take the standard socket lock when they start
133 * and hold it until they finish (or until they need to sleep). Acquiring
134 * this lock grants the owner exclusive access to the fields of the socket
135 * data structures, with the exception of the backlog queue. A few socket
136 * operations can be done without taking the socket lock because they only
137 * read socket information that never changes during the life of the socket.
138 *
139 * Socket operations may acquire the lock for the associated TIPC port if they
140 * need to perform an operation on the port. If any routine needs to acquire
141 * both the socket lock and the port lock it must take the socket lock first
142 * to avoid the risk of deadlock.
143 *
144 * The dispatcher handling incoming messages cannot grab the socket lock in
145 * the standard fashion, since invoked it runs at the BH level and cannot block.
146 * Instead, it checks to see if the socket lock is currently owned by someone,
147 * and either handles the message itself or adds it to the socket's backlog
148 * queue; in the latter case the queued message is processed once the process
149 * owning the socket lock releases it.
150 *
151 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
152 * the problem of a blocked socket operation preventing any other operations
153 * from occurring. However, applications must be careful if they have
154 * multiple threads trying to send (or receive) on the same socket, as these
155 * operations might interfere with each other. For example, doing a connect
156 * and a receive at the same time might allow the receive to consume the
157 * ACK message meant for the connect. While additional work could be done
158 * to try and overcome this, it doesn't seem to be worthwhile at the present.
159 *
160 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
161 * that another operation that must be performed in a non-blocking manner is
162 * not delayed for very long because the lock has already been taken.
163 *
164 * NOTE: This code assumes that certain fields of a port/socket pair are
165 * constant over its lifetime; such fields can be examined without taking
166 * the socket lock and/or port lock, and do not need to be re-read even
167 * after resuming processing after waiting. These fields include:
168 * - socket type
169 * - pointer to socket sk structure (aka tipc_sock structure)
170 * - pointer to port structure
171 * - port reference
172 */
173
c5898636
JPM
174static u32 tsk_own_node(struct tipc_sock *tsk)
175{
176 return msg_prevnode(&tsk->phdr);
177}
178
301bae56 179static u32 tsk_peer_node(struct tipc_sock *tsk)
2e84c60b 180{
301bae56 181 return msg_destnode(&tsk->phdr);
2e84c60b
JPM
182}
183
301bae56 184static u32 tsk_peer_port(struct tipc_sock *tsk)
2e84c60b 185{
301bae56 186 return msg_destport(&tsk->phdr);
2e84c60b
JPM
187}
188
301bae56 189static bool tsk_unreliable(struct tipc_sock *tsk)
2e84c60b 190{
301bae56 191 return msg_src_droppable(&tsk->phdr) != 0;
2e84c60b
JPM
192}
193
301bae56 194static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
2e84c60b 195{
301bae56 196 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
2e84c60b
JPM
197}
198
301bae56 199static bool tsk_unreturnable(struct tipc_sock *tsk)
2e84c60b 200{
301bae56 201 return msg_dest_droppable(&tsk->phdr) != 0;
2e84c60b
JPM
202}
203
301bae56 204static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
2e84c60b 205{
301bae56 206 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
2e84c60b
JPM
207}
208
301bae56 209static int tsk_importance(struct tipc_sock *tsk)
2e84c60b 210{
301bae56 211 return msg_importance(&tsk->phdr);
2e84c60b
JPM
212}
213
301bae56 214static int tsk_set_importance(struct tipc_sock *tsk, int imp)
2e84c60b
JPM
215{
216 if (imp > TIPC_CRITICAL_IMPORTANCE)
217 return -EINVAL;
301bae56 218 msg_set_importance(&tsk->phdr, (u32)imp);
2e84c60b
JPM
219 return 0;
220}
8826cde6 221
301bae56
JPM
222static struct tipc_sock *tipc_sk(const struct sock *sk)
223{
224 return container_of(sk, struct tipc_sock, sk);
225}
226
10724cc7 227static bool tsk_conn_cong(struct tipc_sock *tsk)
301bae56 228{
10724cc7
JPM
229 return tsk->snt_unacked >= tsk->snd_win;
230}
231
232/* tsk_blocks(): translate a buffer size in bytes to number of
233 * advertisable blocks, taking into account the ratio truesize(len)/len
234 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
235 */
236static u16 tsk_adv_blocks(int len)
237{
238 return len / FLOWCTL_BLK_SZ / 4;
239}
240
241/* tsk_inc(): increment counter for sent or received data
242 * - If block based flow control is not supported by peer we
243 * fall back to message based ditto, incrementing the counter
244 */
245static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
246{
247 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
248 return ((msglen / FLOWCTL_BLK_SZ) + 1);
249 return 1;
301bae56
JPM
250}
251
0c3141e9 252/**
2e84c60b 253 * tsk_advance_rx_queue - discard first buffer in socket receive queue
0c3141e9
AS
254 *
255 * Caller must hold socket lock
b97bf3fd 256 */
2e84c60b 257static void tsk_advance_rx_queue(struct sock *sk)
b97bf3fd 258{
5f6d9123 259 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
b97bf3fd
PL
260}
261
bcd3ffd4
JPM
262/* tipc_sk_respond() : send response message back to sender
263 */
264static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
265{
266 u32 selector;
267 u32 dnode;
268 u32 onode = tipc_own_addr(sock_net(sk));
269
270 if (!tipc_msg_reverse(onode, &skb, err))
271 return;
272
273 dnode = msg_destnode(buf_msg(skb));
274 selector = msg_origport(buf_msg(skb));
275 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
276}
277
b97bf3fd 278/**
2e84c60b 279 * tsk_rej_rx_queue - reject all buffers in socket receive queue
0c3141e9
AS
280 *
281 * Caller must hold socket lock
b97bf3fd 282 */
2e84c60b 283static void tsk_rej_rx_queue(struct sock *sk)
b97bf3fd 284{
a6ca1094 285 struct sk_buff *skb;
0c3141e9 286
bcd3ffd4
JPM
287 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
288 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
b97bf3fd
PL
289}
290
d6fb7e9c
PB
291static bool tipc_sk_connected(struct sock *sk)
292{
293 return sk->sk_socket->state == SS_CONNECTED;
294}
295
c752023a
PB
296/* tipc_sk_type_connectionless - check if the socket is datagram socket
297 * @sk: socket
298 *
299 * Returns true if connection less, false otherwise
300 */
301static bool tipc_sk_type_connectionless(struct sock *sk)
302{
303 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
304}
305
2e84c60b 306/* tsk_peer_msg - verify if message was sent by connected port's peer
0fc87aae
JPM
307 *
308 * Handles cases where the node's network address has changed from
309 * the default of <0.0.0> to its configured setting.
310 */
2e84c60b 311static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
0fc87aae 312{
d6fb7e9c
PB
313 struct sock *sk = &tsk->sk;
314 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
301bae56 315 u32 peer_port = tsk_peer_port(tsk);
0fc87aae
JPM
316 u32 orig_node;
317 u32 peer_node;
318
d6fb7e9c 319 if (unlikely(!tipc_sk_connected(sk)))
0fc87aae
JPM
320 return false;
321
322 if (unlikely(msg_origport(msg) != peer_port))
323 return false;
324
325 orig_node = msg_orignode(msg);
301bae56 326 peer_node = tsk_peer_node(tsk);
0fc87aae
JPM
327
328 if (likely(orig_node == peer_node))
329 return true;
330
34747539 331 if (!orig_node && (peer_node == tn->own_addr))
0fc87aae
JPM
332 return true;
333
34747539 334 if (!peer_node && (orig_node == tn->own_addr))
0fc87aae
JPM
335 return true;
336
337 return false;
338}
339
b97bf3fd 340/**
c5fa7b3c 341 * tipc_sk_create - create a TIPC socket
0c3141e9 342 * @net: network namespace (must be default network)
b97bf3fd
PL
343 * @sock: pre-allocated socket structure
344 * @protocol: protocol indicator (must be 0)
3f378b68 345 * @kern: caused by kernel or by userspace?
c4307285 346 *
0c3141e9
AS
347 * This routine creates additional data structures used by the TIPC socket,
348 * initializes them, and links them together.
b97bf3fd
PL
349 *
350 * Returns 0 on success, errno otherwise
351 */
58ed9442
JPM
352static int tipc_sk_create(struct net *net, struct socket *sock,
353 int protocol, int kern)
b97bf3fd 354{
c5898636 355 struct tipc_net *tn;
0c3141e9 356 const struct proto_ops *ops;
b97bf3fd 357 struct sock *sk;
58ed9442 358 struct tipc_sock *tsk;
5b8fa7ce 359 struct tipc_msg *msg;
0c3141e9
AS
360
361 /* Validate arguments */
b97bf3fd
PL
362 if (unlikely(protocol != 0))
363 return -EPROTONOSUPPORT;
364
b97bf3fd
PL
365 switch (sock->type) {
366 case SOCK_STREAM:
0c3141e9 367 ops = &stream_ops;
b97bf3fd
PL
368 break;
369 case SOCK_SEQPACKET:
0c3141e9 370 ops = &packet_ops;
b97bf3fd
PL
371 break;
372 case SOCK_DGRAM:
b97bf3fd 373 case SOCK_RDM:
0c3141e9 374 ops = &msg_ops;
b97bf3fd 375 break;
49978651 376 default:
49978651 377 return -EPROTOTYPE;
b97bf3fd
PL
378 }
379
0c3141e9 380 /* Allocate socket's protocol area */
11aa9c28 381 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
0c3141e9 382 if (sk == NULL)
b97bf3fd 383 return -ENOMEM;
b97bf3fd 384
58ed9442 385 tsk = tipc_sk(sk);
301bae56 386 tsk->max_pkt = MAX_PKT_DEFAULT;
301bae56
JPM
387 INIT_LIST_HEAD(&tsk->publications);
388 msg = &tsk->phdr;
c5898636
JPM
389 tn = net_generic(sock_net(sk), tipc_net_id);
390 tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
5b8fa7ce 391 NAMED_H_SIZE, 0);
b97bf3fd 392
0c3141e9 393 /* Finish initializing socket data structures */
0c3141e9 394 sock->ops = ops;
c752023a 395 sock->state = SS_UNCONNECTED;
0c3141e9 396 sock_init_data(sock, sk);
07f6c4bc 397 if (tipc_sk_insert(tsk)) {
c19ca6cb 398 pr_warn("Socket create failed; port number exhausted\n");
07f6c4bc
YX
399 return -EINVAL;
400 }
401 msg_set_origport(msg, tsk->portid);
3721e9c7 402 setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
4f4482dc 403 sk->sk_backlog_rcv = tipc_backlog_rcv;
cc79dd1b 404 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
f288bef4
YX
405 sk->sk_data_ready = tipc_data_ready;
406 sk->sk_write_space = tipc_write_space;
f4195d1e 407 sk->sk_destruct = tipc_sock_destruct;
4f4482dc
JPM
408 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
409 atomic_set(&tsk->dupl_rcvcnt, 0);
7ef43eba 410
10724cc7
JPM
411 /* Start out with safe limits until we receive an advertised window */
412 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
413 tsk->rcv_win = tsk->snd_win;
414
c752023a 415 if (tipc_sk_type_connectionless(sk)) {
301bae56 416 tsk_set_unreturnable(tsk, true);
0c3141e9 417 if (sock->type == SOCK_DGRAM)
301bae56 418 tsk_set_unreliable(tsk, true);
0c3141e9 419 }
b97bf3fd
PL
420 return 0;
421}
422
07f6c4bc
YX
423static void tipc_sk_callback(struct rcu_head *head)
424{
425 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
426
427 sock_put(&tsk->sk);
428}
429
b97bf3fd 430/**
247f0f3c 431 * tipc_release - destroy a TIPC socket
b97bf3fd
PL
432 * @sock: socket to destroy
433 *
434 * This routine cleans up any messages that are still queued on the socket.
435 * For DGRAM and RDM socket types, all queued messages are rejected.
436 * For SEQPACKET and STREAM socket types, the first message is rejected
437 * and any others are discarded. (If the first message on a STREAM socket
438 * is partially-read, it is discarded and the next one is rejected instead.)
c4307285 439 *
b97bf3fd
PL
440 * NOTE: Rejected messages are not necessarily returned to the sender! They
441 * are returned or discarded according to the "destination droppable" setting
442 * specified for the message by the sender.
443 *
444 * Returns 0 on success, errno otherwise
445 */
247f0f3c 446static int tipc_release(struct socket *sock)
b97bf3fd 447{
b97bf3fd 448 struct sock *sk = sock->sk;
357c4774 449 struct net *net;
58ed9442 450 struct tipc_sock *tsk;
a6ca1094 451 struct sk_buff *skb;
1ea23a21 452 u32 dnode;
b97bf3fd 453
0c3141e9
AS
454 /*
455 * Exit if socket isn't fully initialized (occurs when a failed accept()
456 * releases a pre-allocated child socket that was never used)
457 */
0c3141e9 458 if (sk == NULL)
b97bf3fd 459 return 0;
c4307285 460
357c4774 461 net = sock_net(sk);
58ed9442 462 tsk = tipc_sk(sk);
0c3141e9
AS
463 lock_sock(sk);
464
465 /*
466 * Reject all unreceived messages, except on an active connection
467 * (which disconnects locally & sends a 'FIN+' to peer)
468 */
301bae56 469 dnode = tsk_peer_node(tsk);
b97bf3fd 470 while (sock->state != SS_DISCONNECTING) {
a6ca1094
YX
471 skb = __skb_dequeue(&sk->sk_receive_queue);
472 if (skb == NULL)
b97bf3fd 473 break;
ba8aebe9 474 if (TIPC_SKB_CB(skb)->bytes_read)
a6ca1094 475 kfree_skb(skb);
0c3141e9
AS
476 else {
477 if ((sock->state == SS_CONNECTING) ||
478 (sock->state == SS_CONNECTED)) {
479 sock->state = SS_DISCONNECTING;
f2f9800d 480 tipc_node_remove_conn(net, dnode, tsk->portid);
0c3141e9 481 }
bcd3ffd4 482 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
0c3141e9 483 }
b97bf3fd
PL
484 }
485
301bae56 486 tipc_sk_withdraw(tsk, 0, NULL);
1ea23a21 487 sk_stop_timer(sk, &sk->sk_timer);
07f6c4bc 488 tipc_sk_remove(tsk);
d6fb7e9c 489 if (tipc_sk_connected(sk)) {
c5898636 490 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
34747539 491 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
c5898636 492 tsk_own_node(tsk), tsk_peer_port(tsk),
07f6c4bc 493 tsk->portid, TIPC_ERR_NO_PORT);
a6ca1094 494 if (skb)
af9b028e 495 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
f2f9800d 496 tipc_node_remove_conn(net, dnode, tsk->portid);
5b8fa7ce 497 }
b97bf3fd 498
0c3141e9 499 /* Reject any messages that accumulated in backlog queue */
0c3141e9
AS
500 sock->state = SS_DISCONNECTING;
501 release_sock(sk);
07f6c4bc
YX
502
503 call_rcu(&tsk->rcu, tipc_sk_callback);
0c3141e9 504 sock->sk = NULL;
b97bf3fd 505
065d7e39 506 return 0;
b97bf3fd
PL
507}
508
509/**
247f0f3c 510 * tipc_bind - associate or disassocate TIPC name(s) with a socket
b97bf3fd
PL
511 * @sock: socket structure
512 * @uaddr: socket address describing name(s) and desired operation
513 * @uaddr_len: size of socket address data structure
c4307285 514 *
b97bf3fd
PL
515 * Name and name sequence binding is indicated using a positive scope value;
516 * a negative scope value unbinds the specified name. Specifying no name
517 * (i.e. a socket address length of 0) unbinds all names from the socket.
c4307285 518 *
b97bf3fd 519 * Returns 0 on success, errno otherwise
0c3141e9
AS
520 *
521 * NOTE: This routine doesn't need to take the socket lock since it doesn't
522 * access any non-constant socket information.
b97bf3fd 523 */
247f0f3c
YX
524static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
525 int uaddr_len)
b97bf3fd 526{
84602761 527 struct sock *sk = sock->sk;
b97bf3fd 528 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
58ed9442 529 struct tipc_sock *tsk = tipc_sk(sk);
84602761 530 int res = -EINVAL;
b97bf3fd 531
84602761
YX
532 lock_sock(sk);
533 if (unlikely(!uaddr_len)) {
301bae56 534 res = tipc_sk_withdraw(tsk, 0, NULL);
84602761
YX
535 goto exit;
536 }
c4307285 537
84602761
YX
538 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
539 res = -EINVAL;
540 goto exit;
541 }
542 if (addr->family != AF_TIPC) {
543 res = -EAFNOSUPPORT;
544 goto exit;
545 }
b97bf3fd 546
b97bf3fd
PL
547 if (addr->addrtype == TIPC_ADDR_NAME)
548 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
84602761
YX
549 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
550 res = -EAFNOSUPPORT;
551 goto exit;
552 }
c4307285 553
13a2e898 554 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
7d0ab17b 555 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
84602761
YX
556 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
557 res = -EACCES;
558 goto exit;
559 }
c422f1bd 560
84602761 561 res = (addr->scope > 0) ?
301bae56
JPM
562 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
563 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
84602761
YX
564exit:
565 release_sock(sk);
566 return res;
b97bf3fd
PL
567}
568
c4307285 569/**
247f0f3c 570 * tipc_getname - get port ID of socket or peer socket
b97bf3fd
PL
571 * @sock: socket structure
572 * @uaddr: area for returned socket address
573 * @uaddr_len: area for returned length of socket address
2da59918 574 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
c4307285 575 *
b97bf3fd 576 * Returns 0 on success, errno otherwise
0c3141e9 577 *
2da59918
AS
578 * NOTE: This routine doesn't need to take the socket lock since it only
579 * accesses socket information that is unchanging (or which changes in
0e65967e 580 * a completely predictable manner).
b97bf3fd 581 */
247f0f3c
YX
582static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
583 int *uaddr_len, int peer)
b97bf3fd 584{
b97bf3fd 585 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
58ed9442 586 struct tipc_sock *tsk = tipc_sk(sock->sk);
34747539 587 struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
b97bf3fd 588
88f8a5e3 589 memset(addr, 0, sizeof(*addr));
0c3141e9 590 if (peer) {
2da59918
AS
591 if ((sock->state != SS_CONNECTED) &&
592 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
593 return -ENOTCONN;
301bae56
JPM
594 addr->addr.id.ref = tsk_peer_port(tsk);
595 addr->addr.id.node = tsk_peer_node(tsk);
0c3141e9 596 } else {
07f6c4bc 597 addr->addr.id.ref = tsk->portid;
34747539 598 addr->addr.id.node = tn->own_addr;
0c3141e9 599 }
b97bf3fd
PL
600
601 *uaddr_len = sizeof(*addr);
602 addr->addrtype = TIPC_ADDR_ID;
603 addr->family = AF_TIPC;
604 addr->scope = 0;
b97bf3fd
PL
605 addr->addr.name.domain = 0;
606
0c3141e9 607 return 0;
b97bf3fd
PL
608}
609
610/**
247f0f3c 611 * tipc_poll - read and possibly block on pollmask
b97bf3fd
PL
612 * @file: file structure associated with the socket
613 * @sock: socket for which to calculate the poll bits
614 * @wait: ???
615 *
9b674e82
AS
616 * Returns pollmask value
617 *
618 * COMMENTARY:
619 * It appears that the usual socket locking mechanisms are not useful here
620 * since the pollmask info is potentially out-of-date the moment this routine
621 * exits. TCP and other protocols seem to rely on higher level poll routines
622 * to handle any preventable race conditions, so TIPC will do the same ...
623 *
624 * TIPC sets the returned events as follows:
f662c070
AS
625 *
626 * socket state flags set
627 * ------------ ---------
628 * unconnected no read flags
c4fc298a 629 * POLLOUT if port is not congested
f662c070
AS
630 *
631 * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
632 * no write flags
633 *
634 * connected POLLIN/POLLRDNORM if data in rx queue
635 * POLLOUT if port is not congested
636 *
637 * disconnecting POLLIN/POLLRDNORM/POLLHUP
638 * no write flags
639 *
640 * listening POLLIN if SYN in rx queue
641 * no write flags
642 *
643 * ready POLLIN/POLLRDNORM if data in rx queue
644 * [connectionless] POLLOUT (since port cannot be congested)
645 *
646 * IMPORTANT: The fact that a read or write operation is indicated does NOT
647 * imply that the operation will succeed, merely that it should be performed
648 * and will not block.
b97bf3fd 649 */
247f0f3c
YX
650static unsigned int tipc_poll(struct file *file, struct socket *sock,
651 poll_table *wait)
b97bf3fd 652{
9b674e82 653 struct sock *sk = sock->sk;
58ed9442 654 struct tipc_sock *tsk = tipc_sk(sk);
f662c070 655 u32 mask = 0;
9b674e82 656
f288bef4 657 sock_poll_wait(file, sk_sleep(sk), wait);
9b674e82 658
c752023a
PB
659 if (tipc_sk_type_connectionless(sk)) {
660 if (!tsk->link_cong)
661 mask |= POLLOUT;
662 if (!skb_queue_empty(&sk->sk_receive_queue))
663 mask |= (POLLIN | POLLRDNORM);
664 return mask;
665 }
666
f662c070 667 switch ((int)sock->state) {
c4fc298a 668 case SS_UNCONNECTED:
60120526 669 if (!tsk->link_cong)
c4fc298a
EH
670 mask |= POLLOUT;
671 break;
f662c070 672 case SS_CONNECTED:
301bae56 673 if (!tsk->link_cong && !tsk_conn_cong(tsk))
f662c070
AS
674 mask |= POLLOUT;
675 /* fall thru' */
676 case SS_CONNECTING:
677 case SS_LISTENING:
678 if (!skb_queue_empty(&sk->sk_receive_queue))
679 mask |= (POLLIN | POLLRDNORM);
680 break;
681 case SS_DISCONNECTING:
682 mask = (POLLIN | POLLRDNORM | POLLHUP);
683 break;
684 }
9b674e82
AS
685
686 return mask;
b97bf3fd
PL
687}
688
0abd8ff2
JPM
689/**
690 * tipc_sendmcast - send multicast message
691 * @sock: socket structure
692 * @seq: destination address
562640f3 693 * @msg: message to send
0abd8ff2
JPM
694 * @dsz: total length of message data
695 * @timeo: timeout to wait for wakeup
696 *
697 * Called from function tipc_sendmsg(), which has done all sanity checks
698 * Returns the number of bytes sent on success, or errno
699 */
700static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
562640f3 701 struct msghdr *msg, size_t dsz, long timeo)
0abd8ff2
JPM
702{
703 struct sock *sk = sock->sk;
c5898636 704 struct tipc_sock *tsk = tipc_sk(sk);
f2f9800d 705 struct net *net = sock_net(sk);
c5898636 706 struct tipc_msg *mhdr = &tsk->phdr;
f214fc40 707 struct sk_buff_head pktchain;
f25dcc76 708 struct iov_iter save = msg->msg_iter;
0abd8ff2
JPM
709 uint mtu;
710 int rc;
711
7cf87fa2
PB
712 if (!timeo && tsk->link_cong)
713 return -ELINKCONG;
714
0abd8ff2
JPM
715 msg_set_type(mhdr, TIPC_MCAST_MSG);
716 msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
717 msg_set_destport(mhdr, 0);
718 msg_set_destnode(mhdr, 0);
719 msg_set_nametype(mhdr, seq->type);
720 msg_set_namelower(mhdr, seq->lower);
721 msg_set_nameupper(mhdr, seq->upper);
722 msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
723
f214fc40
PB
724 skb_queue_head_init(&pktchain);
725
0abd8ff2 726new_mtu:
959e1781 727 mtu = tipc_bcast_get_mtu(net);
f214fc40 728 rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
0abd8ff2
JPM
729 if (unlikely(rc < 0))
730 return rc;
731
732 do {
f214fc40 733 rc = tipc_bcast_xmit(net, &pktchain);
22d85c79
JPM
734 if (likely(!rc))
735 return dsz;
736
737 if (rc == -ELINKCONG) {
738 tsk->link_cong = 1;
739 rc = tipc_wait_for_sndmsg(sock, &timeo);
740 if (!rc)
741 continue;
0abd8ff2 742 }
f214fc40 743 __skb_queue_purge(&pktchain);
f25dcc76
AV
744 if (rc == -EMSGSIZE) {
745 msg->msg_iter = save;
0abd8ff2 746 goto new_mtu;
f25dcc76 747 }
22d85c79
JPM
748 break;
749 } while (1);
0abd8ff2
JPM
750 return rc;
751}
752
cb1b7280
JPM
753/**
754 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
755 * @arrvq: queue with arriving messages, to be cloned after destination lookup
756 * @inputq: queue with cloned messages, delivered to socket after dest lookup
757 *
758 * Multi-threaded: parallel calls with reference to same queues may occur
078bec82 759 */
cb1b7280
JPM
760void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
761 struct sk_buff_head *inputq)
078bec82 762{
cb1b7280 763 struct tipc_msg *msg;
3c724acd 764 struct tipc_plist dports;
3c724acd 765 u32 portid;
078bec82 766 u32 scope = TIPC_CLUSTER_SCOPE;
cb1b7280
JPM
767 struct sk_buff_head tmpq;
768 uint hsz;
769 struct sk_buff *skb, *_skb;
3c724acd 770
cb1b7280 771 __skb_queue_head_init(&tmpq);
3c724acd 772 tipc_plist_init(&dports);
078bec82 773
cb1b7280
JPM
774 skb = tipc_skb_peek(arrvq, &inputq->lock);
775 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
776 msg = buf_msg(skb);
777 hsz = skb_headroom(skb) + msg_hdr_sz(msg);
778
779 if (in_own_node(net, msg_orignode(msg)))
780 scope = TIPC_NODE_SCOPE;
781
782 /* Create destination port list and message clones: */
783 tipc_nametbl_mc_translate(net,
784 msg_nametype(msg), msg_namelower(msg),
785 msg_nameupper(msg), scope, &dports);
786 portid = tipc_plist_pop(&dports);
787 for (; portid; portid = tipc_plist_pop(&dports)) {
788 _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
789 if (_skb) {
790 msg_set_destport(buf_msg(_skb), portid);
791 __skb_queue_tail(&tmpq, _skb);
792 continue;
793 }
794 pr_warn("Failed to clone mcast rcv buffer\n");
078bec82 795 }
cb1b7280
JPM
796 /* Append to inputq if not already done by other thread */
797 spin_lock_bh(&inputq->lock);
798 if (skb_peek(arrvq) == skb) {
799 skb_queue_splice_tail_init(&tmpq, inputq);
800 kfree_skb(__skb_dequeue(arrvq));
801 }
802 spin_unlock_bh(&inputq->lock);
803 __skb_queue_purge(&tmpq);
804 kfree_skb(skb);
078bec82 805 }
cb1b7280 806 tipc_sk_rcv(net, inputq);
078bec82
JPM
807}
808
ac0074ee
JPM
809/**
810 * tipc_sk_proto_rcv - receive a connection mng protocol message
811 * @tsk: receiving socket
bcd3ffd4 812 * @skb: pointer to message buffer.
ac0074ee 813 */
f1d048f2
JPM
814static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
815 struct sk_buff_head *xmitq)
ac0074ee 816{
bcd3ffd4 817 struct sock *sk = &tsk->sk;
f1d048f2 818 u32 onode = tsk_own_node(tsk);
bcd3ffd4
JPM
819 struct tipc_msg *hdr = buf_msg(skb);
820 int mtyp = msg_type(hdr);
10724cc7 821 bool conn_cong;
bcd3ffd4 822
ac0074ee 823 /* Ignore if connection cannot be validated: */
bcd3ffd4 824 if (!tsk_peer_msg(tsk, hdr))
ac0074ee
JPM
825 goto exit;
826
301bae56 827 tsk->probing_state = TIPC_CONN_OK;
ac0074ee 828
bcd3ffd4
JPM
829 if (mtyp == CONN_PROBE) {
830 msg_set_type(hdr, CONN_PROBE_REPLY);
f1d048f2
JPM
831 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
832 __skb_queue_tail(xmitq, skb);
bcd3ffd4
JPM
833 return;
834 } else if (mtyp == CONN_ACK) {
301bae56 835 conn_cong = tsk_conn_cong(tsk);
10724cc7
JPM
836 tsk->snt_unacked -= msg_conn_ack(hdr);
837 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
838 tsk->snd_win = msg_adv_win(hdr);
60120526 839 if (conn_cong)
bcd3ffd4
JPM
840 sk->sk_write_space(sk);
841 } else if (mtyp != CONN_PROBE_REPLY) {
842 pr_warn("Received unknown CONN_PROTO msg\n");
ac0074ee 843 }
ac0074ee 844exit:
bcd3ffd4 845 kfree_skb(skb);
ac0074ee
JPM
846}
847
3f40504f
YX
848static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
849{
850 struct sock *sk = sock->sk;
58ed9442 851 struct tipc_sock *tsk = tipc_sk(sk);
3f40504f
YX
852 DEFINE_WAIT(wait);
853 int done;
854
855 do {
856 int err = sock_error(sk);
857 if (err)
858 return err;
859 if (sock->state == SS_DISCONNECTING)
860 return -EPIPE;
861 if (!*timeo_p)
862 return -EAGAIN;
863 if (signal_pending(current))
864 return sock_intr_errno(*timeo_p);
865
866 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
60120526 867 done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
3f40504f
YX
868 finish_wait(sk_sleep(sk), &wait);
869 } while (!done);
870 return 0;
871}
872
b97bf3fd 873/**
247f0f3c 874 * tipc_sendmsg - send message in connectionless manner
b97bf3fd
PL
875 * @sock: socket structure
876 * @m: message to send
e2dafe87 877 * @dsz: amount of user data to be sent
c4307285 878 *
b97bf3fd 879 * Message must have an destination specified explicitly.
c4307285 880 * Used for SOCK_RDM and SOCK_DGRAM messages,
b97bf3fd
PL
881 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
882 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
c4307285 883 *
b97bf3fd
PL
884 * Returns the number of bytes sent on success, or errno otherwise
885 */
1b784140 886static int tipc_sendmsg(struct socket *sock,
e2dafe87 887 struct msghdr *m, size_t dsz)
39a0295f
YX
888{
889 struct sock *sk = sock->sk;
890 int ret;
891
892 lock_sock(sk);
893 ret = __tipc_sendmsg(sock, m, dsz);
894 release_sock(sk);
895
896 return ret;
897}
898
899static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
b97bf3fd 900{
e2dafe87 901 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
0c3141e9 902 struct sock *sk = sock->sk;
58ed9442 903 struct tipc_sock *tsk = tipc_sk(sk);
f2f9800d 904 struct net *net = sock_net(sk);
301bae56 905 struct tipc_msg *mhdr = &tsk->phdr;
e2dafe87 906 u32 dnode, dport;
f214fc40 907 struct sk_buff_head pktchain;
c752023a 908 bool is_connectionless = tipc_sk_type_connectionless(sk);
a6ca1094 909 struct sk_buff *skb;
f2f8036e 910 struct tipc_name_seq *seq;
f25dcc76 911 struct iov_iter save;
e2dafe87 912 u32 mtu;
3f40504f 913 long timeo;
88b17b6a 914 int rc;
b97bf3fd 915
e2dafe87 916 if (dsz > TIPC_MAX_USER_MSG_SIZE)
c29c3f70 917 return -EMSGSIZE;
f2f8036e 918 if (unlikely(!dest)) {
c752023a 919 if (is_connectionless && tsk->peer.family == AF_TIPC)
aeda16b6 920 dest = &tsk->peer;
f2f8036e
EH
921 else
922 return -EDESTADDRREQ;
923 } else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
924 dest->family != AF_TIPC) {
925 return -EINVAL;
926 }
c752023a 927 if (!is_connectionless) {
39a0295f
YX
928 if (sock->state == SS_LISTENING)
929 return -EPIPE;
930 if (sock->state != SS_UNCONNECTED)
931 return -EISCONN;
932 if (tsk->published)
933 return -EOPNOTSUPP;
3388007b 934 if (dest->addrtype == TIPC_ADDR_NAME) {
301bae56
JPM
935 tsk->conn_type = dest->addr.name.name.type;
936 tsk->conn_instance = dest->addr.name.name.instance;
3388007b 937 }
b97bf3fd 938 }
f2f8036e 939 seq = &dest->addr.nameseq;
3f40504f 940 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
e2dafe87
JPM
941
942 if (dest->addrtype == TIPC_ADDR_MCAST) {
39a0295f 943 return tipc_sendmcast(sock, seq, m, dsz, timeo);
e2dafe87
JPM
944 } else if (dest->addrtype == TIPC_ADDR_NAME) {
945 u32 type = dest->addr.name.name.type;
946 u32 inst = dest->addr.name.name.instance;
947 u32 domain = dest->addr.name.domain;
948
949 dnode = domain;
950 msg_set_type(mhdr, TIPC_NAMED_MSG);
951 msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
952 msg_set_nametype(mhdr, type);
953 msg_set_nameinst(mhdr, inst);
954 msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
4ac1c8d0 955 dport = tipc_nametbl_translate(net, type, inst, &dnode);
e2dafe87
JPM
956 msg_set_destnode(mhdr, dnode);
957 msg_set_destport(mhdr, dport);
39a0295f
YX
958 if (unlikely(!dport && !dnode))
959 return -EHOSTUNREACH;
e2dafe87
JPM
960 } else if (dest->addrtype == TIPC_ADDR_ID) {
961 dnode = dest->addr.id.node;
962 msg_set_type(mhdr, TIPC_DIRECT_MSG);
963 msg_set_lookup_scope(mhdr, 0);
964 msg_set_destnode(mhdr, dnode);
965 msg_set_destport(mhdr, dest->addr.id.ref);
966 msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
967 }
968
f214fc40 969 skb_queue_head_init(&pktchain);
f25dcc76 970 save = m->msg_iter;
e2dafe87 971new_mtu:
f2f9800d 972 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
f214fc40 973 rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
e2dafe87 974 if (rc < 0)
39a0295f 975 return rc;
e2dafe87
JPM
976
977 do {
f214fc40 978 skb = skb_peek(&pktchain);
a6ca1094 979 TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
f214fc40 980 rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
22d85c79 981 if (likely(!rc)) {
c752023a 982 if (!is_connectionless)
0c3141e9 983 sock->state = SS_CONNECTING;
22d85c79 984 return dsz;
c4307285 985 }
22d85c79
JPM
986 if (rc == -ELINKCONG) {
987 tsk->link_cong = 1;
988 rc = tipc_wait_for_sndmsg(sock, &timeo);
989 if (!rc)
990 continue;
991 }
f214fc40 992 __skb_queue_purge(&pktchain);
f25dcc76
AV
993 if (rc == -EMSGSIZE) {
994 m->msg_iter = save;
e2dafe87 995 goto new_mtu;
f25dcc76 996 }
22d85c79
JPM
997 break;
998 } while (1);
e2dafe87
JPM
999
1000 return rc;
b97bf3fd
PL
1001}
1002
391a6dd1
YX
1003static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
1004{
1005 struct sock *sk = sock->sk;
58ed9442 1006 struct tipc_sock *tsk = tipc_sk(sk);
391a6dd1
YX
1007 DEFINE_WAIT(wait);
1008 int done;
1009
1010 do {
1011 int err = sock_error(sk);
1012 if (err)
1013 return err;
1014 if (sock->state == SS_DISCONNECTING)
1015 return -EPIPE;
1016 else if (sock->state != SS_CONNECTED)
1017 return -ENOTCONN;
1018 if (!*timeo_p)
1019 return -EAGAIN;
1020 if (signal_pending(current))
1021 return sock_intr_errno(*timeo_p);
1022
1023 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1024 done = sk_wait_event(sk, timeo_p,
60120526 1025 (!tsk->link_cong &&
301bae56 1026 !tsk_conn_cong(tsk)) ||
d6fb7e9c 1027 !tipc_sk_connected(sk));
391a6dd1
YX
1028 finish_wait(sk_sleep(sk), &wait);
1029 } while (!done);
1030 return 0;
1031}
1032
c4307285 1033/**
4ccfe5e0 1034 * tipc_send_stream - send stream-oriented data
b97bf3fd 1035 * @sock: socket structure
4ccfe5e0
JPM
1036 * @m: data to send
1037 * @dsz: total length of data to be transmitted
c4307285 1038 *
4ccfe5e0 1039 * Used for SOCK_STREAM data.
c4307285 1040 *
4ccfe5e0
JPM
1041 * Returns the number of bytes sent on success (or partial success),
1042 * or errno if no data sent
b97bf3fd 1043 */
1b784140 1044static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
39a0295f
YX
1045{
1046 struct sock *sk = sock->sk;
1047 int ret;
1048
1049 lock_sock(sk);
1050 ret = __tipc_send_stream(sock, m, dsz);
1051 release_sock(sk);
1052
1053 return ret;
1054}
1055
1056static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
b97bf3fd 1057{
0c3141e9 1058 struct sock *sk = sock->sk;
f2f9800d 1059 struct net *net = sock_net(sk);
58ed9442 1060 struct tipc_sock *tsk = tipc_sk(sk);
301bae56 1061 struct tipc_msg *mhdr = &tsk->phdr;
f214fc40 1062 struct sk_buff_head pktchain;
342dfc30 1063 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
07f6c4bc 1064 u32 portid = tsk->portid;
4ccfe5e0 1065 int rc = -EINVAL;
391a6dd1 1066 long timeo;
4ccfe5e0
JPM
1067 u32 dnode;
1068 uint mtu, send, sent = 0;
f25dcc76 1069 struct iov_iter save;
10724cc7 1070 int hlen = MIN_H_SIZE;
b97bf3fd
PL
1071
1072 /* Handle implied connection establishment */
4ccfe5e0 1073 if (unlikely(dest)) {
39a0295f 1074 rc = __tipc_sendmsg(sock, m, dsz);
10724cc7 1075 hlen = msg_hdr_sz(mhdr);
4ccfe5e0 1076 if (dsz && (dsz == rc))
10724cc7 1077 tsk->snt_unacked = tsk_inc(tsk, dsz + hlen);
4ccfe5e0
JPM
1078 return rc;
1079 }
1080 if (dsz > (uint)INT_MAX)
c29c3f70
AS
1081 return -EMSGSIZE;
1082
391a6dd1
YX
1083 if (unlikely(sock->state != SS_CONNECTED)) {
1084 if (sock->state == SS_DISCONNECTING)
39a0295f 1085 return -EPIPE;
391a6dd1 1086 else
39a0295f 1087 return -ENOTCONN;
391a6dd1 1088 }
1d835874 1089
391a6dd1 1090 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
7cf87fa2
PB
1091 if (!timeo && tsk->link_cong)
1092 return -ELINKCONG;
1093
301bae56 1094 dnode = tsk_peer_node(tsk);
f214fc40 1095 skb_queue_head_init(&pktchain);
4ccfe5e0
JPM
1096
1097next:
f25dcc76 1098 save = m->msg_iter;
301bae56 1099 mtu = tsk->max_pkt;
4ccfe5e0 1100 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
f214fc40 1101 rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
4ccfe5e0 1102 if (unlikely(rc < 0))
39a0295f 1103 return rc;
f214fc40 1104
c4307285 1105 do {
301bae56 1106 if (likely(!tsk_conn_cong(tsk))) {
f214fc40 1107 rc = tipc_node_xmit(net, &pktchain, dnode, portid);
4ccfe5e0 1108 if (likely(!rc)) {
10724cc7 1109 tsk->snt_unacked += tsk_inc(tsk, send + hlen);
4ccfe5e0
JPM
1110 sent += send;
1111 if (sent == dsz)
22d85c79 1112 return dsz;
4ccfe5e0
JPM
1113 goto next;
1114 }
1115 if (rc == -EMSGSIZE) {
f214fc40 1116 __skb_queue_purge(&pktchain);
f2f9800d
YX
1117 tsk->max_pkt = tipc_node_get_mtu(net, dnode,
1118 portid);
f25dcc76 1119 m->msg_iter = save;
4ccfe5e0
JPM
1120 goto next;
1121 }
1122 if (rc != -ELINKCONG)
1123 break;
22d85c79 1124
50100a5e 1125 tsk->link_cong = 1;
4ccfe5e0
JPM
1126 }
1127 rc = tipc_wait_for_sndpkt(sock, &timeo);
1128 } while (!rc);
39a0295f 1129
f214fc40 1130 __skb_queue_purge(&pktchain);
4ccfe5e0 1131 return sent ? sent : rc;
b97bf3fd
PL
1132}
1133
c4307285 1134/**
4ccfe5e0 1135 * tipc_send_packet - send a connection-oriented message
b97bf3fd 1136 * @sock: socket structure
4ccfe5e0
JPM
1137 * @m: message to send
1138 * @dsz: length of data to be transmitted
c4307285 1139 *
4ccfe5e0 1140 * Used for SOCK_SEQPACKET messages.
c4307285 1141 *
4ccfe5e0 1142 * Returns the number of bytes sent on success, or errno otherwise
b97bf3fd 1143 */
1b784140 1144static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
b97bf3fd 1145{
4ccfe5e0
JPM
1146 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1147 return -EMSGSIZE;
b97bf3fd 1148
1b784140 1149 return tipc_send_stream(sock, m, dsz);
b97bf3fd
PL
1150}
1151
dadebc00 1152/* tipc_sk_finish_conn - complete the setup of a connection
b97bf3fd 1153 */
301bae56 1154static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
dadebc00 1155 u32 peer_node)
b97bf3fd 1156{
3721e9c7
YX
1157 struct sock *sk = &tsk->sk;
1158 struct net *net = sock_net(sk);
301bae56 1159 struct tipc_msg *msg = &tsk->phdr;
b97bf3fd 1160
dadebc00
JPM
1161 msg_set_destnode(msg, peer_node);
1162 msg_set_destport(msg, peer_port);
1163 msg_set_type(msg, TIPC_CONN_MSG);
1164 msg_set_lookup_scope(msg, 0);
1165 msg_set_hdr_sz(msg, SHORT_H_SIZE);
584d24b3 1166
301bae56 1167 tsk->probing_state = TIPC_CONN_OK;
360aab6b 1168 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
f2f9800d
YX
1169 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1170 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
60020e18 1171 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
10724cc7
JPM
1172 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1173 return;
1174
1175 /* Fall back to message based flow control */
1176 tsk->rcv_win = FLOWCTL_MSG_WIN;
1177 tsk->snd_win = FLOWCTL_MSG_WIN;
b97bf3fd
PL
1178}
1179
1180/**
1181 * set_orig_addr - capture sender's address for received message
1182 * @m: descriptor for message info
1183 * @msg: received message header
c4307285 1184 *
b97bf3fd
PL
1185 * Note: Address is not captured if not requested by receiver.
1186 */
05790c64 1187static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
b97bf3fd 1188{
342dfc30 1189 DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
b97bf3fd 1190
c4307285 1191 if (addr) {
b97bf3fd
PL
1192 addr->family = AF_TIPC;
1193 addr->addrtype = TIPC_ADDR_ID;
60085c3d 1194 memset(&addr->addr, 0, sizeof(addr->addr));
b97bf3fd
PL
1195 addr->addr.id.ref = msg_origport(msg);
1196 addr->addr.id.node = msg_orignode(msg);
0e65967e
AS
1197 addr->addr.name.domain = 0; /* could leave uninitialized */
1198 addr->scope = 0; /* could leave uninitialized */
b97bf3fd
PL
1199 m->msg_namelen = sizeof(struct sockaddr_tipc);
1200 }
1201}
1202
1203/**
301bae56 1204 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
b97bf3fd
PL
1205 * @m: descriptor for message info
1206 * @msg: received message header
301bae56 1207 * @tsk: TIPC port associated with message
c4307285 1208 *
b97bf3fd 1209 * Note: Ancillary data is not captured if not requested by receiver.
c4307285 1210 *
b97bf3fd
PL
1211 * Returns 0 if successful, otherwise errno
1212 */
301bae56
JPM
1213static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1214 struct tipc_sock *tsk)
b97bf3fd
PL
1215{
1216 u32 anc_data[3];
1217 u32 err;
1218 u32 dest_type;
3546c750 1219 int has_name;
b97bf3fd
PL
1220 int res;
1221
1222 if (likely(m->msg_controllen == 0))
1223 return 0;
1224
1225 /* Optionally capture errored message object(s) */
b97bf3fd
PL
1226 err = msg ? msg_errcode(msg) : 0;
1227 if (unlikely(err)) {
1228 anc_data[0] = err;
1229 anc_data[1] = msg_data_sz(msg);
2db9983a
AS
1230 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1231 if (res)
b97bf3fd 1232 return res;
2db9983a
AS
1233 if (anc_data[1]) {
1234 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1235 msg_data(msg));
1236 if (res)
1237 return res;
1238 }
b97bf3fd
PL
1239 }
1240
1241 /* Optionally capture message destination object */
b97bf3fd
PL
1242 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1243 switch (dest_type) {
1244 case TIPC_NAMED_MSG:
3546c750 1245 has_name = 1;
b97bf3fd
PL
1246 anc_data[0] = msg_nametype(msg);
1247 anc_data[1] = msg_namelower(msg);
1248 anc_data[2] = msg_namelower(msg);
1249 break;
1250 case TIPC_MCAST_MSG:
3546c750 1251 has_name = 1;
b97bf3fd
PL
1252 anc_data[0] = msg_nametype(msg);
1253 anc_data[1] = msg_namelower(msg);
1254 anc_data[2] = msg_nameupper(msg);
1255 break;
1256 case TIPC_CONN_MSG:
301bae56
JPM
1257 has_name = (tsk->conn_type != 0);
1258 anc_data[0] = tsk->conn_type;
1259 anc_data[1] = tsk->conn_instance;
1260 anc_data[2] = tsk->conn_instance;
b97bf3fd
PL
1261 break;
1262 default:
3546c750 1263 has_name = 0;
b97bf3fd 1264 }
2db9983a
AS
1265 if (has_name) {
1266 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1267 if (res)
1268 return res;
1269 }
b97bf3fd
PL
1270
1271 return 0;
1272}
1273
10724cc7 1274static void tipc_sk_send_ack(struct tipc_sock *tsk)
739f5e4e 1275{
d6fb7e9c
PB
1276 struct sock *sk = &tsk->sk;
1277 struct net *net = sock_net(sk);
a6ca1094 1278 struct sk_buff *skb = NULL;
739f5e4e 1279 struct tipc_msg *msg;
301bae56
JPM
1280 u32 peer_port = tsk_peer_port(tsk);
1281 u32 dnode = tsk_peer_node(tsk);
739f5e4e 1282
d6fb7e9c 1283 if (!tipc_sk_connected(sk))
739f5e4e 1284 return;
c5898636
JPM
1285 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1286 dnode, tsk_own_node(tsk), peer_port,
1287 tsk->portid, TIPC_OK);
a6ca1094 1288 if (!skb)
739f5e4e 1289 return;
a6ca1094 1290 msg = buf_msg(skb);
10724cc7
JPM
1291 msg_set_conn_ack(msg, tsk->rcv_unacked);
1292 tsk->rcv_unacked = 0;
1293
1294 /* Adjust to and advertize the correct window limit */
1295 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1296 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1297 msg_set_adv_win(msg, tsk->rcv_win);
1298 }
af9b028e 1299 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
739f5e4e
JPM
1300}
1301
85d3fc94 1302static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
9bbb4ecc
YX
1303{
1304 struct sock *sk = sock->sk;
1305 DEFINE_WAIT(wait);
85d3fc94 1306 long timeo = *timeop;
9bbb4ecc
YX
1307 int err;
1308
1309 for (;;) {
1310 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
fe8e4649 1311 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
9bbb4ecc
YX
1312 if (sock->state == SS_DISCONNECTING) {
1313 err = -ENOTCONN;
1314 break;
1315 }
1316 release_sock(sk);
1317 timeo = schedule_timeout(timeo);
1318 lock_sock(sk);
1319 }
1320 err = 0;
1321 if (!skb_queue_empty(&sk->sk_receive_queue))
1322 break;
9bbb4ecc
YX
1323 err = -EAGAIN;
1324 if (!timeo)
1325 break;
143fe22f
EH
1326 err = sock_intr_errno(timeo);
1327 if (signal_pending(current))
1328 break;
9bbb4ecc
YX
1329 }
1330 finish_wait(sk_sleep(sk), &wait);
85d3fc94 1331 *timeop = timeo;
9bbb4ecc
YX
1332 return err;
1333}
1334
c4307285 1335/**
247f0f3c 1336 * tipc_recvmsg - receive packet-oriented message
b97bf3fd
PL
1337 * @m: descriptor for message info
1338 * @buf_len: total size of user buffer area
1339 * @flags: receive flags
c4307285 1340 *
b97bf3fd
PL
1341 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1342 * If the complete message doesn't fit in user area, truncate it.
1343 *
1344 * Returns size of returned message data, errno otherwise
1345 */
1b784140
YX
1346static int tipc_recvmsg(struct socket *sock, struct msghdr *m, size_t buf_len,
1347 int flags)
b97bf3fd 1348{
0c3141e9 1349 struct sock *sk = sock->sk;
58ed9442 1350 struct tipc_sock *tsk = tipc_sk(sk);
b97bf3fd
PL
1351 struct sk_buff *buf;
1352 struct tipc_msg *msg;
c752023a 1353 bool is_connectionless = tipc_sk_type_connectionless(sk);
9bbb4ecc 1354 long timeo;
b97bf3fd
PL
1355 unsigned int sz;
1356 u32 err;
10724cc7 1357 int res, hlen;
b97bf3fd 1358
0c3141e9 1359 /* Catch invalid receive requests */
b97bf3fd
PL
1360 if (unlikely(!buf_len))
1361 return -EINVAL;
1362
0c3141e9 1363 lock_sock(sk);
b97bf3fd 1364
c752023a 1365 if (!is_connectionless && unlikely(sock->state == SS_UNCONNECTED)) {
0c3141e9 1366 res = -ENOTCONN;
b97bf3fd
PL
1367 goto exit;
1368 }
1369
9bbb4ecc 1370 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
0c3141e9 1371restart:
b97bf3fd 1372
0c3141e9 1373 /* Look for a message in receive queue; wait if necessary */
85d3fc94 1374 res = tipc_wait_for_rcvmsg(sock, &timeo);
9bbb4ecc
YX
1375 if (res)
1376 goto exit;
b97bf3fd 1377
0c3141e9 1378 /* Look at first message in receive queue */
0c3141e9 1379 buf = skb_peek(&sk->sk_receive_queue);
b97bf3fd
PL
1380 msg = buf_msg(buf);
1381 sz = msg_data_sz(msg);
10724cc7 1382 hlen = msg_hdr_sz(msg);
b97bf3fd
PL
1383 err = msg_errcode(msg);
1384
b97bf3fd 1385 /* Discard an empty non-errored message & try again */
b97bf3fd 1386 if ((!sz) && (!err)) {
2e84c60b 1387 tsk_advance_rx_queue(sk);
b97bf3fd
PL
1388 goto restart;
1389 }
1390
1391 /* Capture sender's address (optional) */
b97bf3fd
PL
1392 set_orig_addr(m, msg);
1393
1394 /* Capture ancillary data (optional) */
301bae56 1395 res = tipc_sk_anc_data_recv(m, msg, tsk);
0c3141e9 1396 if (res)
b97bf3fd
PL
1397 goto exit;
1398
1399 /* Capture message data (if valid) & compute return value (always) */
b97bf3fd
PL
1400 if (!err) {
1401 if (unlikely(buf_len < sz)) {
1402 sz = buf_len;
1403 m->msg_flags |= MSG_TRUNC;
1404 }
10724cc7 1405 res = skb_copy_datagram_msg(buf, hlen, m, sz);
0232fd0a 1406 if (res)
b97bf3fd 1407 goto exit;
b97bf3fd
PL
1408 res = sz;
1409 } else {
c752023a
PB
1410 if (is_connectionless || err == TIPC_CONN_SHUTDOWN ||
1411 m->msg_control)
b97bf3fd
PL
1412 res = 0;
1413 else
1414 res = -ECONNRESET;
1415 }
1416
10724cc7
JPM
1417 if (unlikely(flags & MSG_PEEK))
1418 goto exit;
1419
c752023a 1420 if (likely(!is_connectionless)) {
10724cc7
JPM
1421 tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
1422 if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
1423 tipc_sk_send_ack(tsk);
c4307285 1424 }
10724cc7 1425 tsk_advance_rx_queue(sk);
b97bf3fd 1426exit:
0c3141e9 1427 release_sock(sk);
b97bf3fd
PL
1428 return res;
1429}
1430
c4307285 1431/**
247f0f3c 1432 * tipc_recv_stream - receive stream-oriented data
b97bf3fd
PL
1433 * @m: descriptor for message info
1434 * @buf_len: total size of user buffer area
1435 * @flags: receive flags
c4307285
YH
1436 *
1437 * Used for SOCK_STREAM messages only. If not enough data is available
b97bf3fd
PL
1438 * will optionally wait for more; never truncates data.
1439 *
1440 * Returns size of returned message data, errno otherwise
1441 */
1b784140
YX
1442static int tipc_recv_stream(struct socket *sock, struct msghdr *m,
1443 size_t buf_len, int flags)
b97bf3fd 1444{
0c3141e9 1445 struct sock *sk = sock->sk;
58ed9442 1446 struct tipc_sock *tsk = tipc_sk(sk);
b97bf3fd
PL
1447 struct sk_buff *buf;
1448 struct tipc_msg *msg;
9bbb4ecc 1449 long timeo;
b97bf3fd 1450 unsigned int sz;
ba8aebe9 1451 int target;
b97bf3fd 1452 int sz_copied = 0;
b97bf3fd 1453 u32 err;
10724cc7 1454 int res = 0, hlen;
b97bf3fd 1455
0c3141e9 1456 /* Catch invalid receive attempts */
b97bf3fd
PL
1457 if (unlikely(!buf_len))
1458 return -EINVAL;
1459
0c3141e9 1460 lock_sock(sk);
b97bf3fd 1461
9bbb4ecc 1462 if (unlikely(sock->state == SS_UNCONNECTED)) {
0c3141e9 1463 res = -ENOTCONN;
b97bf3fd
PL
1464 goto exit;
1465 }
1466
3720d40b 1467 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
9bbb4ecc 1468 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
b97bf3fd 1469
617d3c7a 1470restart:
0c3141e9 1471 /* Look for a message in receive queue; wait if necessary */
85d3fc94 1472 res = tipc_wait_for_rcvmsg(sock, &timeo);
9bbb4ecc
YX
1473 if (res)
1474 goto exit;
b97bf3fd 1475
0c3141e9 1476 /* Look at first message in receive queue */
0c3141e9 1477 buf = skb_peek(&sk->sk_receive_queue);
b97bf3fd
PL
1478 msg = buf_msg(buf);
1479 sz = msg_data_sz(msg);
10724cc7 1480 hlen = msg_hdr_sz(msg);
b97bf3fd
PL
1481 err = msg_errcode(msg);
1482
1483 /* Discard an empty non-errored message & try again */
b97bf3fd 1484 if ((!sz) && (!err)) {
2e84c60b 1485 tsk_advance_rx_queue(sk);
b97bf3fd
PL
1486 goto restart;
1487 }
1488
1489 /* Optionally capture sender's address & ancillary data of first msg */
b97bf3fd
PL
1490 if (sz_copied == 0) {
1491 set_orig_addr(m, msg);
301bae56 1492 res = tipc_sk_anc_data_recv(m, msg, tsk);
0c3141e9 1493 if (res)
b97bf3fd
PL
1494 goto exit;
1495 }
1496
1497 /* Capture message data (if valid) & compute return value (always) */
b97bf3fd 1498 if (!err) {
ba8aebe9
PB
1499 u32 offset = TIPC_SKB_CB(buf)->bytes_read;
1500 u32 needed;
1501 int sz_to_copy;
b97bf3fd 1502
0232fd0a 1503 sz -= offset;
b97bf3fd 1504 needed = (buf_len - sz_copied);
ba8aebe9 1505 sz_to_copy = min(sz, needed);
0232fd0a 1506
10724cc7 1507 res = skb_copy_datagram_msg(buf, hlen + offset, m, sz_to_copy);
0232fd0a 1508 if (res)
b97bf3fd 1509 goto exit;
0232fd0a 1510
b97bf3fd
PL
1511 sz_copied += sz_to_copy;
1512
1513 if (sz_to_copy < sz) {
1514 if (!(flags & MSG_PEEK))
ba8aebe9
PB
1515 TIPC_SKB_CB(buf)->bytes_read =
1516 offset + sz_to_copy;
b97bf3fd
PL
1517 goto exit;
1518 }
b97bf3fd
PL
1519 } else {
1520 if (sz_copied != 0)
1521 goto exit; /* can't add error msg to valid data */
1522
1523 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
1524 res = 0;
1525 else
1526 res = -ECONNRESET;
1527 }
1528
10724cc7
JPM
1529 if (unlikely(flags & MSG_PEEK))
1530 goto exit;
1531
1532 tsk->rcv_unacked += tsk_inc(tsk, hlen + sz);
1533 if (unlikely(tsk->rcv_unacked >= (tsk->rcv_win / 4)))
1534 tipc_sk_send_ack(tsk);
1535 tsk_advance_rx_queue(sk);
b97bf3fd
PL
1536
1537 /* Loop around if more data is required */
f64f9e71
JP
1538 if ((sz_copied < buf_len) && /* didn't get all requested data */
1539 (!skb_queue_empty(&sk->sk_receive_queue) ||
3720d40b 1540 (sz_copied < target)) && /* and more is ready or required */
f64f9e71 1541 (!err)) /* and haven't reached a FIN */
b97bf3fd
PL
1542 goto restart;
1543
1544exit:
0c3141e9 1545 release_sock(sk);
a3b0a5a9 1546 return sz_copied ? sz_copied : res;
b97bf3fd
PL
1547}
1548
f288bef4
YX
1549/**
1550 * tipc_write_space - wake up thread if port congestion is released
1551 * @sk: socket
1552 */
1553static void tipc_write_space(struct sock *sk)
1554{
1555 struct socket_wq *wq;
1556
1557 rcu_read_lock();
1558 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 1559 if (skwq_has_sleeper(wq))
f288bef4
YX
1560 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1561 POLLWRNORM | POLLWRBAND);
1562 rcu_read_unlock();
1563}
1564
1565/**
1566 * tipc_data_ready - wake up threads to indicate messages have been received
1567 * @sk: socket
1568 * @len: the length of messages
1569 */
676d2369 1570static void tipc_data_ready(struct sock *sk)
f288bef4
YX
1571{
1572 struct socket_wq *wq;
1573
1574 rcu_read_lock();
1575 wq = rcu_dereference(sk->sk_wq);
1ce0bf50 1576 if (skwq_has_sleeper(wq))
f288bef4
YX
1577 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1578 POLLRDNORM | POLLRDBAND);
1579 rcu_read_unlock();
1580}
1581
f4195d1e
YX
1582static void tipc_sock_destruct(struct sock *sk)
1583{
1584 __skb_queue_purge(&sk->sk_receive_queue);
1585}
1586
7e6c131e
YX
1587/**
1588 * filter_connect - Handle all incoming messages for a connection-based socket
58ed9442 1589 * @tsk: TIPC socket
1186adf7 1590 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
7e6c131e 1591 *
cda3696d 1592 * Returns true if everything ok, false otherwise
7e6c131e 1593 */
cda3696d 1594static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
7e6c131e 1595{
58ed9442 1596 struct sock *sk = &tsk->sk;
f2f9800d 1597 struct net *net = sock_net(sk);
8826cde6 1598 struct socket *sock = sk->sk_socket;
cda3696d 1599 struct tipc_msg *hdr = buf_msg(skb);
7e6c131e 1600
cda3696d
JPM
1601 if (unlikely(msg_mcast(hdr)))
1602 return false;
7e6c131e
YX
1603
1604 switch ((int)sock->state) {
1605 case SS_CONNECTED:
cda3696d 1606
7e6c131e 1607 /* Accept only connection-based messages sent by peer */
cda3696d
JPM
1608 if (unlikely(!tsk_peer_msg(tsk, hdr)))
1609 return false;
1610
1611 if (unlikely(msg_errcode(hdr))) {
1612 sock->state = SS_DISCONNECTING;
cda3696d
JPM
1613 /* Let timer expire on its own */
1614 tipc_node_remove_conn(net, tsk_peer_node(tsk),
1615 tsk->portid);
4891d8fe 1616 sk->sk_state_change(sk);
7e6c131e 1617 }
cda3696d
JPM
1618 return true;
1619
7e6c131e 1620 case SS_CONNECTING:
dadebc00 1621
cda3696d
JPM
1622 /* Accept only ACK or NACK message */
1623 if (unlikely(!msg_connected(hdr)))
1624 return false;
dadebc00 1625
cda3696d 1626 if (unlikely(msg_errcode(hdr))) {
584d24b3 1627 sock->state = SS_DISCONNECTING;
2c8d8518 1628 sk->sk_err = ECONNREFUSED;
cda3696d 1629 return true;
584d24b3
YX
1630 }
1631
cda3696d 1632 if (unlikely(!msg_isdata(hdr))) {
584d24b3 1633 sock->state = SS_DISCONNECTING;
dadebc00 1634 sk->sk_err = EINVAL;
cda3696d 1635 return true;
584d24b3
YX
1636 }
1637
cda3696d
JPM
1638 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
1639 msg_set_importance(&tsk->phdr, msg_importance(hdr));
dadebc00
JPM
1640 sock->state = SS_CONNECTED;
1641
cda3696d
JPM
1642 /* If 'ACK+' message, add to socket receive queue */
1643 if (msg_data_sz(hdr))
1644 return true;
1645
1646 /* If empty 'ACK-' message, wake up sleeping connect() */
1647 if (waitqueue_active(sk_sleep(sk)))
1648 wake_up_interruptible(sk_sleep(sk));
1649
1650 /* 'ACK-' message is neither accepted nor rejected: */
1651 msg_set_dest_droppable(hdr, 1);
1652 return false;
1653
7e6c131e
YX
1654 case SS_LISTENING:
1655 case SS_UNCONNECTED:
cda3696d 1656
7e6c131e 1657 /* Accept only SYN message */
cda3696d
JPM
1658 if (!msg_connected(hdr) && !(msg_errcode(hdr)))
1659 return true;
7e6c131e
YX
1660 break;
1661 case SS_DISCONNECTING:
1662 break;
1663 default:
1664 pr_err("Unknown socket state %u\n", sock->state);
1665 }
cda3696d 1666 return false;
7e6c131e
YX
1667}
1668
aba79f33
YX
1669/**
1670 * rcvbuf_limit - get proper overload limit of socket receive queue
1671 * @sk: socket
10724cc7 1672 * @skb: message
aba79f33 1673 *
10724cc7
JPM
1674 * For connection-oriented messages, irrespective of importance,
1675 * the default queue limit is 2 MB.
aba79f33 1676 *
10724cc7
JPM
1677 * For connectionless messages, queue limits are based on message
1678 * importance as follows:
aba79f33 1679 *
10724cc7
JPM
1680 * TIPC_LOW_IMPORTANCE (2 MB)
1681 * TIPC_MEDIUM_IMPORTANCE (4 MB)
1682 * TIPC_HIGH_IMPORTANCE (8 MB)
1683 * TIPC_CRITICAL_IMPORTANCE (16 MB)
aba79f33
YX
1684 *
1685 * Returns overload limit according to corresponding message importance
1686 */
10724cc7 1687static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
aba79f33 1688{
10724cc7
JPM
1689 struct tipc_sock *tsk = tipc_sk(sk);
1690 struct tipc_msg *hdr = buf_msg(skb);
1691
1692 if (unlikely(!msg_connected(hdr)))
1693 return sk->sk_rcvbuf << msg_importance(hdr);
aba79f33 1694
10724cc7
JPM
1695 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
1696 return sk->sk_rcvbuf;
0cee6bbe 1697
10724cc7 1698 return FLOWCTL_MSG_LIM;
aba79f33
YX
1699}
1700
c4307285 1701/**
0c3141e9
AS
1702 * filter_rcv - validate incoming message
1703 * @sk: socket
cda3696d 1704 * @skb: pointer to message.
c4307285 1705 *
0c3141e9
AS
1706 * Enqueues message on receive queue if acceptable; optionally handles
1707 * disconnect indication for a connected socket.
1708 *
1186adf7 1709 * Called with socket lock already taken
c4307285 1710 *
cda3696d 1711 * Returns true if message was added to socket receive queue, otherwise false
b97bf3fd 1712 */
f1d048f2
JPM
1713static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
1714 struct sk_buff_head *xmitq)
b97bf3fd 1715{
58ed9442 1716 struct tipc_sock *tsk = tipc_sk(sk);
cda3696d
JPM
1717 struct tipc_msg *hdr = buf_msg(skb);
1718 unsigned int limit = rcvbuf_limit(sk, skb);
1719 int err = TIPC_OK;
1720 int usr = msg_user(hdr);
b97bf3fd 1721
cda3696d 1722 if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
f1d048f2 1723 tipc_sk_proto_rcv(tsk, skb, xmitq);
cda3696d 1724 return false;
1186adf7 1725 }
ec8a2e56 1726
cda3696d
JPM
1727 if (unlikely(usr == SOCK_WAKEUP)) {
1728 kfree_skb(skb);
50100a5e
JPM
1729 tsk->link_cong = 0;
1730 sk->sk_write_space(sk);
cda3696d 1731 return false;
50100a5e
JPM
1732 }
1733
cda3696d
JPM
1734 /* Drop if illegal message type */
1735 if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
1736 kfree_skb(skb);
1737 return false;
1738 }
0c3141e9 1739
cda3696d 1740 /* Reject if wrong message type for current socket state */
c752023a 1741 if (tipc_sk_type_connectionless(sk)) {
cda3696d
JPM
1742 if (msg_connected(hdr)) {
1743 err = TIPC_ERR_NO_PORT;
1744 goto reject;
1745 }
1746 } else if (unlikely(!filter_connect(tsk, skb))) {
1747 err = TIPC_ERR_NO_PORT;
1748 goto reject;
b97bf3fd
PL
1749 }
1750
1751 /* Reject message if there isn't room to queue it */
cda3696d
JPM
1752 if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
1753 err = TIPC_ERR_OVERLOAD;
1754 goto reject;
1755 }
b97bf3fd 1756
aba79f33 1757 /* Enqueue message */
ba8aebe9 1758 TIPC_SKB_CB(skb)->bytes_read = 0;
cda3696d
JPM
1759 __skb_queue_tail(&sk->sk_receive_queue, skb);
1760 skb_set_owner_r(skb, sk);
0c3141e9 1761
676d2369 1762 sk->sk_data_ready(sk);
cda3696d
JPM
1763 return true;
1764
1765reject:
f1d048f2
JPM
1766 if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
1767 __skb_queue_tail(xmitq, skb);
cda3696d 1768 return false;
0c3141e9 1769}
b97bf3fd 1770
0c3141e9 1771/**
4f4482dc 1772 * tipc_backlog_rcv - handle incoming message from backlog queue
0c3141e9 1773 * @sk: socket
a6ca1094 1774 * @skb: message
0c3141e9 1775 *
e3a77561 1776 * Caller must hold socket lock
0c3141e9
AS
1777 *
1778 * Returns 0
1779 */
a6ca1094 1780static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
0c3141e9 1781{
cda3696d 1782 unsigned int truesize = skb->truesize;
f1d048f2
JPM
1783 struct sk_buff_head xmitq;
1784 u32 dnode, selector;
0c3141e9 1785
f1d048f2
JPM
1786 __skb_queue_head_init(&xmitq);
1787
1788 if (likely(filter_rcv(sk, skb, &xmitq))) {
cda3696d 1789 atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
f1d048f2
JPM
1790 return 0;
1791 }
1792
1793 if (skb_queue_empty(&xmitq))
1794 return 0;
1795
1796 /* Send response/rejected message */
1797 skb = __skb_dequeue(&xmitq);
1798 dnode = msg_destnode(buf_msg(skb));
1799 selector = msg_origport(buf_msg(skb));
1800 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
0c3141e9
AS
1801 return 0;
1802}
1803
d570d864 1804/**
c637c103
JPM
1805 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
1806 * inputq and try adding them to socket or backlog queue
1807 * @inputq: list of incoming buffers with potentially different destinations
1808 * @sk: socket where the buffers should be enqueued
1809 * @dport: port number for the socket
d570d864
JPM
1810 *
1811 * Caller must hold socket lock
d570d864 1812 */
cda3696d 1813static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
f1d048f2 1814 u32 dport, struct sk_buff_head *xmitq)
d570d864 1815{
f1d048f2
JPM
1816 unsigned long time_limit = jiffies + 2;
1817 struct sk_buff *skb;
d570d864
JPM
1818 unsigned int lim;
1819 atomic_t *dcnt;
f1d048f2 1820 u32 onode;
c637c103
JPM
1821
1822 while (skb_queue_len(inputq)) {
51a00daf 1823 if (unlikely(time_after_eq(jiffies, time_limit)))
cda3696d
JPM
1824 return;
1825
c637c103
JPM
1826 skb = tipc_skb_dequeue(inputq, dport);
1827 if (unlikely(!skb))
cda3696d
JPM
1828 return;
1829
1830 /* Add message directly to receive queue if possible */
c637c103 1831 if (!sock_owned_by_user(sk)) {
f1d048f2 1832 filter_rcv(sk, skb, xmitq);
cda3696d 1833 continue;
c637c103 1834 }
cda3696d
JPM
1835
1836 /* Try backlog, compensating for double-counted bytes */
c637c103 1837 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
7c8bcfb1 1838 if (!sk->sk_backlog.len)
c637c103
JPM
1839 atomic_set(dcnt, 0);
1840 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
1841 if (likely(!sk_add_backlog(sk, skb, lim)))
1842 continue;
cda3696d
JPM
1843
1844 /* Overload => reject message back to sender */
f1d048f2
JPM
1845 onode = tipc_own_addr(sock_net(sk));
1846 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
1847 __skb_queue_tail(xmitq, skb);
cda3696d 1848 break;
c637c103 1849 }
d570d864
JPM
1850}
1851
0c3141e9 1852/**
c637c103
JPM
1853 * tipc_sk_rcv - handle a chain of incoming buffers
1854 * @inputq: buffer list containing the buffers
1855 * Consumes all buffers in list until inputq is empty
1856 * Note: may be called in multiple threads referring to the same queue
0c3141e9 1857 */
cda3696d 1858void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
0c3141e9 1859{
f1d048f2 1860 struct sk_buff_head xmitq;
c637c103 1861 u32 dnode, dport = 0;
9871b27f 1862 int err;
9816f061 1863 struct tipc_sock *tsk;
9816f061 1864 struct sock *sk;
cda3696d 1865 struct sk_buff *skb;
9816f061 1866
f1d048f2 1867 __skb_queue_head_init(&xmitq);
c637c103 1868 while (skb_queue_len(inputq)) {
c637c103
JPM
1869 dport = tipc_skb_peek_port(inputq, dport);
1870 tsk = tipc_sk_lookup(net, dport);
cda3696d 1871
c637c103
JPM
1872 if (likely(tsk)) {
1873 sk = &tsk->sk;
1874 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
f1d048f2 1875 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
c637c103 1876 spin_unlock_bh(&sk->sk_lock.slock);
c637c103 1877 }
f1d048f2
JPM
1878 /* Send pending response/rejected messages, if any */
1879 while ((skb = __skb_dequeue(&xmitq))) {
1880 dnode = msg_destnode(buf_msg(skb));
1881 tipc_node_xmit_skb(net, skb, dnode, dport);
1882 }
c637c103 1883 sock_put(sk);
c637c103 1884 continue;
c637c103 1885 }
cda3696d
JPM
1886
1887 /* No destination socket => dequeue skb if still there */
1888 skb = tipc_skb_dequeue(inputq, dport);
1889 if (!skb)
1890 return;
1891
1892 /* Try secondary lookup if unresolved named message */
1893 err = TIPC_ERR_NO_PORT;
1894 if (tipc_msg_lookup_dest(net, skb, &err))
1895 goto xmit;
1896
1897 /* Prepare for message rejection */
1898 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
c637c103 1899 continue;
e3a77561 1900xmit:
cda3696d 1901 dnode = msg_destnode(buf_msg(skb));
af9b028e 1902 tipc_node_xmit_skb(net, skb, dnode, dport);
c637c103 1903 }
b97bf3fd
PL
1904}
1905
78eb3a53
YX
1906static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
1907{
1908 struct sock *sk = sock->sk;
1909 DEFINE_WAIT(wait);
1910 int done;
1911
1912 do {
1913 int err = sock_error(sk);
1914 if (err)
1915 return err;
1916 if (!*timeo_p)
1917 return -ETIMEDOUT;
1918 if (signal_pending(current))
1919 return sock_intr_errno(*timeo_p);
1920
1921 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1922 done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
1923 finish_wait(sk_sleep(sk), &wait);
1924 } while (!done);
1925 return 0;
1926}
1927
b97bf3fd 1928/**
247f0f3c 1929 * tipc_connect - establish a connection to another TIPC port
b97bf3fd
PL
1930 * @sock: socket structure
1931 * @dest: socket address for destination port
1932 * @destlen: size of socket address data structure
0c3141e9 1933 * @flags: file-related flags associated with socket
b97bf3fd
PL
1934 *
1935 * Returns 0 on success, errno otherwise
1936 */
247f0f3c
YX
1937static int tipc_connect(struct socket *sock, struct sockaddr *dest,
1938 int destlen, int flags)
b97bf3fd 1939{
0c3141e9 1940 struct sock *sk = sock->sk;
f2f8036e 1941 struct tipc_sock *tsk = tipc_sk(sk);
b89741a0
AS
1942 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1943 struct msghdr m = {NULL,};
f2f8036e 1944 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
78eb3a53 1945 socket_state previous;
f2f8036e 1946 int res = 0;
b89741a0 1947
0c3141e9
AS
1948 lock_sock(sk);
1949
f2f8036e 1950 /* DGRAM/RDM connect(), just save the destaddr */
c752023a 1951 if (tipc_sk_type_connectionless(sk)) {
f2f8036e 1952 if (dst->family == AF_UNSPEC) {
aeda16b6 1953 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
610600c8
SL
1954 } else if (destlen != sizeof(struct sockaddr_tipc)) {
1955 res = -EINVAL;
f2f8036e 1956 } else {
aeda16b6 1957 memcpy(&tsk->peer, dest, destlen);
f2f8036e 1958 }
0c3141e9
AS
1959 goto exit;
1960 }
b89741a0 1961
b89741a0
AS
1962 /*
1963 * Reject connection attempt using multicast address
1964 *
1965 * Note: send_msg() validates the rest of the address fields,
1966 * so there's no need to do it here
1967 */
0c3141e9
AS
1968 if (dst->addrtype == TIPC_ADDR_MCAST) {
1969 res = -EINVAL;
1970 goto exit;
1971 }
1972
78eb3a53 1973 previous = sock->state;
584d24b3
YX
1974 switch (sock->state) {
1975 case SS_UNCONNECTED:
1976 /* Send a 'SYN-' to destination */
1977 m.msg_name = dest;
1978 m.msg_namelen = destlen;
1979
1980 /* If connect() is non-blocking, set MSG_DONTWAIT so that
1981 * send_msg() never blocks.
1982 */
1983 if (!timeout)
1984 m.msg_flags = MSG_DONTWAIT;
1985
39a0295f 1986 res = __tipc_sendmsg(sock, &m, 0);
584d24b3
YX
1987 if ((res < 0) && (res != -EWOULDBLOCK))
1988 goto exit;
1989
1990 /* Just entered SS_CONNECTING state; the only
1991 * difference is that the return value in the non-blocking
1992 * case is EINPROGRESS, rather than EALREADY.
1993 */
1994 res = -EINPROGRESS;
584d24b3 1995 case SS_CONNECTING:
78eb3a53
YX
1996 if (previous == SS_CONNECTING)
1997 res = -EALREADY;
1998 if (!timeout)
1999 goto exit;
2000 timeout = msecs_to_jiffies(timeout);
2001 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2002 res = tipc_wait_for_connect(sock, &timeout);
584d24b3
YX
2003 break;
2004 case SS_CONNECTED:
2005 res = -EISCONN;
2006 break;
2007 default:
2008 res = -EINVAL;
78eb3a53 2009 break;
b89741a0 2010 }
0c3141e9
AS
2011exit:
2012 release_sock(sk);
b89741a0 2013 return res;
b97bf3fd
PL
2014}
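
Illustrative user-space sketch (not part of this kernel file): assuming the standard AF_TIPC API exported via <linux/tipc.h>, a client reaches the connect path above roughly as follows; the service type and instance values are made-up example assumptions.

/* Hedged user-space example, assuming <linux/tipc.h> definitions. */
#include <linux/tipc.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

int tipc_client_connect(void)
{
	struct sockaddr_tipc srv;
	int sd = socket(AF_TIPC, SOCK_STREAM, 0);

	if (sd < 0)
		return -1;

	memset(&srv, 0, sizeof(srv));
	srv.family = AF_TIPC;
	srv.addrtype = TIPC_ADDR_NAME;          /* connect by service name */
	srv.addr.name.name.type = 18888;        /* example service type */
	srv.addr.name.name.instance = 17;       /* example instance */
	srv.addr.name.domain = 0;               /* look up anywhere in the zone */

	/* Drives tipc_connect() above: a 'SYN-' is sent, then the caller
	 * blocks in tipc_wait_for_connect() until ACK/RST or timeout.
	 */
	if (connect(sd, (struct sockaddr *)&srv, sizeof(srv)) < 0) {
		perror("connect");
		return -1;
	}
	return sd;
}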
2015
c4307285 2016/**
247f0f3c 2017 * tipc_listen - allow socket to listen for incoming connections
b97bf3fd
PL
2018 * @sock: socket structure
2019 * @len: (unused)
c4307285 2020 *
b97bf3fd
PL
2021 * Returns 0 on success, errno otherwise
2022 */
247f0f3c 2023static int tipc_listen(struct socket *sock, int len)
b97bf3fd 2024{
0c3141e9
AS
2025 struct sock *sk = sock->sk;
2026 int res;
2027
2028 lock_sock(sk);
b97bf3fd 2029
245f3d34 2030 if (sock->state != SS_UNCONNECTED)
0c3141e9
AS
2031 res = -EINVAL;
2032 else {
2033 sock->state = SS_LISTENING;
2034 res = 0;
2035 }
2036
2037 release_sock(sk);
2038 return res;
b97bf3fd
PL
2039}
2040
6398e23c
YX
2041static int tipc_wait_for_accept(struct socket *sock, long timeo)
2042{
2043 struct sock *sk = sock->sk;
2044 DEFINE_WAIT(wait);
2045 int err;
2046
2047 /* True wake-one mechanism for incoming connections: only
2048 * one process gets woken up, not the 'whole herd'.
2049 * Since we do not 'race & poll' for established sockets
2050 * anymore, the common case will execute the loop only once.
2051 */
2052 for (;;) {
2053 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2054 TASK_INTERRUPTIBLE);
fe8e4649 2055 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
6398e23c
YX
2056 release_sock(sk);
2057 timeo = schedule_timeout(timeo);
2058 lock_sock(sk);
2059 }
2060 err = 0;
2061 if (!skb_queue_empty(&sk->sk_receive_queue))
2062 break;
2063 err = -EINVAL;
2064 if (sock->state != SS_LISTENING)
2065 break;
6398e23c
YX
2066 err = -EAGAIN;
2067 if (!timeo)
2068 break;
143fe22f
EH
2069 err = sock_intr_errno(timeo);
2070 if (signal_pending(current))
2071 break;
6398e23c
YX
2072 }
2073 finish_wait(sk_sleep(sk), &wait);
2074 return err;
2075}
2076
c4307285 2077/**
247f0f3c 2078 * tipc_accept - wait for connection request
b97bf3fd
PL
2079 * @sock: listening socket
2080 * @newsock: new socket that is to be connected
2081 * @flags: file-related flags associated with socket
c4307285 2082 *
b97bf3fd
PL
2083 * Returns 0 on success, errno otherwise
2084 */
247f0f3c 2085static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
b97bf3fd 2086{
0fef8f20 2087 struct sock *new_sk, *sk = sock->sk;
b97bf3fd 2088 struct sk_buff *buf;
301bae56 2089 struct tipc_sock *new_tsock;
0fef8f20 2090 struct tipc_msg *msg;
6398e23c 2091 long timeo;
0c3141e9 2092 int res;
b97bf3fd 2093
0c3141e9 2094 lock_sock(sk);
b97bf3fd 2095
0c3141e9
AS
2096 if (sock->state != SS_LISTENING) {
2097 res = -EINVAL;
b97bf3fd
PL
2098 goto exit;
2099 }
6398e23c
YX
2100 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2101 res = tipc_wait_for_accept(sock, timeo);
2102 if (res)
2103 goto exit;
0c3141e9
AS
2104
2105 buf = skb_peek(&sk->sk_receive_queue);
2106
cb5da847 2107 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0);
0fef8f20
PG
2108 if (res)
2109 goto exit;
fdd75ea8 2110 security_sk_clone(sock->sk, new_sock->sk);
b97bf3fd 2111
0fef8f20 2112 new_sk = new_sock->sk;
301bae56 2113 new_tsock = tipc_sk(new_sk);
0fef8f20 2114 msg = buf_msg(buf);
b97bf3fd 2115
0fef8f20
PG
2116 /* we lock on new_sk; but lockdep sees the lock on sk */
2117 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2118
2119 /*
2120 * Reject any stray messages received by new socket
2121 * before the socket lock was taken (very, very unlikely)
2122 */
2e84c60b 2123 tsk_rej_rx_queue(new_sk);
0fef8f20
PG
2124
2125 /* Connect new socket to its peer */
301bae56 2126 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
0fef8f20
PG
2127 new_sock->state = SS_CONNECTED;
2128
301bae56 2129 tsk_set_importance(new_tsock, msg_importance(msg));
0fef8f20 2130 if (msg_named(msg)) {
301bae56
JPM
2131 new_tsock->conn_type = msg_nametype(msg);
2132 new_tsock->conn_instance = msg_nameinst(msg);
b97bf3fd 2133 }
0fef8f20
PG
2134
2135 /*
2136 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2137 * Respond to 'SYN+' by queuing it on new socket.
2138 */
2139 if (!msg_data_sz(msg)) {
2140 struct msghdr m = {NULL,};
2141
2e84c60b 2142 tsk_advance_rx_queue(sk);
39a0295f 2143 __tipc_send_stream(new_sock, &m, 0);
0fef8f20
PG
2144 } else {
2145 __skb_dequeue(&sk->sk_receive_queue);
2146 __skb_queue_head(&new_sk->sk_receive_queue, buf);
aba79f33 2147 skb_set_owner_r(buf, new_sk);
0fef8f20
PG
2148 }
2149 release_sock(new_sk);
b97bf3fd 2150exit:
0c3141e9 2151 release_sock(sk);
b97bf3fd
PL
2152 return res;
2153}
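
Illustrative user-space sketch (not part of this kernel file): the listen/accept path above is normally driven from an application roughly as below; the name sequence values and scope are example assumptions.

/* Hedged user-space example, assuming <linux/tipc.h> definitions. */
#include <linux/tipc.h>
#include <sys/socket.h>
#include <string.h>

int tipc_server_accept(void)
{
	struct sockaddr_tipc srv;
	int peer_sd, sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);

	if (sd < 0)
		return -1;

	memset(&srv, 0, sizeof(srv));
	srv.family = AF_TIPC;
	srv.addrtype = TIPC_ADDR_NAMESEQ;       /* publish a name range */
	srv.scope = TIPC_CLUSTER_SCOPE;
	srv.addr.nameseq.type = 18888;          /* example service type */
	srv.addr.nameseq.lower = 17;
	srv.addr.nameseq.upper = 17;

	if (bind(sd, (struct sockaddr *)&srv, sizeof(srv)) < 0)
		return -1;

	/* tipc_listen(): SS_UNCONNECTED -> SS_LISTENING (backlog is unused) */
	if (listen(sd, 0) < 0)
		return -1;

	/* tipc_accept(): waits in tipc_wait_for_accept() for a 'SYN',
	 * then hands back a new, connected socket.
	 */
	peer_sd = accept(sd, NULL, NULL);
	return peer_sd;
}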
2154
2155/**
247f0f3c 2156 * tipc_shutdown - shutdown socket connection
b97bf3fd 2157 * @sock: socket structure
e247a8f5 2158 * @how: direction to close (must be SHUT_RDWR)
b97bf3fd
PL
2159 *
2160 * Terminates connection (if necessary), then purges socket's receive queue.
c4307285 2161 *
b97bf3fd
PL
2162 * Returns 0 on success, errno otherwise
2163 */
247f0f3c 2164static int tipc_shutdown(struct socket *sock, int how)
b97bf3fd 2165{
0c3141e9 2166 struct sock *sk = sock->sk;
f2f9800d 2167 struct net *net = sock_net(sk);
58ed9442 2168 struct tipc_sock *tsk = tipc_sk(sk);
a6ca1094 2169 struct sk_buff *skb;
cda3696d
JPM
2170 u32 dnode = tsk_peer_node(tsk);
2171 u32 dport = tsk_peer_port(tsk);
2172 u32 onode = tipc_own_addr(net);
2173 u32 oport = tsk->portid;
b97bf3fd
PL
2174 int res;
2175
e247a8f5
AS
2176 if (how != SHUT_RDWR)
2177 return -EINVAL;
b97bf3fd 2178
0c3141e9 2179 lock_sock(sk);
b97bf3fd
PL
2180
2181 switch (sock->state) {
0c3141e9 2182 case SS_CONNECTING:
b97bf3fd
PL
2183 case SS_CONNECTED:
2184
b97bf3fd 2185restart:
bcd3ffd4
JPM
2186 dnode = tsk_peer_node(tsk);
2187
617d3c7a 2188 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
a6ca1094
YX
2189 skb = __skb_dequeue(&sk->sk_receive_queue);
2190 if (skb) {
ba8aebe9 2191 if (TIPC_SKB_CB(skb)->bytes_read) {
a6ca1094 2192 kfree_skb(skb);
b97bf3fd
PL
2193 goto restart;
2194 }
bcd3ffd4 2195 tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
0c3141e9 2196 } else {
c5898636 2197 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
80e44c22 2198 TIPC_CONN_MSG, SHORT_H_SIZE,
cda3696d
JPM
2199 0, dnode, onode, dport, oport,
2200 TIPC_CONN_SHUTDOWN);
d2fbdf76
VN
2201 if (skb)
2202 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
b97bf3fd 2203 }
0c3141e9 2204 sock->state = SS_DISCONNECTING;
f2f9800d 2205 tipc_node_remove_conn(net, dnode, tsk->portid);
b97bf3fd
PL
2206 /* fall through */
2207
2208 case SS_DISCONNECTING:
2209
75031151 2210 /* Discard any unreceived messages */
57467e56 2211 __skb_queue_purge(&sk->sk_receive_queue);
75031151
YX
2212
2213 /* Wake up anyone sleeping in poll */
2214 sk->sk_state_change(sk);
b97bf3fd
PL
2215 res = 0;
2216 break;
2217
2218 default:
2219 res = -ENOTCONN;
2220 }
2221
0c3141e9 2222 release_sock(sk);
b97bf3fd
PL
2223 return res;
2224}
2225
f2f2a96a 2226static void tipc_sk_timeout(unsigned long data)
57289015 2227{
f2f2a96a
YX
2228 struct tipc_sock *tsk = (struct tipc_sock *)data;
2229 struct sock *sk = &tsk->sk;
a6ca1094 2230 struct sk_buff *skb = NULL;
57289015 2231 u32 peer_port, peer_node;
c5898636 2232 u32 own_node = tsk_own_node(tsk);
57289015 2233
6c9808ce 2234 bh_lock_sock(sk);
d6fb7e9c 2235 if (!tipc_sk_connected(sk)) {
6c9808ce
JPM
2236 bh_unlock_sock(sk);
2237 goto exit;
57289015 2238 }
301bae56
JPM
2239 peer_port = tsk_peer_port(tsk);
2240 peer_node = tsk_peer_node(tsk);
57289015 2241
301bae56 2242 if (tsk->probing_state == TIPC_CONN_PROBING) {
b3be5e3e
EH
2243 if (!sock_owned_by_user(sk)) {
2244 sk->sk_socket->state = SS_DISCONNECTING;
b3be5e3e
EH
2245 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
2246 tsk_peer_port(tsk));
2247 sk->sk_state_change(sk);
2248 } else {
2249 /* Try again later */
2250 sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
2251 }
2252
360aab6b
PB
2253 bh_unlock_sock(sk);
2254 goto exit;
57289015 2255 }
360aab6b
PB
2256
2257 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
2258 INT_H_SIZE, 0, peer_node, own_node,
2259 peer_port, tsk->portid, TIPC_OK);
2260 tsk->probing_state = TIPC_CONN_PROBING;
2261 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
57289015 2262 bh_unlock_sock(sk);
a6ca1094 2263 if (skb)
af9b028e 2264 tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
6c9808ce 2265exit:
07f6c4bc 2266 sock_put(sk);
57289015
JPM
2267}
2268
301bae56 2269static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
0fc87aae
JPM
2270 struct tipc_name_seq const *seq)
2271{
d6fb7e9c
PB
2272 struct sock *sk = &tsk->sk;
2273 struct net *net = sock_net(sk);
0fc87aae
JPM
2274 struct publication *publ;
2275 u32 key;
2276
d6fb7e9c 2277 if (tipc_sk_connected(sk))
0fc87aae 2278 return -EINVAL;
07f6c4bc
YX
2279 key = tsk->portid + tsk->pub_count + 1;
2280 if (key == tsk->portid)
0fc87aae
JPM
2281 return -EADDRINUSE;
2282
f2f9800d 2283 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
07f6c4bc 2284 scope, tsk->portid, key);
0fc87aae
JPM
2285 if (unlikely(!publ))
2286 return -EINVAL;
2287
301bae56
JPM
2288 list_add(&publ->pport_list, &tsk->publications);
2289 tsk->pub_count++;
2290 tsk->published = 1;
0fc87aae
JPM
2291 return 0;
2292}
2293
301bae56 2294static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
0fc87aae
JPM
2295 struct tipc_name_seq const *seq)
2296{
f2f9800d 2297 struct net *net = sock_net(&tsk->sk);
0fc87aae
JPM
2298 struct publication *publ;
2299 struct publication *safe;
2300 int rc = -EINVAL;
2301
301bae56 2302 list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
0fc87aae
JPM
2303 if (seq) {
2304 if (publ->scope != scope)
2305 continue;
2306 if (publ->type != seq->type)
2307 continue;
2308 if (publ->lower != seq->lower)
2309 continue;
2310 if (publ->upper != seq->upper)
2311 break;
f2f9800d 2312 tipc_nametbl_withdraw(net, publ->type, publ->lower,
0fc87aae
JPM
2313 publ->ref, publ->key);
2314 rc = 0;
2315 break;
2316 }
f2f9800d 2317 tipc_nametbl_withdraw(net, publ->type, publ->lower,
0fc87aae
JPM
2318 publ->ref, publ->key);
2319 rc = 0;
2320 }
301bae56
JPM
2321 if (list_empty(&tsk->publications))
2322 tsk->published = 0;
0fc87aae
JPM
2323 return rc;
2324}
2325
5a9ee0be
JPM
2326/* tipc_sk_reinit: set non-zero address in all existing sockets
2327 * when we go from standalone to network mode.
2328 */
e05b31f4 2329void tipc_sk_reinit(struct net *net)
5a9ee0be 2330{
e05b31f4 2331 struct tipc_net *tn = net_generic(net, tipc_net_id);
07f6c4bc
YX
2332 const struct bucket_table *tbl;
2333 struct rhash_head *pos;
2334 struct tipc_sock *tsk;
5a9ee0be 2335 struct tipc_msg *msg;
07f6c4bc 2336 int i;
5a9ee0be 2337
07f6c4bc 2338 rcu_read_lock();
e05b31f4 2339 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
07f6c4bc
YX
2340 for (i = 0; i < tbl->size; i++) {
2341 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
2342 spin_lock_bh(&tsk->sk.sk_lock.slock);
2343 msg = &tsk->phdr;
34747539
YX
2344 msg_set_prevnode(msg, tn->own_addr);
2345 msg_set_orignode(msg, tn->own_addr);
07f6c4bc
YX
2346 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2347 }
5a9ee0be 2348 }
07f6c4bc 2349 rcu_read_unlock();
5a9ee0be
JPM
2350}
2351
e05b31f4 2352static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
808d90f9 2353{
e05b31f4 2354 struct tipc_net *tn = net_generic(net, tipc_net_id);
07f6c4bc 2355 struct tipc_sock *tsk;
808d90f9 2356
07f6c4bc 2357 rcu_read_lock();
6cca7289 2358 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
07f6c4bc
YX
2359 if (tsk)
2360 sock_hold(&tsk->sk);
2361 rcu_read_unlock();
808d90f9 2362
07f6c4bc 2363 return tsk;
808d90f9
JPM
2364}
2365
07f6c4bc 2366static int tipc_sk_insert(struct tipc_sock *tsk)
808d90f9 2367{
e05b31f4
YX
2368 struct sock *sk = &tsk->sk;
2369 struct net *net = sock_net(sk);
2370 struct tipc_net *tn = net_generic(net, tipc_net_id);
07f6c4bc
YX
2371 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2372 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
808d90f9 2373
07f6c4bc
YX
2374 while (remaining--) {
2375 portid++;
2376 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2377 portid = TIPC_MIN_PORT;
2378 tsk->portid = portid;
2379 sock_hold(&tsk->sk);
6cca7289
HX
2380 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2381 tsk_rht_params))
07f6c4bc
YX
2382 return 0;
2383 sock_put(&tsk->sk);
808d90f9
JPM
2384 }
2385
07f6c4bc 2386 return -1;
808d90f9
JPM
2387}
2388
07f6c4bc 2389static void tipc_sk_remove(struct tipc_sock *tsk)
808d90f9 2390{
07f6c4bc 2391 struct sock *sk = &tsk->sk;
e05b31f4 2392 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
808d90f9 2393
6cca7289 2394 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
07f6c4bc
YX
2395 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
2396 __sock_put(sk);
808d90f9 2397 }
808d90f9
JPM
2398}
2399
6cca7289
HX
2400static const struct rhashtable_params tsk_rht_params = {
2401 .nelem_hint = 192,
2402 .head_offset = offsetof(struct tipc_sock, node),
2403 .key_offset = offsetof(struct tipc_sock, portid),
2404 .key_len = sizeof(u32), /* portid */
6cca7289
HX
2405 .max_size = 1048576,
2406 .min_size = 256,
b5e2c150 2407 .automatic_shrinking = true,
6cca7289
HX
2408};
2409
e05b31f4 2410int tipc_sk_rht_init(struct net *net)
808d90f9 2411{
e05b31f4 2412 struct tipc_net *tn = net_generic(net, tipc_net_id);
6cca7289
HX
2413
2414 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
808d90f9
JPM
2415}
2416
e05b31f4 2417void tipc_sk_rht_destroy(struct net *net)
808d90f9 2418{
e05b31f4
YX
2419 struct tipc_net *tn = net_generic(net, tipc_net_id);
2420
07f6c4bc
YX
2421 /* Wait for socket readers to complete */
2422 synchronize_net();
808d90f9 2423
e05b31f4 2424 rhashtable_destroy(&tn->sk_rht);
808d90f9
JPM
2425}
2426
b97bf3fd 2427/**
247f0f3c 2428 * tipc_setsockopt - set socket option
b97bf3fd
PL
2429 * @sock: socket structure
2430 * @lvl: option level
2431 * @opt: option identifier
2432 * @ov: pointer to new option value
2433 * @ol: length of option value
c4307285
YH
2434 *
2435 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
b97bf3fd 2436 * (to ease compatibility).
c4307285 2437 *
b97bf3fd
PL
2438 * Returns 0 on success, errno otherwise
2439 */
247f0f3c
YX
2440static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2441 char __user *ov, unsigned int ol)
b97bf3fd 2442{
0c3141e9 2443 struct sock *sk = sock->sk;
58ed9442 2444 struct tipc_sock *tsk = tipc_sk(sk);
b97bf3fd
PL
2445 u32 value;
2446 int res;
2447
c4307285
YH
2448 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2449 return 0;
b97bf3fd
PL
2450 if (lvl != SOL_TIPC)
2451 return -ENOPROTOOPT;
2452 if (ol < sizeof(value))
2453 return -EINVAL;
2db9983a
AS
2454 res = get_user(value, (u32 __user *)ov);
2455 if (res)
b97bf3fd
PL
2456 return res;
2457
0c3141e9 2458 lock_sock(sk);
c4307285 2459
b97bf3fd
PL
2460 switch (opt) {
2461 case TIPC_IMPORTANCE:
301bae56 2462 res = tsk_set_importance(tsk, value);
b97bf3fd
PL
2463 break;
2464 case TIPC_SRC_DROPPABLE:
2465 if (sock->type != SOCK_STREAM)
301bae56 2466 tsk_set_unreliable(tsk, value);
c4307285 2467 else
b97bf3fd
PL
2468 res = -ENOPROTOOPT;
2469 break;
2470 case TIPC_DEST_DROPPABLE:
301bae56 2471 tsk_set_unreturnable(tsk, value);
b97bf3fd
PL
2472 break;
2473 case TIPC_CONN_TIMEOUT:
a0f40f02 2474 tipc_sk(sk)->conn_timeout = value;
0c3141e9 2475 /* no need to set "res", since already 0 at this point */
b97bf3fd
PL
2476 break;
2477 default:
2478 res = -EINVAL;
2479 }
2480
0c3141e9
AS
2481 release_sock(sk);
2482
b97bf3fd
PL
2483 return res;
2484}
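
Illustrative user-space sketch (not part of this kernel file): the TIPC-level options handled above are set with SOL_TIPC; the 30-second timeout and the chosen importance level are arbitrary example values.

/* Hedged user-space example, assuming <linux/tipc.h> definitions. */
#include <linux/tipc.h>
#include <sys/socket.h>

int tipc_tune_socket(int sd)
{
	__u32 timeout_ms = 30000;               /* example: 30 s connect timeout */
	__u32 importance = TIPC_HIGH_IMPORTANCE;

	/* Handled by the TIPC_CONN_TIMEOUT case above */
	if (setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT,
		       &timeout_ms, sizeof(timeout_ms)) < 0)
		return -1;

	/* Handled by the TIPC_IMPORTANCE case above (via tsk_set_importance) */
	return setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE,
			  &importance, sizeof(importance));
}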
2485
2486/**
247f0f3c 2487 * tipc_getsockopt - get socket option
b97bf3fd
PL
2488 * @sock: socket structure
2489 * @lvl: option level
2490 * @opt: option identifier
2491 * @ov: receptacle for option value
2492 * @ol: receptacle for length of option value
c4307285
YH
2493 *
2494 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
b97bf3fd 2495 * (to ease compatibility).
c4307285 2496 *
b97bf3fd
PL
2497 * Returns 0 on success, errno otherwise
2498 */
247f0f3c
YX
2499static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2500 char __user *ov, int __user *ol)
b97bf3fd 2501{
0c3141e9 2502 struct sock *sk = sock->sk;
58ed9442 2503 struct tipc_sock *tsk = tipc_sk(sk);
c4307285 2504 int len;
b97bf3fd 2505 u32 value;
c4307285 2506 int res;
b97bf3fd 2507
c4307285
YH
2508 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2509 return put_user(0, ol);
b97bf3fd
PL
2510 if (lvl != SOL_TIPC)
2511 return -ENOPROTOOPT;
2db9983a
AS
2512 res = get_user(len, ol);
2513 if (res)
c4307285 2514 return res;
b97bf3fd 2515
0c3141e9 2516 lock_sock(sk);
b97bf3fd
PL
2517
2518 switch (opt) {
2519 case TIPC_IMPORTANCE:
301bae56 2520 value = tsk_importance(tsk);
b97bf3fd
PL
2521 break;
2522 case TIPC_SRC_DROPPABLE:
301bae56 2523 value = tsk_unreliable(tsk);
b97bf3fd
PL
2524 break;
2525 case TIPC_DEST_DROPPABLE:
301bae56 2526 value = tsk_unreturnable(tsk);
b97bf3fd
PL
2527 break;
2528 case TIPC_CONN_TIMEOUT:
301bae56 2529 value = tsk->conn_timeout;
0c3141e9 2530 /* no need to set "res", since already 0 at this point */
b97bf3fd 2531 break;
0e65967e 2532 case TIPC_NODE_RECVQ_DEPTH:
9da3d475 2533 value = 0; /* was tipc_queue_size, now obsolete */
6650613d 2534 break;
0e65967e 2535 case TIPC_SOCK_RECVQ_DEPTH:
6650613d 2536 value = skb_queue_len(&sk->sk_receive_queue);
2537 break;
b97bf3fd
PL
2538 default:
2539 res = -EINVAL;
2540 }
2541
0c3141e9
AS
2542 release_sock(sk);
2543
25860c3b
PG
2544 if (res)
2545 return res; /* "get" failed */
b97bf3fd 2546
25860c3b
PG
2547 if (len < sizeof(value))
2548 return -EINVAL;
2549
2550 if (copy_to_user(ov, &value, sizeof(value)))
2551 return -EFAULT;
2552
2553 return put_user(sizeof(value), ol);
b97bf3fd
PL
2554}
2555
f2f9800d 2556static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
78acb1f9 2557{
f2f9800d 2558 struct sock *sk = sock->sk;
78acb1f9
EH
2559 struct tipc_sioc_ln_req lnr;
2560 void __user *argp = (void __user *)arg;
2561
2562 switch (cmd) {
2563 case SIOCGETLINKNAME:
2564 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2565 return -EFAULT;
f2f9800d
YX
2566 if (!tipc_node_get_linkname(sock_net(sk),
2567 lnr.bearer_id & 0xffff, lnr.peer,
78acb1f9
EH
2568 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2569 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2570 return -EFAULT;
2571 return 0;
2572 }
2573 return -EADDRNOTAVAIL;
78acb1f9
EH
2574 default:
2575 return -ENOIOCTLCMD;
2576 }
2577}
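
Illustrative user-space sketch (not part of this kernel file): assuming struct tipc_sioc_ln_req carries the peer, bearer_id and linkname fields used above, the SIOCGETLINKNAME ioctl can be driven as below; the peer node address is a placeholder.

/* Hedged user-space example; field names follow the kernel code above. */
#include <linux/tipc.h>
#include <linux/sockios.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

int tipc_print_linkname(int sd, __u32 peer_node)
{
	struct tipc_sioc_ln_req lnr;

	memset(&lnr, 0, sizeof(lnr));
	lnr.peer = peer_node;       /* address of a known peer node */
	lnr.bearer_id = 0;          /* first bearer; example assumption */

	/* Dispatched to the SIOCGETLINKNAME case in tipc_ioctl() above */
	if (ioctl(sd, SIOCGETLINKNAME, &lnr) < 0)
		return -1;

	printf("link: %s\n", lnr.linkname);
	return 0;
}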
2578
ae86b9e3
BH
2579/* Protocol switches for the various types of TIPC sockets */
2580
bca65eae 2581static const struct proto_ops msg_ops = {
0e65967e 2582 .owner = THIS_MODULE,
b97bf3fd 2583 .family = AF_TIPC,
247f0f3c
YX
2584 .release = tipc_release,
2585 .bind = tipc_bind,
2586 .connect = tipc_connect,
5eee6a6d 2587 .socketpair = sock_no_socketpair,
245f3d34 2588 .accept = sock_no_accept,
247f0f3c
YX
2589 .getname = tipc_getname,
2590 .poll = tipc_poll,
78acb1f9 2591 .ioctl = tipc_ioctl,
245f3d34 2592 .listen = sock_no_listen,
247f0f3c
YX
2593 .shutdown = tipc_shutdown,
2594 .setsockopt = tipc_setsockopt,
2595 .getsockopt = tipc_getsockopt,
2596 .sendmsg = tipc_sendmsg,
2597 .recvmsg = tipc_recvmsg,
8238745a
YH
2598 .mmap = sock_no_mmap,
2599 .sendpage = sock_no_sendpage
b97bf3fd
PL
2600};
2601
bca65eae 2602static const struct proto_ops packet_ops = {
0e65967e 2603 .owner = THIS_MODULE,
b97bf3fd 2604 .family = AF_TIPC,
247f0f3c
YX
2605 .release = tipc_release,
2606 .bind = tipc_bind,
2607 .connect = tipc_connect,
5eee6a6d 2608 .socketpair = sock_no_socketpair,
247f0f3c
YX
2609 .accept = tipc_accept,
2610 .getname = tipc_getname,
2611 .poll = tipc_poll,
78acb1f9 2612 .ioctl = tipc_ioctl,
247f0f3c
YX
2613 .listen = tipc_listen,
2614 .shutdown = tipc_shutdown,
2615 .setsockopt = tipc_setsockopt,
2616 .getsockopt = tipc_getsockopt,
2617 .sendmsg = tipc_send_packet,
2618 .recvmsg = tipc_recvmsg,
8238745a
YH
2619 .mmap = sock_no_mmap,
2620 .sendpage = sock_no_sendpage
b97bf3fd
PL
2621};
2622
bca65eae 2623static const struct proto_ops stream_ops = {
0e65967e 2624 .owner = THIS_MODULE,
b97bf3fd 2625 .family = AF_TIPC,
247f0f3c
YX
2626 .release = tipc_release,
2627 .bind = tipc_bind,
2628 .connect = tipc_connect,
5eee6a6d 2629 .socketpair = sock_no_socketpair,
247f0f3c
YX
2630 .accept = tipc_accept,
2631 .getname = tipc_getname,
2632 .poll = tipc_poll,
78acb1f9 2633 .ioctl = tipc_ioctl,
247f0f3c
YX
2634 .listen = tipc_listen,
2635 .shutdown = tipc_shutdown,
2636 .setsockopt = tipc_setsockopt,
2637 .getsockopt = tipc_getsockopt,
2638 .sendmsg = tipc_send_stream,
2639 .recvmsg = tipc_recv_stream,
8238745a
YH
2640 .mmap = sock_no_mmap,
2641 .sendpage = sock_no_sendpage
b97bf3fd
PL
2642};
2643
bca65eae 2644static const struct net_proto_family tipc_family_ops = {
0e65967e 2645 .owner = THIS_MODULE,
b97bf3fd 2646 .family = AF_TIPC,
c5fa7b3c 2647 .create = tipc_sk_create
b97bf3fd
PL
2648};
2649
2650static struct proto tipc_proto = {
2651 .name = "TIPC",
2652 .owner = THIS_MODULE,
cc79dd1b
YX
2653 .obj_size = sizeof(struct tipc_sock),
2654 .sysctl_rmem = sysctl_tipc_rmem
b97bf3fd
PL
2655};
2656
2657/**
4323add6 2658 * tipc_socket_init - initialize TIPC socket interface
c4307285 2659 *
b97bf3fd
PL
2660 * Returns 0 on success, errno otherwise
2661 */
4323add6 2662int tipc_socket_init(void)
b97bf3fd
PL
2663{
2664 int res;
2665
c4307285 2666 res = proto_register(&tipc_proto, 1);
b97bf3fd 2667 if (res) {
2cf8aa19 2668 pr_err("Failed to register TIPC protocol type\n");
b97bf3fd
PL
2669 goto out;
2670 }
2671
2672 res = sock_register(&tipc_family_ops);
2673 if (res) {
2cf8aa19 2674 pr_err("Failed to register TIPC socket type\n");
b97bf3fd
PL
2675 proto_unregister(&tipc_proto);
2676 goto out;
2677 }
b97bf3fd
PL
2678 out:
2679 return res;
2680}
2681
2682/**
4323add6 2683 * tipc_socket_stop - stop TIPC socket interface
b97bf3fd 2684 */
4323add6 2685void tipc_socket_stop(void)
b97bf3fd 2686{
b97bf3fd
PL
2687 sock_unregister(tipc_family_ops.family);
2688 proto_unregister(&tipc_proto);
2689}
34b78a12
RA
2690
2691/* Caller should hold socket lock for the passed tipc socket. */
d8182804 2692static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
34b78a12
RA
2693{
2694 u32 peer_node;
2695 u32 peer_port;
2696 struct nlattr *nest;
2697
2698 peer_node = tsk_peer_node(tsk);
2699 peer_port = tsk_peer_port(tsk);
2700
2701 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
2702
2703 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
2704 goto msg_full;
2705 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
2706 goto msg_full;
2707
2708 if (tsk->conn_type != 0) {
2709 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
2710 goto msg_full;
2711 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
2712 goto msg_full;
2713 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
2714 goto msg_full;
2715 }
2716 nla_nest_end(skb, nest);
2717
2718 return 0;
2719
2720msg_full:
2721 nla_nest_cancel(skb, nest);
2722
2723 return -EMSGSIZE;
2724}
2725
2726/* Caller should hold socket lock for the passed tipc socket. */
d8182804
RA
2727static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
2728 struct tipc_sock *tsk)
34b78a12
RA
2729{
2730 int err;
2731 void *hdr;
2732 struct nlattr *attrs;
34747539
YX
2733 struct net *net = sock_net(skb->sk);
2734 struct tipc_net *tn = net_generic(net, tipc_net_id);
d6fb7e9c 2735 struct sock *sk = &tsk->sk;
34b78a12
RA
2736
2737 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
bfb3e5dd 2738 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
34b78a12
RA
2739 if (!hdr)
2740 goto msg_cancel;
2741
2742 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
2743 if (!attrs)
2744 goto genlmsg_cancel;
07f6c4bc 2745 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
34b78a12 2746 goto attr_msg_cancel;
34747539 2747 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
34b78a12
RA
2748 goto attr_msg_cancel;
2749
d6fb7e9c 2750 if (tipc_sk_connected(sk)) {
34b78a12
RA
2751 err = __tipc_nl_add_sk_con(skb, tsk);
2752 if (err)
2753 goto attr_msg_cancel;
2754 } else if (!list_empty(&tsk->publications)) {
2755 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
2756 goto attr_msg_cancel;
2757 }
2758 nla_nest_end(skb, attrs);
2759 genlmsg_end(skb, hdr);
2760
2761 return 0;
2762
2763attr_msg_cancel:
2764 nla_nest_cancel(skb, attrs);
2765genlmsg_cancel:
2766 genlmsg_cancel(skb, hdr);
2767msg_cancel:
2768 return -EMSGSIZE;
2769}
2770
2771int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
2772{
2773 int err;
2774 struct tipc_sock *tsk;
07f6c4bc
YX
2775 const struct bucket_table *tbl;
2776 struct rhash_head *pos;
e05b31f4
YX
2777 struct net *net = sock_net(skb->sk);
2778 struct tipc_net *tn = net_generic(net, tipc_net_id);
d6e164e3
RA
2779 u32 tbl_id = cb->args[0];
2780 u32 prev_portid = cb->args[1];
34b78a12 2781
07f6c4bc 2782 rcu_read_lock();
e05b31f4 2783 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
d6e164e3
RA
2784 for (; tbl_id < tbl->size; tbl_id++) {
2785 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
07f6c4bc 2786 spin_lock_bh(&tsk->sk.sk_lock.slock);
d6e164e3
RA
2787 if (prev_portid && prev_portid != tsk->portid) {
2788 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2789 continue;
2790 }
2791
07f6c4bc 2792 err = __tipc_nl_add_sk(skb, cb, tsk);
d6e164e3
RA
2793 if (err) {
2794 prev_portid = tsk->portid;
2795 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2796 goto out;
2797 }
2798 prev_portid = 0;
07f6c4bc 2799 spin_unlock_bh(&tsk->sk.sk_lock.slock);
07f6c4bc 2800 }
34b78a12 2801 }
d6e164e3 2802out:
07f6c4bc 2803 rcu_read_unlock();
d6e164e3
RA
2804 cb->args[0] = tbl_id;
2805 cb->args[1] = prev_portid;
34b78a12
RA
2806
2807 return skb->len;
2808}
1a1a143d
RA
2809
2810/* Caller should hold socket lock for the passed tipc socket. */
d8182804
RA
2811static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
2812 struct netlink_callback *cb,
2813 struct publication *publ)
1a1a143d
RA
2814{
2815 void *hdr;
2816 struct nlattr *attrs;
2817
2818 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
bfb3e5dd 2819 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
1a1a143d
RA
2820 if (!hdr)
2821 goto msg_cancel;
2822
2823 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
2824 if (!attrs)
2825 goto genlmsg_cancel;
2826
2827 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
2828 goto attr_msg_cancel;
2829 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
2830 goto attr_msg_cancel;
2831 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
2832 goto attr_msg_cancel;
2833 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
2834 goto attr_msg_cancel;
2835
2836 nla_nest_end(skb, attrs);
2837 genlmsg_end(skb, hdr);
2838
2839 return 0;
2840
2841attr_msg_cancel:
2842 nla_nest_cancel(skb, attrs);
2843genlmsg_cancel:
2844 genlmsg_cancel(skb, hdr);
2845msg_cancel:
2846 return -EMSGSIZE;
2847}
2848
2849/* Caller should hold socket lock for the passed tipc socket. */
d8182804
RA
2850static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
2851 struct netlink_callback *cb,
2852 struct tipc_sock *tsk, u32 *last_publ)
1a1a143d
RA
2853{
2854 int err;
2855 struct publication *p;
2856
2857 if (*last_publ) {
2858 list_for_each_entry(p, &tsk->publications, pport_list) {
2859 if (p->key == *last_publ)
2860 break;
2861 }
2862 if (p->key != *last_publ) {
2863 /* We never set seq or call nl_dump_check_consistent(),
2864 * which means that setting prev_seq here will cause the
2865 * consistency check to fail in the netlink callback
2866 * handler. This results in the last NLMSG_DONE message
2867 * having the NLM_F_DUMP_INTR flag set.
2868 */
2869 cb->prev_seq = 1;
2870 *last_publ = 0;
2871 return -EPIPE;
2872 }
2873 } else {
2874 p = list_first_entry(&tsk->publications, struct publication,
2875 pport_list);
2876 }
2877
2878 list_for_each_entry_from(p, &tsk->publications, pport_list) {
2879 err = __tipc_nl_add_sk_publ(skb, cb, p);
2880 if (err) {
2881 *last_publ = p->key;
2882 return err;
2883 }
2884 }
2885 *last_publ = 0;
2886
2887 return 0;
2888}
2889
2890int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
2891{
2892 int err;
07f6c4bc 2893 u32 tsk_portid = cb->args[0];
1a1a143d
RA
2894 u32 last_publ = cb->args[1];
2895 u32 done = cb->args[2];
e05b31f4 2896 struct net *net = sock_net(skb->sk);
1a1a143d
RA
2897 struct tipc_sock *tsk;
2898
07f6c4bc 2899 if (!tsk_portid) {
1a1a143d
RA
2900 struct nlattr **attrs;
2901 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
2902
2903 err = tipc_nlmsg_parse(cb->nlh, &attrs);
2904 if (err)
2905 return err;
2906
45e093ae
RA
2907 if (!attrs[TIPC_NLA_SOCK])
2908 return -EINVAL;
2909
1a1a143d
RA
2910 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
2911 attrs[TIPC_NLA_SOCK],
2912 tipc_nl_sock_policy);
2913 if (err)
2914 return err;
2915
2916 if (!sock[TIPC_NLA_SOCK_REF])
2917 return -EINVAL;
2918
07f6c4bc 2919 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
1a1a143d
RA
2920 }
2921
2922 if (done)
2923 return 0;
2924
e05b31f4 2925 tsk = tipc_sk_lookup(net, tsk_portid);
1a1a143d
RA
2926 if (!tsk)
2927 return -EINVAL;
2928
2929 lock_sock(&tsk->sk);
2930 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
2931 if (!err)
2932 done = 1;
2933 release_sock(&tsk->sk);
07f6c4bc 2934 sock_put(&tsk->sk);
1a1a143d 2935
07f6c4bc 2936 cb->args[0] = tsk_portid;
1a1a143d
RA
2937 cb->args[1] = last_publ;
2938 cb->args[2] = done;
2939
2940 return skb->len;
2941}