1 /*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/rhashtable.h>
38 #include <linux/sched/signal.h>
39
40 #include "core.h"
41 #include "name_table.h"
42 #include "node.h"
43 #include "link.h"
44 #include "name_distr.h"
45 #include "socket.h"
46 #include "bcast.h"
47 #include "netlink.h"
48 #include "group.h"
49
50 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
51 #define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
52 #define TIPC_FWD_MSG 1
53 #define TIPC_MAX_PORT 0xffffffff
54 #define TIPC_MIN_PORT 1
55 #define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
56
57 enum {
58 TIPC_LISTEN = TCP_LISTEN,
59 TIPC_ESTABLISHED = TCP_ESTABLISHED,
60 TIPC_OPEN = TCP_CLOSE,
61 TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
62 TIPC_CONNECTING = TCP_SYN_SENT,
63 };
64
65 struct sockaddr_pair {
66 struct sockaddr_tipc sock;
67 struct sockaddr_tipc member;
68 };
69
70 /**
71 * struct tipc_sock - TIPC socket structure
72 * @sk: socket - interacts with 'port' and with user via the socket API
73 * @conn_type: TIPC type used when connection was established
74 * @conn_instance: TIPC instance used when connection was established
75 * @published: non-zero if port has one or more associated names
76 * @max_pkt: maximum packet size "hint" used when building messages sent by port
77 * @portid: unique port identity in TIPC socket hash table
78 * @phdr: preformatted message header used when sending messages
79 * @cong_links: list of congested links
80 * @publications: list of publications for port
81 * @group: communication group the socket is a member of, if any
82 * @pub_count: total # of publications port has made during its lifetime
83 * @probe_unacked: connection probe has not yet been acknowledged by peer
84 * @conn_timeout: the time we can wait for an unresponded setup request
85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
86 * @cong_link_cnt: number of congested links
87 * @snt_unacked: # messages sent by socket, and not yet acked by peer
88 * @rcv_unacked: # messages read by user, but not yet acked back to peer
89 * @peer: 'connected' peer for dgram/rdm
90 * @node: hash table node
91 * @mc_method: cookie for use between socket and broadcast layer
92 * @rcu: rcu struct for tipc_sock
93 */
94 struct tipc_sock {
95 struct sock sk;
96 u32 conn_type;
97 u32 conn_instance;
98 int published;
99 u32 max_pkt;
100 u32 portid;
101 struct tipc_msg phdr;
102 struct list_head cong_links;
103 struct list_head publications;
104 u32 pub_count;
105 uint conn_timeout;
106 atomic_t dupl_rcvcnt;
107 bool probe_unacked;
108 u16 cong_link_cnt;
109 u16 snt_unacked;
110 u16 snd_win;
111 u16 peer_caps;
112 u16 rcv_unacked;
113 u16 rcv_win;
114 struct sockaddr_tipc peer;
115 struct rhash_head node;
116 struct tipc_mc_method mc_method;
117 struct rcu_head rcu;
118 struct tipc_group *group;
119 bool group_is_open;
120 };
121
122 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
123 static void tipc_data_ready(struct sock *sk);
124 static void tipc_write_space(struct sock *sk);
125 static void tipc_sock_destruct(struct sock *sk);
126 static int tipc_release(struct socket *sock);
127 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
128 bool kern);
129 static void tipc_sk_timeout(struct timer_list *t);
130 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
131 struct tipc_name_seq const *seq);
132 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
133 struct tipc_name_seq const *seq);
134 static int tipc_sk_leave(struct tipc_sock *tsk);
135 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
136 static int tipc_sk_insert(struct tipc_sock *tsk);
137 static void tipc_sk_remove(struct tipc_sock *tsk);
138 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
139 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
140
141 static const struct proto_ops packet_ops;
142 static const struct proto_ops stream_ops;
143 static const struct proto_ops msg_ops;
144 static struct proto tipc_proto;
145 static const struct rhashtable_params tsk_rht_params;
146
147 static u32 tsk_own_node(struct tipc_sock *tsk)
148 {
149 return msg_prevnode(&tsk->phdr);
150 }
151
152 static u32 tsk_peer_node(struct tipc_sock *tsk)
153 {
154 return msg_destnode(&tsk->phdr);
155 }
156
157 static u32 tsk_peer_port(struct tipc_sock *tsk)
158 {
159 return msg_destport(&tsk->phdr);
160 }
161
162 static bool tsk_unreliable(struct tipc_sock *tsk)
163 {
164 return msg_src_droppable(&tsk->phdr) != 0;
165 }
166
167 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
168 {
169 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
170 }
171
172 static bool tsk_unreturnable(struct tipc_sock *tsk)
173 {
174 return msg_dest_droppable(&tsk->phdr) != 0;
175 }
176
177 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
178 {
179 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
180 }
181
182 static int tsk_importance(struct tipc_sock *tsk)
183 {
184 return msg_importance(&tsk->phdr);
185 }
186
187 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
188 {
189 if (imp > TIPC_CRITICAL_IMPORTANCE)
190 return -EINVAL;
191 msg_set_importance(&tsk->phdr, (u32)imp);
192 return 0;
193 }
194
195 static struct tipc_sock *tipc_sk(const struct sock *sk)
196 {
197 return container_of(sk, struct tipc_sock, sk);
198 }
199
200 static bool tsk_conn_cong(struct tipc_sock *tsk)
201 {
202 return tsk->snt_unacked > tsk->snd_win;
203 }
204
205 static u16 tsk_blocks(int len)
206 {
207 return ((len / FLOWCTL_BLK_SZ) + 1);
208 }
209
210 /* tsk_adv_blocks(): translate a buffer size in bytes to number of
211 * advertisable blocks, taking into account the ratio truesize(len)/len
212 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
213 */
214 static u16 tsk_adv_blocks(int len)
215 {
216 return len / FLOWCTL_BLK_SZ / 4;
217 }
218
219 /* tsk_inc(): increment counter for sent or received data
220 * - If block-based flow control is not supported by the peer we
221 * fall back to message-based flow control, counting one unit per message
222 */
223 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
224 {
225 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
226 return ((msglen / FLOWCTL_BLK_SZ) + 1);
227 return 1;
228 }
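/*
 * Illustrative arithmetic for the block-based flow control helpers above
 * (not part of the original file), assuming FLOWCTL_BLK_SZ == 1024 as
 * defined in net/tipc/msg.h:
 *
 *   tsk_inc(tsk, 100)     ->   100 / 1024 + 1 = 1 block
 *   tsk_inc(tsk, 5000)    ->  5000 / 1024 + 1 = 5 blocks
 *   tsk_adv_blocks(65536) -> 65536 / 1024 / 4 = 16 advertisable blocks
 *
 * The divide-by-4 in tsk_adv_blocks() reserves headroom for the skb
 * truesize(len)/len ratio, which is trusted to stay below 4 for buffers
 * of at least FLOWCTL_BLK_SZ bytes.
 */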
229
230 /**
231 * tsk_advance_rx_queue - discard first buffer in socket receive queue
232 *
233 * Caller must hold socket lock
234 */
235 static void tsk_advance_rx_queue(struct sock *sk)
236 {
237 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
238 }
239
240 /* tipc_sk_respond() : send response message back to sender
241 */
242 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
243 {
244 u32 selector;
245 u32 dnode;
246 u32 onode = tipc_own_addr(sock_net(sk));
247
248 if (!tipc_msg_reverse(onode, &skb, err))
249 return;
250
251 dnode = msg_destnode(buf_msg(skb));
252 selector = msg_origport(buf_msg(skb));
253 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
254 }
255
256 /**
257 * tsk_rej_rx_queue - reject all buffers in socket receive queue
258 *
259 * Caller must hold socket lock
260 */
261 static void tsk_rej_rx_queue(struct sock *sk)
262 {
263 struct sk_buff *skb;
264
265 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
266 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
267 }
268
269 static bool tipc_sk_connected(struct sock *sk)
270 {
271 return sk->sk_state == TIPC_ESTABLISHED;
272 }
273
274 /* tipc_sk_type_connectionless - check if the socket is a datagram socket
275 * @sk: socket
276 *
277 * Returns true if connectionless, false otherwise
278 */
279 static bool tipc_sk_type_connectionless(struct sock *sk)
280 {
281 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
282 }
283
284 /* tsk_peer_msg - verify if message was sent by connected port's peer
285 *
286 * Handles cases where the node's network address has changed from
287 * the default of <0.0.0> to its configured setting.
288 */
289 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
290 {
291 struct sock *sk = &tsk->sk;
292 u32 self = tipc_own_addr(sock_net(sk));
293 u32 peer_port = tsk_peer_port(tsk);
294 u32 orig_node, peer_node;
295
296 if (unlikely(!tipc_sk_connected(sk)))
297 return false;
298
299 if (unlikely(msg_origport(msg) != peer_port))
300 return false;
301
302 orig_node = msg_orignode(msg);
303 peer_node = tsk_peer_node(tsk);
304
305 if (likely(orig_node == peer_node))
306 return true;
307
308 if (!orig_node && peer_node == self)
309 return true;
310
311 if (!peer_node && orig_node == self)
312 return true;
313
314 return false;
315 }
316
317 /* tipc_set_sk_state - set the sk_state of the socket
318 * @sk: socket
319 *
320 * Caller must hold socket lock
321 *
322 * Returns 0 on success, errno otherwise
323 */
324 static int tipc_set_sk_state(struct sock *sk, int state)
325 {
326 int oldsk_state = sk->sk_state;
327 int res = -EINVAL;
328
329 switch (state) {
330 case TIPC_OPEN:
331 res = 0;
332 break;
333 case TIPC_LISTEN:
334 case TIPC_CONNECTING:
335 if (oldsk_state == TIPC_OPEN)
336 res = 0;
337 break;
338 case TIPC_ESTABLISHED:
339 if (oldsk_state == TIPC_CONNECTING ||
340 oldsk_state == TIPC_OPEN)
341 res = 0;
342 break;
343 case TIPC_DISCONNECTING:
344 if (oldsk_state == TIPC_CONNECTING ||
345 oldsk_state == TIPC_ESTABLISHED)
346 res = 0;
347 break;
348 }
349
350 if (!res)
351 sk->sk_state = state;
352
353 return res;
354 }
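/*
 * Summary of the transitions accepted by the switch above (sketch derived
 * from the code; not part of the original file):
 *
 *   any state        -> TIPC_OPEN
 *   TIPC_OPEN        -> TIPC_LISTEN | TIPC_CONNECTING | TIPC_ESTABLISHED
 *   TIPC_CONNECTING  -> TIPC_ESTABLISHED | TIPC_DISCONNECTING
 *   TIPC_ESTABLISHED -> TIPC_DISCONNECTING
 *
 * All other transitions leave sk_state untouched and return -EINVAL.
 */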
355
356 static int tipc_sk_sock_err(struct socket *sock, long *timeout)
357 {
358 struct sock *sk = sock->sk;
359 int err = sock_error(sk);
360 int typ = sock->type;
361
362 if (err)
363 return err;
364 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
365 if (sk->sk_state == TIPC_DISCONNECTING)
366 return -EPIPE;
367 else if (!tipc_sk_connected(sk))
368 return -ENOTCONN;
369 }
370 if (!*timeout)
371 return -EAGAIN;
372 if (signal_pending(current))
373 return sock_intr_errno(*timeout);
374
375 return 0;
376 }
377
378 #define tipc_wait_for_cond(sock_, timeo_, condition_) \
379 ({ \
380 struct sock *sk_; \
381 int rc_; \
382 \
383 while ((rc_ = !(condition_))) { \
384 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
385 sk_ = (sock_)->sk; \
386 rc_ = tipc_sk_sock_err((sock_), timeo_); \
387 if (rc_) \
388 break; \
389 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
390 release_sock(sk_); \
391 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
392 sched_annotate_sleep(); \
393 lock_sock(sk_); \
394 remove_wait_queue(sk_sleep(sk_), &wait_); \
395 } \
396 rc_; \
397 })
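/*
 * Note on the macro above: it releases the socket lock while sleeping and
 * re-takes it before re-evaluating @condition_, so the condition may safely
 * dereference state protected by the lock. Illustrative call, as used by
 * the send paths further below:
 *
 *   rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 */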
398
399 /**
400 * tipc_sk_create - create a TIPC socket
401 * @net: network namespace (must be default network)
402 * @sock: pre-allocated socket structure
403 * @protocol: protocol indicator (must be 0)
404 * @kern: caused by kernel or by userspace?
405 *
406 * This routine creates additional data structures used by the TIPC socket,
407 * initializes them, and links them together.
408 *
409 * Returns 0 on success, errno otherwise
410 */
411 static int tipc_sk_create(struct net *net, struct socket *sock,
412 int protocol, int kern)
413 {
414 const struct proto_ops *ops;
415 struct sock *sk;
416 struct tipc_sock *tsk;
417 struct tipc_msg *msg;
418
419 /* Validate arguments */
420 if (unlikely(protocol != 0))
421 return -EPROTONOSUPPORT;
422
423 switch (sock->type) {
424 case SOCK_STREAM:
425 ops = &stream_ops;
426 break;
427 case SOCK_SEQPACKET:
428 ops = &packet_ops;
429 break;
430 case SOCK_DGRAM:
431 case SOCK_RDM:
432 ops = &msg_ops;
433 break;
434 default:
435 return -EPROTOTYPE;
436 }
437
438 /* Allocate socket's protocol area */
439 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
440 if (sk == NULL)
441 return -ENOMEM;
442
443 tsk = tipc_sk(sk);
444 tsk->max_pkt = MAX_PKT_DEFAULT;
445 INIT_LIST_HEAD(&tsk->publications);
446 INIT_LIST_HEAD(&tsk->cong_links);
447 msg = &tsk->phdr;
448
449 /* Finish initializing socket data structures */
450 sock->ops = ops;
451 sock_init_data(sock, sk);
452 tipc_set_sk_state(sk, TIPC_OPEN);
453 if (tipc_sk_insert(tsk)) {
454 pr_warn("Socket create failed; port number exhausted\n");
455 return -EINVAL;
456 }
457
458 /* Ensure tsk is visible before we read own_addr. */
459 smp_mb();
460
461 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
462 TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
463
464 msg_set_origport(msg, tsk->portid);
465 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
466 sk->sk_shutdown = 0;
467 sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
468 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
469 sk->sk_data_ready = tipc_data_ready;
470 sk->sk_write_space = tipc_write_space;
471 sk->sk_destruct = tipc_sock_destruct;
472 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
473 tsk->group_is_open = true;
474 atomic_set(&tsk->dupl_rcvcnt, 0);
475
476 /* Start out with safe limits until we receive an advertised window */
477 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
478 tsk->rcv_win = tsk->snd_win;
479
480 if (tipc_sk_type_connectionless(sk)) {
481 tsk_set_unreturnable(tsk, true);
482 if (sock->type == SOCK_DGRAM)
483 tsk_set_unreliable(tsk, true);
484 }
485
486 return 0;
487 }
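/*
 * Userspace sketch of what lands in tipc_sk_create() (illustrative only,
 * not part of the original file). AF_TIPC sockets take protocol 0 and one
 * of the four supported types:
 *
 *   #include <sys/socket.h>
 *   #include <linux/tipc.h>
 *
 *   int rdm = socket(AF_TIPC, SOCK_RDM, 0);       // reliable datagram
 *   int stream = socket(AF_TIPC, SOCK_STREAM, 0); // byte stream
 *
 * Any protocol other than 0 fails with EPROTONOSUPPORT, and an unsupported
 * type fails with EPROTOTYPE, per the checks above.
 */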
488
489 static void tipc_sk_callback(struct rcu_head *head)
490 {
491 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
492
493 sock_put(&tsk->sk);
494 }
495
496 /* Caller should hold socket lock for the socket. */
497 static void __tipc_shutdown(struct socket *sock, int error)
498 {
499 struct sock *sk = sock->sk;
500 struct tipc_sock *tsk = tipc_sk(sk);
501 struct net *net = sock_net(sk);
502 long timeout = CONN_TIMEOUT_DEFAULT;
503 u32 dnode = tsk_peer_node(tsk);
504 struct sk_buff *skb;
505
506 /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
507 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
508 !tsk_conn_cong(tsk)));
509
510 /* Reject all unreceived messages, except on an active connection
511 * (which disconnects locally & sends a 'FIN+' to peer).
512 */
513 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
514 if (TIPC_SKB_CB(skb)->bytes_read) {
515 kfree_skb(skb);
516 continue;
517 }
518 if (!tipc_sk_type_connectionless(sk) &&
519 sk->sk_state != TIPC_DISCONNECTING) {
520 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
521 tipc_node_remove_conn(net, dnode, tsk->portid);
522 }
523 tipc_sk_respond(sk, skb, error);
524 }
525
526 if (tipc_sk_type_connectionless(sk))
527 return;
528
529 if (sk->sk_state != TIPC_DISCONNECTING) {
530 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
531 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
532 tsk_own_node(tsk), tsk_peer_port(tsk),
533 tsk->portid, error);
534 if (skb)
535 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
536 tipc_node_remove_conn(net, dnode, tsk->portid);
537 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
538 }
539 }
540
541 /**
542 * tipc_release - destroy a TIPC socket
543 * @sock: socket to destroy
544 *
545 * This routine cleans up any messages that are still queued on the socket.
546 * For DGRAM and RDM socket types, all queued messages are rejected.
547 * For SEQPACKET and STREAM socket types, the first message is rejected
548 * and any others are discarded. (If the first message on a STREAM socket
549 * is partially-read, it is discarded and the next one is rejected instead.)
550 *
551 * NOTE: Rejected messages are not necessarily returned to the sender! They
552 * are returned or discarded according to the "destination droppable" setting
553 * specified for the message by the sender.
554 *
555 * Returns 0 on success, errno otherwise
556 */
557 static int tipc_release(struct socket *sock)
558 {
559 struct sock *sk = sock->sk;
560 struct tipc_sock *tsk;
561
562 /*
563 * Exit if socket isn't fully initialized (occurs when a failed accept()
564 * releases a pre-allocated child socket that was never used)
565 */
566 if (sk == NULL)
567 return 0;
568
569 tsk = tipc_sk(sk);
570 lock_sock(sk);
571
572 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
573 sk->sk_shutdown = SHUTDOWN_MASK;
574 tipc_sk_leave(tsk);
575 tipc_sk_withdraw(tsk, 0, NULL);
576 sk_stop_timer(sk, &sk->sk_timer);
577 tipc_sk_remove(tsk);
578
579 /* Reject any messages that accumulated in backlog queue */
580 release_sock(sk);
581 tipc_dest_list_purge(&tsk->cong_links);
582 tsk->cong_link_cnt = 0;
583 call_rcu(&tsk->rcu, tipc_sk_callback);
584 sock->sk = NULL;
585
586 return 0;
587 }
588
589 /**
590 * tipc_bind - associate or disassociate TIPC name(s) with a socket
591 * @sock: socket structure
592 * @uaddr: socket address describing name(s) and desired operation
593 * @uaddr_len: size of socket address data structure
594 *
595 * Name and name sequence binding is indicated using a positive scope value;
596 * a negative scope value unbinds the specified name. Specifying no name
597 * (i.e. a socket address length of 0) unbinds all names from the socket.
598 *
599 * Returns 0 on success, errno otherwise
600 *
601 * NOTE: This routine doesn't need to take the socket lock since it doesn't
602 * access any non-constant socket information.
603 */
604 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
605 int uaddr_len)
606 {
607 struct sock *sk = sock->sk;
608 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
609 struct tipc_sock *tsk = tipc_sk(sk);
610 int res = -EINVAL;
611
612 lock_sock(sk);
613 if (unlikely(!uaddr_len)) {
614 res = tipc_sk_withdraw(tsk, 0, NULL);
615 goto exit;
616 }
617 if (tsk->group) {
618 res = -EACCES;
619 goto exit;
620 }
621 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
622 res = -EINVAL;
623 goto exit;
624 }
625 if (addr->family != AF_TIPC) {
626 res = -EAFNOSUPPORT;
627 goto exit;
628 }
629
630 if (addr->addrtype == TIPC_ADDR_NAME)
631 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
632 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
633 res = -EAFNOSUPPORT;
634 goto exit;
635 }
636
637 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
638 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
639 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
640 res = -EACCES;
641 goto exit;
642 }
643
644 res = (addr->scope >= 0) ?
645 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
646 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
647 exit:
648 release_sock(sk);
649 return res;
650 }
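/*
 * Userspace bind sketch matching the rules enforced above (illustrative
 * only; the service type 18888 is an arbitrary example value):
 *
 *   struct sockaddr_tipc a = {
 *           .family = AF_TIPC,
 *           .addrtype = TIPC_ADDR_NAMESEQ,
 *           .scope = TIPC_CLUSTER_SCOPE,          // positive scope: publish
 *           .addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *   };
 *   bind(sd, (struct sockaddr *)&a, sizeof(a));
 *
 *   a.scope = -TIPC_CLUSTER_SCOPE;                // negative scope: withdraw
 *   bind(sd, (struct sockaddr *)&a, sizeof(a));
 *
 *   bind(sd, (struct sockaddr *)&a, 0);           // zero length: withdraw all
 */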
651
652 /**
653 * tipc_getname - get port ID of socket or peer socket
654 * @sock: socket structure
655 * @uaddr: area for returned socket address
656 * @uaddr_len: area for returned length of socket address
657 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
658 *
659 * Returns 0 on success, errno otherwise
660 *
661 * NOTE: This routine doesn't need to take the socket lock since it only
662 * accesses socket information that is unchanging (or which changes in
663 * a completely predictable manner).
664 */
665 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
666 int peer)
667 {
668 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
669 struct sock *sk = sock->sk;
670 struct tipc_sock *tsk = tipc_sk(sk);
671
672 memset(addr, 0, sizeof(*addr));
673 if (peer) {
674 if ((!tipc_sk_connected(sk)) &&
675 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
676 return -ENOTCONN;
677 addr->addr.id.ref = tsk_peer_port(tsk);
678 addr->addr.id.node = tsk_peer_node(tsk);
679 } else {
680 addr->addr.id.ref = tsk->portid;
681 addr->addr.id.node = tipc_own_addr(sock_net(sk));
682 }
683
684 addr->addrtype = TIPC_ADDR_ID;
685 addr->family = AF_TIPC;
686 addr->scope = 0;
687 addr->addr.name.domain = 0;
688
689 return sizeof(*addr);
690 }
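/*
 * Userspace sketch (illustrative only): both getsockname() and
 * getpeername() land here, with @peer distinguishing the two cases:
 *
 *   struct sockaddr_tipc self;
 *   socklen_t len = sizeof(self);
 *   getsockname(sd, (struct sockaddr *)&self, &len);
 *   // self.addrtype == TIPC_ADDR_ID; self.addr.id.ref is the port id,
 *   // self.addr.id.node the own node address.
 */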
691
692 /**
693 * tipc_poll - read and possibly block on pollmask
694 * @file: file structure associated with the socket
695 * @sock: socket for which to calculate the poll bits
696 * @wait: poll table to register with
697 *
698 * Returns pollmask value
699 *
700 * COMMENTARY:
701 * It appears that the usual socket locking mechanisms are not useful here
702 * since the pollmask info is potentially out-of-date the moment this routine
703 * exits. TCP and other protocols seem to rely on higher level poll routines
704 * to handle any preventable race conditions, so TIPC will do the same ...
705 *
706 * IMPORTANT: The fact that a read or write operation is indicated does NOT
707 * imply that the operation will succeed, merely that it should be performed
708 * and will not block.
709 */
710 static __poll_t tipc_poll(struct file *file, struct socket *sock,
711 poll_table *wait)
712 {
713 struct sock *sk = sock->sk;
714 struct tipc_sock *tsk = tipc_sk(sk);
715 __poll_t revents = 0;
716
717 sock_poll_wait(file, wait);
718
719 if (sk->sk_shutdown & RCV_SHUTDOWN)
720 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
721 if (sk->sk_shutdown == SHUTDOWN_MASK)
722 revents |= EPOLLHUP;
723
724 switch (sk->sk_state) {
725 case TIPC_ESTABLISHED:
726 case TIPC_CONNECTING:
727 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
728 revents |= EPOLLOUT;
729 /* fall thru' */
730 case TIPC_LISTEN:
731 if (!skb_queue_empty(&sk->sk_receive_queue))
732 revents |= EPOLLIN | EPOLLRDNORM;
733 break;
734 case TIPC_OPEN:
735 if (tsk->group_is_open && !tsk->cong_link_cnt)
736 revents |= EPOLLOUT;
737 if (!tipc_sk_type_connectionless(sk))
738 break;
739 if (skb_queue_empty(&sk->sk_receive_queue))
740 break;
741 revents |= EPOLLIN | EPOLLRDNORM;
742 break;
743 case TIPC_DISCONNECTING:
744 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
745 break;
746 }
747 return revents;
748 }
749
750 /**
751 * tipc_sendmcast - send multicast message
752 * @sock: socket structure
753 * @seq: destination address
754 * @msg: message to send
755 * @dlen: length of data to send
756 * @timeout: timeout to wait for wakeup
757 *
758 * Called from function tipc_sendmsg(), which has done all sanity checks
759 * Returns the number of bytes sent on success, or errno
760 */
761 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
762 struct msghdr *msg, size_t dlen, long timeout)
763 {
764 struct sock *sk = sock->sk;
765 struct tipc_sock *tsk = tipc_sk(sk);
766 struct tipc_msg *hdr = &tsk->phdr;
767 struct net *net = sock_net(sk);
768 int mtu = tipc_bcast_get_mtu(net);
769 struct tipc_mc_method *method = &tsk->mc_method;
770 struct sk_buff_head pkts;
771 struct tipc_nlist dsts;
772 int rc;
773
774 if (tsk->group)
775 return -EACCES;
776
777 /* Block or return if any destination link is congested */
778 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
779 if (unlikely(rc))
780 return rc;
781
782 /* Lookup destination nodes */
783 tipc_nlist_init(&dsts, tipc_own_addr(net));
784 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
785 seq->upper, &dsts);
786 if (!dsts.local && !dsts.remote)
787 return -EHOSTUNREACH;
788
789 /* Build message header */
790 msg_set_type(hdr, TIPC_MCAST_MSG);
791 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
792 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
793 msg_set_destport(hdr, 0);
794 msg_set_destnode(hdr, 0);
795 msg_set_nametype(hdr, seq->type);
796 msg_set_namelower(hdr, seq->lower);
797 msg_set_nameupper(hdr, seq->upper);
798
799 /* Build message as chain of buffers */
800 skb_queue_head_init(&pkts);
801 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
802
803 /* Send message if build was successful */
804 if (unlikely(rc == dlen))
805 rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
806 &tsk->cong_link_cnt);
807
808 tipc_nlist_purge(&dsts);
809
810 return rc ? rc : dlen;
811 }
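/*
 * Userspace sketch of a multicast send that ends up in tipc_sendmcast()
 * (illustrative only; type/range values are arbitrary examples):
 *
 *   struct sockaddr_tipc dst = {
 *           .family = AF_TIPC,
 *           .addrtype = TIPC_ADDR_MCAST,
 *           .addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *   };
 *   sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * The message is delivered to every socket bound to an overlapping range
 * of the same service type, on any reachable node.
 */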
812
813 /**
814 * tipc_send_group_msg - send a message to a member in the group
815 * @net: network namespace
816 * @m: message to send
817 * @mb: group member
818 * @dnode: destination node
819 * @dport: destination port
820 * @dlen: total length of message data
821 */
822 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
823 struct msghdr *m, struct tipc_member *mb,
824 u32 dnode, u32 dport, int dlen)
825 {
826 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
827 struct tipc_mc_method *method = &tsk->mc_method;
828 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
829 struct tipc_msg *hdr = &tsk->phdr;
830 struct sk_buff_head pkts;
831 int mtu, rc;
832
833 /* Complete message header */
834 msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
835 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
836 msg_set_destport(hdr, dport);
837 msg_set_destnode(hdr, dnode);
838 msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
839
840 /* Build message as chain of buffers */
841 skb_queue_head_init(&pkts);
842 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
843 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
844 if (unlikely(rc != dlen))
845 return rc;
846
847 /* Send message */
848 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
849 if (unlikely(rc == -ELINKCONG)) {
850 tipc_dest_push(&tsk->cong_links, dnode, 0);
851 tsk->cong_link_cnt++;
852 }
853
854 /* Update send window */
855 tipc_group_update_member(mb, blks);
856
857 /* A broadcast sent within next EXPIRE period must follow same path */
858 method->rcast = true;
859 method->mandatory = true;
860 return dlen;
861 }
862
863 /**
864 * tipc_send_group_unicast - send message to a member in the group
865 * @sock: socket structure
866 * @m: message to send
867 * @dlen: total length of message data
868 * @timeout: timeout to wait for wakeup
869 *
870 * Called from function tipc_sendmsg(), which has done all sanity checks
871 * Returns the number of bytes sent on success, or errno
872 */
873 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
874 int dlen, long timeout)
875 {
876 struct sock *sk = sock->sk;
877 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
878 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
879 struct tipc_sock *tsk = tipc_sk(sk);
880 struct tipc_group *grp = tsk->group;
881 struct net *net = sock_net(sk);
882 struct tipc_member *mb = NULL;
883 u32 node, port;
884 int rc;
885
886 node = dest->addr.id.node;
887 port = dest->addr.id.ref;
888 if (!port && !node)
889 return -EHOSTUNREACH;
890
891 /* Block or return if destination link or member is congested */
892 rc = tipc_wait_for_cond(sock, &timeout,
893 !tipc_dest_find(&tsk->cong_links, node, 0) &&
894 !tipc_group_cong(grp, node, port, blks, &mb));
895 if (unlikely(rc))
896 return rc;
897
898 if (unlikely(!mb))
899 return -EHOSTUNREACH;
900
901 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
902
903 return rc ? rc : dlen;
904 }
905
906 /**
907 * tipc_send_group_anycast - send message to any member with given identity
908 * @sock: socket structure
909 * @m: message to send
910 * @dlen: total length of message data
911 * @timeout: timeout to wait for wakeup
912 *
913 * Called from function tipc_sendmsg(), which has done all sanity checks
914 * Returns the number of bytes sent on success, or errno
915 */
916 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
917 int dlen, long timeout)
918 {
919 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
920 struct sock *sk = sock->sk;
921 struct tipc_sock *tsk = tipc_sk(sk);
922 struct list_head *cong_links = &tsk->cong_links;
923 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
924 struct tipc_group *grp = tsk->group;
925 struct tipc_msg *hdr = &tsk->phdr;
926 struct tipc_member *first = NULL;
927 struct tipc_member *mbr = NULL;
928 struct net *net = sock_net(sk);
929 u32 node, port, exclude;
930 struct list_head dsts;
931 u32 type, inst, scope;
932 int lookups = 0;
933 int dstcnt, rc;
934 bool cong;
935
936 INIT_LIST_HEAD(&dsts);
937
938 type = msg_nametype(hdr);
939 inst = dest->addr.name.name.instance;
940 scope = msg_lookup_scope(hdr);
941 exclude = tipc_group_exclude(grp);
942
943 while (++lookups < 4) {
944 first = NULL;
945
946 /* Look for a non-congested destination member, if any */
947 while (1) {
948 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
949 &dstcnt, exclude, false))
950 return -EHOSTUNREACH;
951 tipc_dest_pop(&dsts, &node, &port);
952 cong = tipc_group_cong(grp, node, port, blks, &mbr);
953 if (!cong)
954 break;
955 if (mbr == first)
956 break;
957 if (!first)
958 first = mbr;
959 }
960
961 /* Start over if destination was not in member list */
962 if (unlikely(!mbr))
963 continue;
964
965 if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
966 break;
967
968 /* Block or return if destination link or member is congested */
969 rc = tipc_wait_for_cond(sock, &timeout,
970 !tipc_dest_find(cong_links, node, 0) &&
971 !tipc_group_cong(grp, node, port,
972 blks, &mbr));
973 if (unlikely(rc))
974 return rc;
975
976 /* Send, unless destination disappeared while waiting */
977 if (likely(mbr))
978 break;
979 }
980
981 if (unlikely(lookups >= 4))
982 return -EHOSTUNREACH;
983
984 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
985
986 return rc ? rc : dlen;
987 }
988
989 /**
990 * tipc_send_group_bcast - send message to all members in communication group
991 * @sk: socket structure
992 * @m: message to send
993 * @dlen: total length of message data
994 * @timeout: timeout to wait for wakeup
995 *
996 * Called from function tipc_sendmsg(), which has done all sanity checks
997 * Returns the number of bytes sent on success, or errno
998 */
999 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1000 int dlen, long timeout)
1001 {
1002 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1003 struct sock *sk = sock->sk;
1004 struct net *net = sock_net(sk);
1005 struct tipc_sock *tsk = tipc_sk(sk);
1006 struct tipc_group *grp = tsk->group;
1007 struct tipc_nlist *dsts = tipc_group_dests(grp);
1008 struct tipc_mc_method *method = &tsk->mc_method;
1009 bool ack = method->mandatory && method->rcast;
1010 int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1011 struct tipc_msg *hdr = &tsk->phdr;
1012 int mtu = tipc_bcast_get_mtu(net);
1013 struct sk_buff_head pkts;
1014 int rc = -EHOSTUNREACH;
1015
1016 if (!dsts->local && !dsts->remote)
1017 return -EHOSTUNREACH;
1018
1019 /* Block or return if any destination link or member is congested */
1020 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
1021 !tipc_group_bc_cong(grp, blks));
1022 if (unlikely(rc))
1023 return rc;
1024
1025 /* Complete message header */
1026 if (dest) {
1027 msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1028 msg_set_nameinst(hdr, dest->addr.name.name.instance);
1029 } else {
1030 msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1031 msg_set_nameinst(hdr, 0);
1032 }
1033 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1034 msg_set_destport(hdr, 0);
1035 msg_set_destnode(hdr, 0);
1036 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
1037
1038 /* Avoid getting stuck with repeated forced replicasts */
1039 msg_set_grp_bc_ack_req(hdr, ack);
1040
1041 /* Build message as chain of buffers */
1042 skb_queue_head_init(&pkts);
1043 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1044 if (unlikely(rc != dlen))
1045 return rc;
1046
1047 /* Send message */
1048 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1049 if (unlikely(rc))
1050 return rc;
1051
1052 /* Update broadcast sequence number and send windows */
1053 tipc_group_update_bc_members(tsk->group, blks, ack);
1054
1055 /* Broadcast link is now free to choose method for next broadcast */
1056 method->mandatory = false;
1057 method->expires = jiffies;
1058
1059 return dlen;
1060 }
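/*
 * Userspace sketch of joining a group and broadcasting to it (illustrative
 * only; group type/instance values are arbitrary examples). The join uses
 * the TIPC_GROUP_JOIN socket option from <linux/tipc.h>:
 *
 *   struct tipc_group_req req = {
 *           .type = 18888,                 // group identity
 *           .instance = 17,                // member identity
 *           .scope = TIPC_CLUSTER_SCOPE,
 *           .flags = TIPC_GROUP_MEMBER_EVTS,
 *   };
 *   setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 *
 *   // A send() with no destination address on a group member socket is
 *   // what reaches tipc_send_group_bcast() above:
 *   send(sd, buf, len, 0);
 */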
1061
1062 /**
1063 * tipc_send_group_mcast - send message to all members with given identity
1064 * @sock: socket structure
1065 * @m: message to send
1066 * @dlen: total length of message data
1067 * @timeout: timeout to wait for wakeup
1068 *
1069 * Called from function tipc_sendmsg(), which has done all sanity checks
1070 * Returns the number of bytes sent on success, or errno
1071 */
1072 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1073 int dlen, long timeout)
1074 {
1075 struct sock *sk = sock->sk;
1076 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1077 struct tipc_sock *tsk = tipc_sk(sk);
1078 struct tipc_group *grp = tsk->group;
1079 struct tipc_msg *hdr = &tsk->phdr;
1080 struct net *net = sock_net(sk);
1081 u32 type, inst, scope, exclude;
1082 struct list_head dsts;
1083 u32 dstcnt;
1084
1085 INIT_LIST_HEAD(&dsts);
1086
1087 type = msg_nametype(hdr);
1088 inst = dest->addr.name.name.instance;
1089 scope = msg_lookup_scope(hdr);
1090 exclude = tipc_group_exclude(grp);
1091
1092 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
1093 &dstcnt, exclude, true))
1094 return -EHOSTUNREACH;
1095
1096 if (dstcnt == 1) {
1097 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
1098 return tipc_send_group_unicast(sock, m, dlen, timeout);
1099 }
1100
1101 tipc_dest_list_purge(&dsts);
1102 return tipc_send_group_bcast(sock, m, dlen, timeout);
1103 }
1104
1105 /**
1106 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
1107 * @arrvq: queue with arriving messages, to be cloned after destination lookup
1108 * @inputq: queue with cloned messages, delivered to socket after dest lookup
1109 *
1110 * Multi-threaded: parallel calls with reference to same queues may occur
1111 */
1112 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1113 struct sk_buff_head *inputq)
1114 {
1115 u32 self = tipc_own_addr(net);
1116 u32 type, lower, upper, scope;
1117 struct sk_buff *skb, *_skb;
1118 u32 portid, onode;
1119 struct sk_buff_head tmpq;
1120 struct list_head dports;
1121 struct tipc_msg *hdr;
1122 int user, mtyp, hlen;
1123 bool exact;
1124
1125 __skb_queue_head_init(&tmpq);
1126 INIT_LIST_HEAD(&dports);
1127
1128 skb = tipc_skb_peek(arrvq, &inputq->lock);
1129 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1130 hdr = buf_msg(skb);
1131 user = msg_user(hdr);
1132 mtyp = msg_type(hdr);
1133 hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
1134 onode = msg_orignode(hdr);
1135 type = msg_nametype(hdr);
1136
1137 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1138 spin_lock_bh(&inputq->lock);
1139 if (skb_peek(arrvq) == skb) {
1140 __skb_dequeue(arrvq);
1141 __skb_queue_tail(inputq, skb);
1142 }
1143 kfree_skb(skb);
1144 spin_unlock_bh(&inputq->lock);
1145 continue;
1146 }
1147
1148 /* Group messages require exact scope match */
1149 if (msg_in_group(hdr)) {
1150 lower = 0;
1151 upper = ~0;
1152 scope = msg_lookup_scope(hdr);
1153 exact = true;
1154 } else {
1155 /* TIPC_NODE_SCOPE means "any scope" in this context */
1156 if (onode == self)
1157 scope = TIPC_NODE_SCOPE;
1158 else
1159 scope = TIPC_CLUSTER_SCOPE;
1160 exact = false;
1161 lower = msg_namelower(hdr);
1162 upper = msg_nameupper(hdr);
1163 }
1164
1165 /* Create destination port list: */
1166 tipc_nametbl_mc_lookup(net, type, lower, upper,
1167 scope, exact, &dports);
1168
1169 /* Clone message per destination */
1170 while (tipc_dest_pop(&dports, NULL, &portid)) {
1171 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
1172 if (_skb) {
1173 msg_set_destport(buf_msg(_skb), portid);
1174 __skb_queue_tail(&tmpq, _skb);
1175 continue;
1176 }
1177 pr_warn("Failed to clone mcast rcv buffer\n");
1178 }
1179 /* Append to inputq if not already done by other thread */
1180 spin_lock_bh(&inputq->lock);
1181 if (skb_peek(arrvq) == skb) {
1182 skb_queue_splice_tail_init(&tmpq, inputq);
1183 kfree_skb(__skb_dequeue(arrvq));
1184 }
1185 spin_unlock_bh(&inputq->lock);
1186 __skb_queue_purge(&tmpq);
1187 kfree_skb(skb);
1188 }
1189 tipc_sk_rcv(net, inputq);
1190 }
1191
1192 /**
1193 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
1194 * @tsk: receiving socket
1195 * @skb: pointer to message buffer.
1196 */
1197 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1198 struct sk_buff_head *xmitq)
1199 {
1200 struct tipc_msg *hdr = buf_msg(skb);
1201 u32 onode = tsk_own_node(tsk);
1202 struct sock *sk = &tsk->sk;
1203 int mtyp = msg_type(hdr);
1204 bool conn_cong;
1205
1206 /* Ignore if connection cannot be validated: */
1207 if (!tsk_peer_msg(tsk, hdr))
1208 goto exit;
1209
1210 if (unlikely(msg_errcode(hdr))) {
1211 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1212 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1213 tsk_peer_port(tsk));
1214 sk->sk_state_change(sk);
1215 goto exit;
1216 }
1217
1218 tsk->probe_unacked = false;
1219
1220 if (mtyp == CONN_PROBE) {
1221 msg_set_type(hdr, CONN_PROBE_REPLY);
1222 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1223 __skb_queue_tail(xmitq, skb);
1224 return;
1225 } else if (mtyp == CONN_ACK) {
1226 conn_cong = tsk_conn_cong(tsk);
1227 tsk->snt_unacked -= msg_conn_ack(hdr);
1228 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1229 tsk->snd_win = msg_adv_win(hdr);
1230 if (conn_cong)
1231 sk->sk_write_space(sk);
1232 } else if (mtyp != CONN_PROBE_REPLY) {
1233 pr_warn("Received unknown CONN_PROTO msg\n");
1234 }
1235 exit:
1236 kfree_skb(skb);
1237 }
1238
1239 /**
1240 * tipc_sendmsg - send message in connectionless manner
1241 * @sock: socket structure
1242 * @m: message to send
1243 * @dsz: amount of user data to be sent
1244 *
1245 * Message must have a destination specified explicitly.
1246 * Used for SOCK_RDM and SOCK_DGRAM messages,
1247 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1248 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
1249 *
1250 * Returns the number of bytes sent on success, or errno otherwise
1251 */
1252 static int tipc_sendmsg(struct socket *sock,
1253 struct msghdr *m, size_t dsz)
1254 {
1255 struct sock *sk = sock->sk;
1256 int ret;
1257
1258 lock_sock(sk);
1259 ret = __tipc_sendmsg(sock, m, dsz);
1260 release_sock(sk);
1261
1262 return ret;
1263 }
1264
1265 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1266 {
1267 struct sock *sk = sock->sk;
1268 struct net *net = sock_net(sk);
1269 struct tipc_sock *tsk = tipc_sk(sk);
1270 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1271 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1272 struct list_head *clinks = &tsk->cong_links;
1273 bool syn = !tipc_sk_type_connectionless(sk);
1274 struct tipc_group *grp = tsk->group;
1275 struct tipc_msg *hdr = &tsk->phdr;
1276 struct tipc_name_seq *seq;
1277 struct sk_buff_head pkts;
1278 u32 dport, dnode = 0;
1279 u32 type, inst;
1280 int mtu, rc;
1281
1282 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1283 return -EMSGSIZE;
1284
1285 if (likely(dest)) {
1286 if (unlikely(m->msg_namelen < sizeof(*dest)))
1287 return -EINVAL;
1288 if (unlikely(dest->family != AF_TIPC))
1289 return -EINVAL;
1290 }
1291
1292 if (grp) {
1293 if (!dest)
1294 return tipc_send_group_bcast(sock, m, dlen, timeout);
1295 if (dest->addrtype == TIPC_ADDR_NAME)
1296 return tipc_send_group_anycast(sock, m, dlen, timeout);
1297 if (dest->addrtype == TIPC_ADDR_ID)
1298 return tipc_send_group_unicast(sock, m, dlen, timeout);
1299 if (dest->addrtype == TIPC_ADDR_MCAST)
1300 return tipc_send_group_mcast(sock, m, dlen, timeout);
1301 return -EINVAL;
1302 }
1303
1304 if (unlikely(!dest)) {
1305 dest = &tsk->peer;
1306 if (!syn || dest->family != AF_TIPC)
1307 return -EDESTADDRREQ;
1308 }
1309
1310 if (unlikely(syn)) {
1311 if (sk->sk_state == TIPC_LISTEN)
1312 return -EPIPE;
1313 if (sk->sk_state != TIPC_OPEN)
1314 return -EISCONN;
1315 if (tsk->published)
1316 return -EOPNOTSUPP;
1317 if (dest->addrtype == TIPC_ADDR_NAME) {
1318 tsk->conn_type = dest->addr.name.name.type;
1319 tsk->conn_instance = dest->addr.name.name.instance;
1320 }
1321 }
1322
1323 seq = &dest->addr.nameseq;
1324 if (dest->addrtype == TIPC_ADDR_MCAST)
1325 return tipc_sendmcast(sock, seq, m, dlen, timeout);
1326
1327 if (dest->addrtype == TIPC_ADDR_NAME) {
1328 type = dest->addr.name.name.type;
1329 inst = dest->addr.name.name.instance;
1330 dnode = dest->addr.name.domain;
1331 msg_set_type(hdr, TIPC_NAMED_MSG);
1332 msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1333 msg_set_nametype(hdr, type);
1334 msg_set_nameinst(hdr, inst);
1335 msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
1336 dport = tipc_nametbl_translate(net, type, inst, &dnode);
1337 msg_set_destnode(hdr, dnode);
1338 msg_set_destport(hdr, dport);
1339 if (unlikely(!dport && !dnode))
1340 return -EHOSTUNREACH;
1341 } else if (dest->addrtype == TIPC_ADDR_ID) {
1342 dnode = dest->addr.id.node;
1343 msg_set_type(hdr, TIPC_DIRECT_MSG);
1344 msg_set_lookup_scope(hdr, 0);
1345 msg_set_destnode(hdr, dnode);
1346 msg_set_destport(hdr, dest->addr.id.ref);
1347 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1348 } else {
1349 return -EINVAL;
1350 }
1351
1352 /* Block or return if destination link is congested */
1353 rc = tipc_wait_for_cond(sock, &timeout,
1354 !tipc_dest_find(clinks, dnode, 0));
1355 if (unlikely(rc))
1356 return rc;
1357
1358 skb_queue_head_init(&pkts);
1359 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
1360 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1361 if (unlikely(rc != dlen))
1362 return rc;
1363
1364 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1365 if (unlikely(rc == -ELINKCONG)) {
1366 tipc_dest_push(clinks, dnode, 0);
1367 tsk->cong_link_cnt++;
1368 rc = 0;
1369 }
1370
1371 if (unlikely(syn && !rc))
1372 tipc_set_sk_state(sk, TIPC_CONNECTING);
1373
1374 return rc ? rc : dlen;
1375 }
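/*
 * Userspace sketch of the two addressing modes handled above (illustrative
 * only; values are arbitrary examples):
 *
 *   // By service name: the kernel translates (type, instance) to a
 *   // (node, port) via tipc_nametbl_translate() before sending.
 *   struct sockaddr_tipc dst = {
 *           .family = AF_TIPC,
 *           .addrtype = TIPC_ADDR_NAME,
 *           .addr.name.name = { .type = 18888, .instance = 17 },
 *           .addr.name.domain = 0,         // 0: cluster-wide lookup
 *   };
 *   sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 *   // By socket address, skipping the name table lookup:
 *   dst.addrtype = TIPC_ADDR_ID;
 *   dst.addr.id = peer_id;                 // a previously learned port id
 *   sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */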
1376
1377 /**
1378 * tipc_sendstream - send stream-oriented data
1379 * @sock: socket structure
1380 * @m: data to send
1381 * @dsz: total length of data to be transmitted
1382 *
1383 * Used for SOCK_STREAM data.
1384 *
1385 * Returns the number of bytes sent on success (or partial success),
1386 * or errno if no data sent
1387 */
1388 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1389 {
1390 struct sock *sk = sock->sk;
1391 int ret;
1392
1393 lock_sock(sk);
1394 ret = __tipc_sendstream(sock, m, dsz);
1395 release_sock(sk);
1396
1397 return ret;
1398 }
1399
1400 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1401 {
1402 struct sock *sk = sock->sk;
1403 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1404 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1405 struct tipc_sock *tsk = tipc_sk(sk);
1406 struct tipc_msg *hdr = &tsk->phdr;
1407 struct net *net = sock_net(sk);
1408 struct sk_buff_head pkts;
1409 u32 dnode = tsk_peer_node(tsk);
1410 int send, sent = 0;
1411 int rc = 0;
1412
1413 skb_queue_head_init(&pkts);
1414
1415 if (unlikely(dlen > INT_MAX))
1416 return -EMSGSIZE;
1417
1418 /* Handle implicit connection setup */
1419 if (unlikely(dest)) {
1420 rc = __tipc_sendmsg(sock, m, dlen);
1421 if (dlen && (dlen == rc))
1422 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1423 return rc;
1424 }
1425
1426 do {
1427 rc = tipc_wait_for_cond(sock, &timeout,
1428 (!tsk->cong_link_cnt &&
1429 !tsk_conn_cong(tsk) &&
1430 tipc_sk_connected(sk)));
1431 if (unlikely(rc))
1432 break;
1433
1434 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1435 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
1436 if (unlikely(rc != send))
1437 break;
1438
1439 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1440 if (unlikely(rc == -ELINKCONG)) {
1441 tsk->cong_link_cnt = 1;
1442 rc = 0;
1443 }
1444 if (likely(!rc)) {
1445 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
1446 sent += send;
1447 }
1448 } while (sent < dlen && !rc);
1449
1450 return sent ? sent : rc;
1451 }
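/*
 * Note on the loop above: each iteration sends at most
 * TIPC_MAX_USER_MSG_SIZE bytes, so a large send() is transmitted as a
 * sequence of messages and may legitimately return a short count if the
 * wait is interrupted midway. A userspace caller therefore loops in the
 * usual stream fashion (illustrative sketch only):
 *
 *   ssize_t n, off = 0;
 *   while (off < len) {
 *           n = send(sd, buf + off, len - off, 0);
 *           if (n < 0)
 *                   break;                 // inspect errno
 *           off += n;
 *   }
 */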
1452
1453 /**
1454 * tipc_send_packet - send a connection-oriented message
1455 * @sock: socket structure
1456 * @m: message to send
1457 * @dsz: length of data to be transmitted
1458 *
1459 * Used for SOCK_SEQPACKET messages.
1460 *
1461 * Returns the number of bytes sent on success, or errno otherwise
1462 */
1463 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1464 {
1465 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1466 return -EMSGSIZE;
1467
1468 return tipc_sendstream(sock, m, dsz);
1469 }
1470
1471 /* tipc_sk_finish_conn - complete the setup of a connection
1472 */
1473 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1474 u32 peer_node)
1475 {
1476 struct sock *sk = &tsk->sk;
1477 struct net *net = sock_net(sk);
1478 struct tipc_msg *msg = &tsk->phdr;
1479
1480 msg_set_destnode(msg, peer_node);
1481 msg_set_destport(msg, peer_port);
1482 msg_set_type(msg, TIPC_CONN_MSG);
1483 msg_set_lookup_scope(msg, 0);
1484 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1485
1486 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1487 tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1488 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1489 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1490 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1491 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1492 return;
1493
1494 /* Fall back to message based flow control */
1495 tsk->rcv_win = FLOWCTL_MSG_WIN;
1496 tsk->snd_win = FLOWCTL_MSG_WIN;
1497 }
1498
1499 /**
1500 * tipc_sk_set_orig_addr - capture sender's address for received message
1501 * @m: descriptor for message info
1502 * @hdr: received message header
1503 *
1504 * Note: Address is not captured if not requested by receiver.
1505 */
1506 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1507 {
1508 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1509 struct tipc_msg *hdr = buf_msg(skb);
1510
1511 if (!srcaddr)
1512 return;
1513
1514 srcaddr->sock.family = AF_TIPC;
1515 srcaddr->sock.addrtype = TIPC_ADDR_ID;
1516 srcaddr->sock.scope = 0;
1517 srcaddr->sock.addr.id.ref = msg_origport(hdr);
1518 srcaddr->sock.addr.id.node = msg_orignode(hdr);
1519 srcaddr->sock.addr.name.domain = 0;
1520 m->msg_namelen = sizeof(struct sockaddr_tipc);
1521
1522 if (!msg_in_group(hdr))
1523 return;
1524
1525 /* Group message users may also want to know sending member's id */
1526 srcaddr->member.family = AF_TIPC;
1527 srcaddr->member.addrtype = TIPC_ADDR_NAME;
1528 srcaddr->member.scope = 0;
1529 srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1530 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1531 srcaddr->member.addr.name.domain = 0;
1532 m->msg_namelen = sizeof(*srcaddr);
1533 }
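/*
 * Userspace sketch (illustrative only): to receive the member identity of
 * a group message sender, pass a msg_name buffer large enough for two
 * sockaddr_tipc entries; msg_namelen then tells which form was filled in:
 *
 *   struct sockaddr_tipc names[2];      // [0] socket id, [1] member name
 *   struct msghdr m = {
 *           .msg_name = names,
 *           .msg_namelen = sizeof(names),
 *   };
 *   recvmsg(sd, &m, 0);
 *   if (m.msg_namelen == sizeof(names))
 *           ;  // names[1].addr.name.name holds the sending member
 */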
1534
1535 /**
1536 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1537 * @m: descriptor for message info
1538 * @msg: received message header
1539 * @tsk: TIPC port associated with message
1540 *
1541 * Note: Ancillary data is not captured if not requested by receiver.
1542 *
1543 * Returns 0 if successful, otherwise errno
1544 */
1545 static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1546 struct tipc_sock *tsk)
1547 {
1548 u32 anc_data[3];
1549 u32 err;
1550 u32 dest_type;
1551 int has_name;
1552 int res;
1553
1554 if (likely(m->msg_controllen == 0))
1555 return 0;
1556
1557 /* Optionally capture errored message object(s) */
1558 err = msg ? msg_errcode(msg) : 0;
1559 if (unlikely(err)) {
1560 anc_data[0] = err;
1561 anc_data[1] = msg_data_sz(msg);
1562 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1563 if (res)
1564 return res;
1565 if (anc_data[1]) {
1566 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1567 msg_data(msg));
1568 if (res)
1569 return res;
1570 }
1571 }
1572
1573 /* Optionally capture message destination object */
1574 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1575 switch (dest_type) {
1576 case TIPC_NAMED_MSG:
1577 has_name = 1;
1578 anc_data[0] = msg_nametype(msg);
1579 anc_data[1] = msg_namelower(msg);
1580 anc_data[2] = msg_namelower(msg);
1581 break;
1582 case TIPC_MCAST_MSG:
1583 has_name = 1;
1584 anc_data[0] = msg_nametype(msg);
1585 anc_data[1] = msg_namelower(msg);
1586 anc_data[2] = msg_nameupper(msg);
1587 break;
1588 case TIPC_CONN_MSG:
1589 has_name = (tsk->conn_type != 0);
1590 anc_data[0] = tsk->conn_type;
1591 anc_data[1] = tsk->conn_instance;
1592 anc_data[2] = tsk->conn_instance;
1593 break;
1594 default:
1595 has_name = 0;
1596 }
1597 if (has_name) {
1598 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1599 if (res)
1600 return res;
1601 }
1602
1603 return 0;
1604 }
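/*
 * Userspace sketch of consuming the ancillary objects produced above
 * (illustrative only):
 *
 *   char cbuf[256];
 *   struct msghdr m = { .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
 *   struct cmsghdr *cm;
 *
 *   recvmsg(sd, &m, 0);
 *   for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
 *           if (cm->cmsg_level != SOL_TIPC)
 *                   continue;
 *           if (cm->cmsg_type == TIPC_ERRINFO)
 *                   ;  // u32 error code + u32 length of returned data
 *           else if (cm->cmsg_type == TIPC_RETDATA)
 *                   ;  // the rejected message payload
 *           else if (cm->cmsg_type == TIPC_DESTNAME)
 *                   ;  // u32 type, lower, upper of the destination name
 *   }
 */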
1605
1606 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1607 {
1608 struct sock *sk = &tsk->sk;
1609 struct net *net = sock_net(sk);
1610 struct sk_buff *skb = NULL;
1611 struct tipc_msg *msg;
1612 u32 peer_port = tsk_peer_port(tsk);
1613 u32 dnode = tsk_peer_node(tsk);
1614
1615 if (!tipc_sk_connected(sk))
1616 return;
1617 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1618 dnode, tsk_own_node(tsk), peer_port,
1619 tsk->portid, TIPC_OK);
1620 if (!skb)
1621 return;
1622 msg = buf_msg(skb);
1623 msg_set_conn_ack(msg, tsk->rcv_unacked);
1624 tsk->rcv_unacked = 0;
1625
1626 /* Adjust to and advertise the correct window limit */
1627 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1628 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1629 msg_set_adv_win(msg, tsk->rcv_win);
1630 }
1631 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1632 }
1633
1634 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1635 {
1636 struct sock *sk = sock->sk;
1637 DEFINE_WAIT(wait);
1638 long timeo = *timeop;
1639 int err = sock_error(sk);
1640
1641 if (err)
1642 return err;
1643
1644 for (;;) {
1645 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1646 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1647 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1648 err = -ENOTCONN;
1649 break;
1650 }
1651 release_sock(sk);
1652 timeo = schedule_timeout(timeo);
1653 lock_sock(sk);
1654 }
1655 err = 0;
1656 if (!skb_queue_empty(&sk->sk_receive_queue))
1657 break;
1658 err = -EAGAIN;
1659 if (!timeo)
1660 break;
1661 err = sock_intr_errno(timeo);
1662 if (signal_pending(current))
1663 break;
1664
1665 err = sock_error(sk);
1666 if (err)
1667 break;
1668 }
1669 finish_wait(sk_sleep(sk), &wait);
1670 *timeop = timeo;
1671 return err;
1672 }
1673
1674 /**
1675 * tipc_recvmsg - receive packet-oriented message
1676 * @m: descriptor for message info
1677 * @buflen: length of user buffer area
1678 * @flags: receive flags
1679 *
1680 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1681 * If the complete message doesn't fit in user area, truncate it.
1682 *
1683 * Returns size of returned message data, errno otherwise
1684 */
1685 static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1686 size_t buflen, int flags)
1687 {
1688 struct sock *sk = sock->sk;
1689 bool connected = !tipc_sk_type_connectionless(sk);
1690 struct tipc_sock *tsk = tipc_sk(sk);
1691 int rc, err, hlen, dlen, copy;
1692 struct sk_buff_head xmitq;
1693 struct tipc_msg *hdr;
1694 struct sk_buff *skb;
1695 bool grp_evt;
1696 long timeout;
1697
1698 /* Catch invalid receive requests */
1699 if (unlikely(!buflen))
1700 return -EINVAL;
1701
1702 lock_sock(sk);
1703 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1704 rc = -ENOTCONN;
1705 goto exit;
1706 }
1707 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1708
1709 /* Step rcv queue to first msg with data or error; wait if necessary */
1710 do {
1711 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1712 if (unlikely(rc))
1713 goto exit;
1714 skb = skb_peek(&sk->sk_receive_queue);
1715 hdr = buf_msg(skb);
1716 dlen = msg_data_sz(hdr);
1717 hlen = msg_hdr_sz(hdr);
1718 err = msg_errcode(hdr);
1719 grp_evt = msg_is_grp_evt(hdr);
1720 if (likely(dlen || err))
1721 break;
1722 tsk_advance_rx_queue(sk);
1723 } while (1);
1724
1725 /* Collect msg meta data, including error code and rejected data */
1726 tipc_sk_set_orig_addr(m, skb);
1727 rc = tipc_sk_anc_data_recv(m, hdr, tsk);
1728 if (unlikely(rc))
1729 goto exit;
1730
1731 /* Capture data if non-error msg, otherwise just set return value */
1732 if (likely(!err)) {
1733 copy = min_t(int, dlen, buflen);
1734 if (unlikely(copy != dlen))
1735 m->msg_flags |= MSG_TRUNC;
1736 rc = skb_copy_datagram_msg(skb, hlen, m, copy);
1737 } else {
1738 copy = 0;
1739 rc = 0;
1740 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
1741 rc = -ECONNRESET;
1742 }
1743 if (unlikely(rc))
1744 goto exit;
1745
1746 /* Mark message as group event if applicable */
1747 if (unlikely(grp_evt)) {
1748 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1749 m->msg_flags |= MSG_EOR;
1750 m->msg_flags |= MSG_OOB;
1751 copy = 0;
1752 }
1753
1754 /* Capture of data or error code/rejected data was successful */
1755 if (unlikely(flags & MSG_PEEK))
1756 goto exit;
1757
1758 /* Send group flow control advertisement when applicable */
1759 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1760 skb_queue_head_init(&xmitq);
1761 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1762 msg_orignode(hdr), msg_origport(hdr),
1763 &xmitq);
1764 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1765 }
1766
1767 tsk_advance_rx_queue(sk);
1768
1769 if (likely(!connected))
1770 goto exit;
1771
1772 /* Send connection flow control advertisement when applicable */
1773 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1774 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1775 tipc_sk_send_ack(tsk);
1776 exit:
1777 release_sock(sk);
1778 return rc ? rc : copy;
1779 }
1780
1781 /**
1782 * tipc_recvstream - receive stream-oriented data
1783 * @m: descriptor for message info
1784 * @buflen: total size of user buffer area
1785 * @flags: receive flags
1786 *
1787 * Used for SOCK_STREAM messages only. If not enough data is available
1788 * will optionally wait for more; never truncates data.
1789 *
1790 * Returns size of returned message data, errno otherwise
1791 */
1792 static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1793 size_t buflen, int flags)
1794 {
1795 struct sock *sk = sock->sk;
1796 struct tipc_sock *tsk = tipc_sk(sk);
1797 struct sk_buff *skb;
1798 struct tipc_msg *hdr;
1799 struct tipc_skb_cb *skb_cb;
1800 bool peek = flags & MSG_PEEK;
1801 int offset, required, copy, copied = 0;
1802 int hlen, dlen, err, rc;
1803 long timeout;
1804
1805 /* Catch invalid receive attempts */
1806 if (unlikely(!buflen))
1807 return -EINVAL;
1808
1809 lock_sock(sk);
1810
1811 if (unlikely(sk->sk_state == TIPC_OPEN)) {
1812 rc = -ENOTCONN;
1813 goto exit;
1814 }
1815 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1816 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1817
1818 do {
1819 /* Look at first msg in receive queue; wait if necessary */
1820 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1821 if (unlikely(rc))
1822 break;
1823 skb = skb_peek(&sk->sk_receive_queue);
1824 skb_cb = TIPC_SKB_CB(skb);
1825 hdr = buf_msg(skb);
1826 dlen = msg_data_sz(hdr);
1827 hlen = msg_hdr_sz(hdr);
1828 err = msg_errcode(hdr);
1829
1830 /* Discard any empty non-errored (SYN-) message */
1831 if (unlikely(!dlen && !err)) {
1832 tsk_advance_rx_queue(sk);
1833 continue;
1834 }
1835
1836 /* Collect msg meta data, incl. error code and rejected data */
1837 if (!copied) {
1838 tipc_sk_set_orig_addr(m, skb);
1839 rc = tipc_sk_anc_data_recv(m, hdr, tsk);
1840 if (rc)
1841 break;
1842 }
1843
1844 /* Copy data if msg ok, otherwise return error/partial data */
1845 if (likely(!err)) {
1846 offset = skb_cb->bytes_read;
1847 copy = min_t(int, dlen - offset, buflen - copied);
1848 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1849 if (unlikely(rc))
1850 break;
1851 copied += copy;
1852 offset += copy;
1853 if (unlikely(offset < dlen)) {
1854 if (!peek)
1855 skb_cb->bytes_read = offset;
1856 break;
1857 }
1858 } else {
1859 rc = 0;
1860 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1861 rc = -ECONNRESET;
1862 if (copied || rc)
1863 break;
1864 }
1865
1866 if (unlikely(peek))
1867 break;
1868
1869 tsk_advance_rx_queue(sk);
1870
1871 /* Send connection flow control advertisement when applicable */
1872 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1873 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1874 tipc_sk_send_ack(tsk);
1875
1876 /* Exit if all requested data or FIN/error received */
1877 if (copied == buflen || err)
1878 break;
1879
1880 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1881 exit:
1882 release_sock(sk);
1883 return copied ? copied : rc;
1884 }
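
/* Usage sketch: with MSG_WAITALL, sock_rcvlowat() above makes 'buflen'
 * the low-water mark, so a single call normally returns the full amount;
 * the loop here only covers signals and peer shutdown. The helper name
 * is illustrative.
 */
#if 0 /* illustrative example, not compiled with this file */
#include <sys/socket.h>

static int stream_read_full(int sd, void *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = recv(sd, (char *)buf + done, len - done,
				 MSG_WAITALL);
		if (n <= 0)
			return -1;	/* error, EOF or shutdown */
		done += n;
	}
	return 0;
}
#endif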
1885
1886 /**
1887 * tipc_write_space - wake up thread if port congestion is released
1888 * @sk: socket
1889 */
1890 static void tipc_write_space(struct sock *sk)
1891 {
1892 struct socket_wq *wq;
1893
1894 rcu_read_lock();
1895 wq = rcu_dereference(sk->sk_wq);
1896 if (skwq_has_sleeper(wq))
1897 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
1898 EPOLLWRNORM | EPOLLWRBAND);
1899 rcu_read_unlock();
1900 }
1901
1902 /**
1903 * tipc_data_ready - wake up threads to indicate messages have been received
1904 * @sk: socket
1906 */
1907 static void tipc_data_ready(struct sock *sk)
1908 {
1909 struct socket_wq *wq;
1910
1911 rcu_read_lock();
1912 wq = rcu_dereference(sk->sk_wq);
1913 if (skwq_has_sleeper(wq))
1914 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
1915 EPOLLRDNORM | EPOLLRDBAND);
1916 rcu_read_unlock();
1917 }
1918
1919 static void tipc_sock_destruct(struct sock *sk)
1920 {
1921 __skb_queue_purge(&sk->sk_receive_queue);
1922 }
1923
1924 static void tipc_sk_proto_rcv(struct sock *sk,
1925 struct sk_buff_head *inputq,
1926 struct sk_buff_head *xmitq)
1927 {
1928 struct sk_buff *skb = __skb_dequeue(inputq);
1929 struct tipc_sock *tsk = tipc_sk(sk);
1930 struct tipc_msg *hdr = buf_msg(skb);
1931 struct tipc_group *grp = tsk->group;
1932 bool wakeup = false;
1933
1934 switch (msg_user(hdr)) {
1935 case CONN_MANAGER:
1936 tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
1937 return;
1938 case SOCK_WAKEUP:
1939 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1940 tsk->cong_link_cnt--;
1941 wakeup = true;
1942 break;
1943 case GROUP_PROTOCOL:
1944 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
1945 break;
1946 case TOP_SRV:
1947 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
1948 hdr, inputq, xmitq);
1949 break;
1950 default:
1951 break;
1952 }
1953
1954 if (wakeup)
1955 sk->sk_write_space(sk);
1956
1957 kfree_skb(skb);
1958 }
1959
1960 /**
1961 * tipc_sk_filter_connect - Handle incoming message for a connection-based socket
1962 * @tsk: TIPC socket
1963 * @skb: pointer to message buffer
1964 *
1965 * Returns true if the message should be added to the receive queue, false otherwise
1966 */
1967 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1968 {
1969 struct sock *sk = &tsk->sk;
1970 struct net *net = sock_net(sk);
1971 struct tipc_msg *hdr = buf_msg(skb);
1972 u32 pport = msg_origport(hdr);
1973 u32 pnode = msg_orignode(hdr);
1974
1975 if (unlikely(msg_mcast(hdr)))
1976 return false;
1977
1978 switch (sk->sk_state) {
1979 case TIPC_CONNECTING:
1980 /* Accept only ACK or NACK message */
1981 if (unlikely(!msg_connected(hdr))) {
1982 if (pport != tsk_peer_port(tsk) ||
1983 pnode != tsk_peer_node(tsk))
1984 return false;
1985
1986 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1987 sk->sk_err = ECONNREFUSED;
1988 sk->sk_state_change(sk);
1989 return true;
1990 }
1991
1992 if (unlikely(msg_errcode(hdr))) {
1993 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1994 sk->sk_err = ECONNREFUSED;
1995 sk->sk_state_change(sk);
1996 return true;
1997 }
1998
1999 if (unlikely(!msg_isdata(hdr))) {
2000 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2001 sk->sk_err = EINVAL;
2002 sk->sk_state_change(sk);
2003 return true;
2004 }
2005
2006 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
2007 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2008
2009 /* If 'ACK+' message, add to socket receive queue */
2010 if (msg_data_sz(hdr))
2011 return true;
2012
2013 /* If empty 'ACK-' message, wake up sleeping connect() */
2014 sk->sk_data_ready(sk);
2015
2016 /* 'ACK-' message is neither accepted nor rejected: */
2017 msg_set_dest_droppable(hdr, 1);
2018 return false;
2019
2020 case TIPC_OPEN:
2021 case TIPC_DISCONNECTING:
2022 break;
2023 case TIPC_LISTEN:
2024 /* Accept only SYN message */
2025 if (!msg_connected(hdr) && !(msg_errcode(hdr)))
2026 return true;
2027 break;
2028 case TIPC_ESTABLISHED:
2029 /* Accept only connection-based messages sent by peer */
2030 if (unlikely(!tsk_peer_msg(tsk, hdr)))
2031 return false;
2032
2033 if (unlikely(msg_errcode(hdr))) {
2034 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2035 /* Let timer expire on its own */
2036 tipc_node_remove_conn(net, tsk_peer_node(tsk),
2037 tsk->portid);
2038 sk->sk_state_change(sk);
2039 }
2040 return true;
2041 default:
2042 pr_err("Unknown sk_state %u\n", sk->sk_state);
2043 }
2044
2045 return false;
2046 }
2047
2048 /**
2049 * rcvbuf_limit - get proper overload limit of socket receive queue
2050 * @sk: socket
2051 * @skb: message
2052 *
2053 * For connection-oriented messages, irrespective of importance,
2054 * default queue limit is 2 MB.
2055 *
2056 * For connectionless messages, queue limits are based on message
2057 * importance as follows:
2058 *
2059 * TIPC_LOW_IMPORTANCE (2 MB)
2060 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2061 * TIPC_HIGH_IMPORTANCE (8 MB)
2062 * TIPC_CRITICAL_IMPORTANCE (16 MB)
2063 *
2064 * Returns overload limit according to corresponding message importance
2065 */
2066 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2067 {
2068 struct tipc_sock *tsk = tipc_sk(sk);
2069 struct tipc_msg *hdr = buf_msg(skb);
2070
2071 if (unlikely(msg_in_group(hdr)))
2072 return sk->sk_rcvbuf;
2073
2074 if (unlikely(!msg_connected(hdr)))
2075 return sk->sk_rcvbuf << msg_importance(hdr);
2076
2077 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2078 return sk->sk_rcvbuf;
2079
2080 return FLOWCTL_MSG_LIM;
2081 }
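
/* Sketch of the arithmetic above: for connectionless messages the limit
 * is the socket rcvbuf left-shifted by importance, which with the 2 MB
 * default yields the 2/4/8/16 MB ladder from the comment. The helper is
 * illustrative only.
 */
#if 0 /* illustrative example, not compiled with this file */
static unsigned int example_rcv_limit(unsigned int sk_rcvbuf, int importance)
{
	/* importance 0..3 => 2 MB, 4 MB, 8 MB, 16 MB with a 2 MB rcvbuf */
	return sk_rcvbuf << importance;
}
#endif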
2082
2083 /**
2084 * tipc_sk_filter_rcv - validate incoming message
2085 * @sk: socket
2086 * @skb: pointer to message
2087 * @xmitq: output queue for messages to be sent by the caller
2088 * Enqueues message on receive queue if acceptable; optionally handles
2089 * disconnect indication for a connected socket.
2090 *
2091 * Called with socket lock already taken
2092 *
2093 */
2094 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2095 struct sk_buff_head *xmitq)
2096 {
2097 bool sk_conn = !tipc_sk_type_connectionless(sk);
2098 struct tipc_sock *tsk = tipc_sk(sk);
2099 struct tipc_group *grp = tsk->group;
2100 struct tipc_msg *hdr = buf_msg(skb);
2101 struct net *net = sock_net(sk);
2102 struct sk_buff_head inputq;
2103 int limit, err = TIPC_OK;
2104
2105 TIPC_SKB_CB(skb)->bytes_read = 0;
2106 __skb_queue_head_init(&inputq);
2107 __skb_queue_tail(&inputq, skb);
2108
2109 if (unlikely(!msg_isdata(hdr)))
2110 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2111
2112 if (unlikely(grp))
2113 tipc_group_filter_msg(grp, &inputq, xmitq);
2114
2115 /* Validate and add to receive buffer if there is space */
2116 while ((skb = __skb_dequeue(&inputq))) {
2117 hdr = buf_msg(skb);
2118 limit = rcvbuf_limit(sk, skb);
2119 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2120 (!sk_conn && msg_connected(hdr)) ||
2121 (!grp && msg_in_group(hdr)))
2122 err = TIPC_ERR_NO_PORT;
2123 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2124 atomic_inc(&sk->sk_drops);
2125 err = TIPC_ERR_OVERLOAD;
2126 }
2127
2128 if (unlikely(err)) {
2129 tipc_skb_reject(net, err, skb, xmitq);
2130 err = TIPC_OK;
2131 continue;
2132 }
2133 __skb_queue_tail(&sk->sk_receive_queue, skb);
2134 skb_set_owner_r(skb, sk);
2135 sk->sk_data_ready(sk);
2136 }
2137 }
2138
2139 /**
2140 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2141 * @sk: socket
2142 * @skb: message
2143 *
2144 * Caller must hold socket lock
2145 */
2146 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2147 {
2148 unsigned int before = sk_rmem_alloc_get(sk);
2149 struct sk_buff_head xmitq;
2150 unsigned int added;
2151
2152 __skb_queue_head_init(&xmitq);
2153
2154 tipc_sk_filter_rcv(sk, skb, &xmitq);
2155 added = sk_rmem_alloc_get(sk) - before;
2156 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2157
2158 /* Send pending response/rejected messages, if any */
2159 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2160 return 0;
2161 }
2162
2163 /**
2164 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2165 * inputq and try adding them to socket or backlog queue
2166 * @inputq: list of incoming buffers with potentially different destinations
2167 * @sk: socket where the buffers should be enqueued
2168 * @dport: port number for the socket
2169 * @xmitq: output queue for messages to be sent by the caller
2170 * Caller must hold socket lock
2171 */
2172 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2173 u32 dport, struct sk_buff_head *xmitq)
2174 {
2175 unsigned long time_limit = jiffies + 2;
2176 struct sk_buff *skb;
2177 unsigned int lim;
2178 atomic_t *dcnt;
2179 u32 onode;
2180
2181 while (skb_queue_len(inputq)) {
2182 if (unlikely(time_after_eq(jiffies, time_limit)))
2183 return;
2184
2185 skb = tipc_skb_dequeue(inputq, dport);
2186 if (unlikely(!skb))
2187 return;
2188
2189 /* Add message directly to receive queue if possible */
2190 if (!sock_owned_by_user(sk)) {
2191 tipc_sk_filter_rcv(sk, skb, xmitq);
2192 continue;
2193 }
2194
2195 /* Try backlog, compensating for double-counted bytes */
2196 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2197 if (!sk->sk_backlog.len)
2198 atomic_set(dcnt, 0);
2199 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2200 if (likely(!sk_add_backlog(sk, skb, lim)))
2201 continue;
2202
2203 /* Overload => reject message back to sender */
2204 onode = tipc_own_addr(sock_net(sk));
2205 atomic_inc(&sk->sk_drops);
2206 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
2207 __skb_queue_tail(xmitq, skb);
2208 break;
2209 }
2210 }
2211
2212 /**
2213 * tipc_sk_rcv - handle a chain of incoming buffers
2214 * @net: network namespace
2215 * @inputq: buffer list containing the buffers; consumed until inputq is empty
2216 * Note: may be called in multiple threads referring to the same queue
2217 */
2218 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2219 {
2220 struct sk_buff_head xmitq;
2221 u32 dnode, dport = 0;
2222 int err;
2223 struct tipc_sock *tsk;
2224 struct sock *sk;
2225 struct sk_buff *skb;
2226
2227 __skb_queue_head_init(&xmitq);
2228 while (skb_queue_len(inputq)) {
2229 dport = tipc_skb_peek_port(inputq, dport);
2230 tsk = tipc_sk_lookup(net, dport);
2231
2232 if (likely(tsk)) {
2233 sk = &tsk->sk;
2234 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2235 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2236 spin_unlock_bh(&sk->sk_lock.slock);
2237 }
2238 /* Send pending response/rejected messages, if any */
2239 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2240 sock_put(sk);
2241 continue;
2242 }
2243 /* No destination socket => dequeue skb if still there */
2244 skb = tipc_skb_dequeue(inputq, dport);
2245 if (!skb)
2246 return;
2247
2248 /* Try secondary lookup if unresolved named message */
2249 err = TIPC_ERR_NO_PORT;
2250 if (tipc_msg_lookup_dest(net, skb, &err))
2251 goto xmit;
2252
2253 /* Prepare for message rejection */
2254 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2255 continue;
2256 xmit:
2257 dnode = msg_destnode(buf_msg(skb));
2258 tipc_node_xmit_skb(net, skb, dnode, dport);
2259 }
2260 }
2261
2262 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2263 {
2264 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2265 struct sock *sk = sock->sk;
2266 int done;
2267
2268 do {
2269 int err = sock_error(sk);
2270 if (err)
2271 return err;
2272 if (!*timeo_p)
2273 return -ETIMEDOUT;
2274 if (signal_pending(current))
2275 return sock_intr_errno(*timeo_p);
2276
2277 add_wait_queue(sk_sleep(sk), &wait);
2278 done = sk_wait_event(sk, timeo_p,
2279 sk->sk_state != TIPC_CONNECTING, &wait);
2280 remove_wait_queue(sk_sleep(sk), &wait);
2281 } while (!done);
2282 return 0;
2283 }
2284
2285 /**
2286 * tipc_connect - establish a connection to another TIPC port
2287 * @sock: socket structure
2288 * @dest: socket address for destination port
2289 * @destlen: size of socket address data structure
2290 * @flags: file-related flags associated with socket
2291 *
2292 * Returns 0 on success, errno otherwise
2293 */
2294 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2295 int destlen, int flags)
2296 {
2297 struct sock *sk = sock->sk;
2298 struct tipc_sock *tsk = tipc_sk(sk);
2299 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2300 struct msghdr m = {NULL,};
2301 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2302 int previous;
2303 int res = 0;
2304
2305 if (destlen != sizeof(struct sockaddr_tipc))
2306 return -EINVAL;
2307
2308 lock_sock(sk);
2309
2310 if (tsk->group) {
2311 res = -EINVAL;
2312 goto exit;
2313 }
2314
2315 if (dst->family == AF_UNSPEC) {
2316 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2317 if (!tipc_sk_type_connectionless(sk))
2318 res = -EINVAL;
2319 goto exit;
2320 } else if (dst->family != AF_TIPC) {
2321 res = -EINVAL;
2322 }
2323 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
2324 res = -EINVAL;
2325 if (res)
2326 goto exit;
2327
2328 /* DGRAM/RDM connect(), just save the destaddr */
2329 if (tipc_sk_type_connectionless(sk)) {
2330 memcpy(&tsk->peer, dest, destlen);
2331 goto exit;
2332 }
2333
2334 previous = sk->sk_state;
2335
2336 switch (sk->sk_state) {
2337 case TIPC_OPEN:
2338 /* Send a 'SYN-' to destination */
2339 m.msg_name = dest;
2340 m.msg_namelen = destlen;
2341
2342 /* For a non-blocking connect, set MSG_DONTWAIT so that
2343 * __tipc_sendmsg() never blocks.
2344 */
2345 if (!timeout)
2346 m.msg_flags = MSG_DONTWAIT;
2347
2348 res = __tipc_sendmsg(sock, &m, 0);
2349 if ((res < 0) && (res != -EWOULDBLOCK))
2350 goto exit;
2351
2352 /* Just entered TIPC_CONNECTING state; the only
2353 * difference is that the return value in the non-blocking
2354 * case is EINPROGRESS rather than EALREADY.
2355 */
2356 res = -EINPROGRESS;
2357 /* fall through */
2358 case TIPC_CONNECTING:
2359 if (!timeout) {
2360 if (previous == TIPC_CONNECTING)
2361 res = -EALREADY;
2362 goto exit;
2363 }
2364 timeout = msecs_to_jiffies(timeout);
2365 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2366 res = tipc_wait_for_connect(sock, &timeout);
2367 break;
2368 case TIPC_ESTABLISHED:
2369 res = -EISCONN;
2370 break;
2371 default:
2372 res = -EINVAL;
2373 }
2374
2375 exit:
2376 release_sock(sk);
2377 return res;
2378 }
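
/* Usage sketch: a blocking connect() by service name, exercising the
 * TIPC_OPEN -> TIPC_CONNECTING -> TIPC_ESTABLISHED path above; a
 * non-blocking socket would see -EINPROGRESS instead. The helper name
 * and the service type/instance values are illustrative.
 */
#if 0 /* illustrative example, not compiled with this file */
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int connect_by_name(int sd, __u32 type, __u32 instance)
{
	struct sockaddr_tipc sa;

	memset(&sa, 0, sizeof(sa));
	sa.family = AF_TIPC;
	sa.addrtype = TIPC_ADDR_NAME;
	sa.addr.name.name.type = type;
	sa.addr.name.name.instance = instance;
	sa.addr.name.domain = 0;	/* look up in the whole cluster */
	return connect(sd, (struct sockaddr *)&sa, sizeof(sa));
}
#endif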
2379
2380 /**
2381 * tipc_listen - allow socket to listen for incoming connections
2382 * @sock: socket structure
2383 * @len: (unused)
2384 *
2385 * Returns 0 on success, errno otherwise
2386 */
2387 static int tipc_listen(struct socket *sock, int len)
2388 {
2389 struct sock *sk = sock->sk;
2390 int res;
2391
2392 lock_sock(sk);
2393 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2394 release_sock(sk);
2395
2396 return res;
2397 }
2398
2399 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2400 {
2401 struct sock *sk = sock->sk;
2402 DEFINE_WAIT(wait);
2403 int err;
2404
2405 /* True wake-one mechanism for incoming connections: only
2406 * one process gets woken up, not the 'whole herd'.
2407 * Since we do not 'race & poll' for established sockets
2408 * anymore, the common case will execute the loop only once.
2409 */
2410 for (;;) {
2411 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2412 TASK_INTERRUPTIBLE);
2413 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2414 release_sock(sk);
2415 timeo = schedule_timeout(timeo);
2416 lock_sock(sk);
2417 }
2418 err = 0;
2419 if (!skb_queue_empty(&sk->sk_receive_queue))
2420 break;
2421 err = -EAGAIN;
2422 if (!timeo)
2423 break;
2424 err = sock_intr_errno(timeo);
2425 if (signal_pending(current))
2426 break;
2427 }
2428 finish_wait(sk_sleep(sk), &wait);
2429 return err;
2430 }
2431
2432 /**
2433 * tipc_accept - wait for connection request
2434 * @sock: listening socket
2435 * @new_sock: new socket that is to be connected
2436 * @flags: file-related flags associated with socket
2437 * @kern: caused by kernel or by userspace?
2438 * Returns 0 on success, errno otherwise
2439 */
2440 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2441 bool kern)
2442 {
2443 struct sock *new_sk, *sk = sock->sk;
2444 struct sk_buff *buf;
2445 struct tipc_sock *new_tsock;
2446 struct tipc_msg *msg;
2447 long timeo;
2448 int res;
2449
2450 lock_sock(sk);
2451
2452 if (sk->sk_state != TIPC_LISTEN) {
2453 res = -EINVAL;
2454 goto exit;
2455 }
2456 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2457 res = tipc_wait_for_accept(sock, timeo);
2458 if (res)
2459 goto exit;
2460
2461 buf = skb_peek(&sk->sk_receive_queue);
2462
2463 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2464 if (res)
2465 goto exit;
2466 security_sk_clone(sock->sk, new_sock->sk);
2467
2468 new_sk = new_sock->sk;
2469 new_tsock = tipc_sk(new_sk);
2470 msg = buf_msg(buf);
2471
2472 /* we lock on new_sk; but lockdep sees the lock on sk */
2473 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2474
2475 /*
2476 * Reject any stray messages received by new socket
2477 * before the socket lock was taken (very, very unlikely)
2478 */
2479 tsk_rej_rx_queue(new_sk);
2480
2481 /* Connect new socket to its peer */
2482 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2483
2484 tsk_set_importance(new_tsock, msg_importance(msg));
2485 if (msg_named(msg)) {
2486 new_tsock->conn_type = msg_nametype(msg);
2487 new_tsock->conn_instance = msg_nameinst(msg);
2488 }
2489
2490 /*
2491 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2492 * Respond to 'SYN+' by queuing it on new socket.
2493 */
2494 if (!msg_data_sz(msg)) {
2495 struct msghdr m = {NULL,};
2496
2497 tsk_advance_rx_queue(sk);
2498 __tipc_sendstream(new_sock, &m, 0);
2499 } else {
2500 __skb_dequeue(&sk->sk_receive_queue);
2501 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2502 skb_set_owner_r(buf, new_sk);
2503 }
2504 release_sock(new_sk);
2505 exit:
2506 release_sock(sk);
2507 return res;
2508 }
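
/* Usage sketch: the matching server side of the SYN handling above. An
 * empty 'SYN-' is answered by the kernel with an 'ACK-'; a 'SYN+'
 * payload arrives as the first recv() on the accepted socket. The helper
 * name and backlog value are illustrative.
 */
#if 0 /* illustrative example, not compiled with this file */
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int serve_once(__u32 type, __u32 instance)
{
	struct sockaddr_tipc sa;
	int lsd = socket(AF_TIPC, SOCK_STREAM, 0);

	if (lsd < 0)
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.family = AF_TIPC;
	sa.addrtype = TIPC_ADDR_NAME;
	sa.scope = TIPC_CLUSTER_SCOPE;
	sa.addr.name.name.type = type;
	sa.addr.name.name.instance = instance;
	if (bind(lsd, (struct sockaddr *)&sa, sizeof(sa)) || listen(lsd, 8))
		return -1;
	return accept(lsd, NULL, NULL);	/* tipc_accept() above */
}
#endif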
2509
2510 /**
2511 * tipc_shutdown - shutdown socket connection
2512 * @sock: socket structure
2513 * @how: direction to close (must be SHUT_RDWR)
2514 *
2515 * Terminates connection (if necessary), then purges socket's receive queue.
2516 *
2517 * Returns 0 on success, errno otherwise
2518 */
2519 static int tipc_shutdown(struct socket *sock, int how)
2520 {
2521 struct sock *sk = sock->sk;
2522 int res;
2523
2524 if (how != SHUT_RDWR)
2525 return -EINVAL;
2526
2527 lock_sock(sk);
2528
2529 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2530 sk->sk_shutdown = SEND_SHUTDOWN;
2531
2532 if (sk->sk_state == TIPC_DISCONNECTING) {
2533 /* Discard any unreceived messages */
2534 __skb_queue_purge(&sk->sk_receive_queue);
2535
2536 /* Wake up anyone sleeping in poll */
2537 sk->sk_state_change(sk);
2538 res = 0;
2539 } else {
2540 res = -ENOTCONN;
2541 }
2542
2543 release_sock(sk);
2544 return res;
2545 }
2546
2547 static void tipc_sk_timeout(struct timer_list *t)
2548 {
2549 struct sock *sk = from_timer(sk, t, sk_timer);
2550 struct tipc_sock *tsk = tipc_sk(sk);
2551 u32 peer_port = tsk_peer_port(tsk);
2552 u32 peer_node = tsk_peer_node(tsk);
2553 u32 own_node = tsk_own_node(tsk);
2554 u32 own_port = tsk->portid;
2555 struct net *net = sock_net(sk);
2556 struct sk_buff *skb = NULL;
2557
2558 bh_lock_sock(sk);
2559 if (!tipc_sk_connected(sk))
2560 goto exit;
2561
2562 /* Try again later if socket is busy */
2563 if (sock_owned_by_user(sk)) {
2564 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2565 goto exit;
2566 }
2567
2568 if (tsk->probe_unacked) {
2569 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2570 tipc_node_remove_conn(net, peer_node, peer_port);
2571 sk->sk_state_change(sk);
2572 goto exit;
2573 }
2574 /* Send new probe */
2575 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2576 peer_node, own_node, peer_port, own_port,
2577 TIPC_OK);
2578 tsk->probe_unacked = true;
2579 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2580 exit:
2581 bh_unlock_sock(sk);
2582 if (skb)
2583 tipc_node_xmit_skb(net, skb, peer_node, own_port);
2584 sock_put(sk);
2585 }
2586
2587 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2588 struct tipc_name_seq const *seq)
2589 {
2590 struct sock *sk = &tsk->sk;
2591 struct net *net = sock_net(sk);
2592 struct publication *publ;
2593 u32 key;
2594
2595 if (scope != TIPC_NODE_SCOPE)
2596 scope = TIPC_CLUSTER_SCOPE;
2597
2598 if (tipc_sk_connected(sk))
2599 return -EINVAL;
2600 key = tsk->portid + tsk->pub_count + 1;
2601 if (key == tsk->portid)
2602 return -EADDRINUSE;
2603
2604 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2605 scope, tsk->portid, key);
2606 if (unlikely(!publ))
2607 return -EINVAL;
2608
2609 list_add(&publ->binding_sock, &tsk->publications);
2610 tsk->pub_count++;
2611 tsk->published = 1;
2612 return 0;
2613 }
2614
2615 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2616 struct tipc_name_seq const *seq)
2617 {
2618 struct net *net = sock_net(&tsk->sk);
2619 struct publication *publ;
2620 struct publication *safe;
2621 int rc = -EINVAL;
2622
2623 if (scope != TIPC_NODE_SCOPE)
2624 scope = TIPC_CLUSTER_SCOPE;
2625
2626 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2627 if (seq) {
2628 if (publ->scope != scope)
2629 continue;
2630 if (publ->type != seq->type)
2631 continue;
2632 if (publ->lower != seq->lower)
2633 continue;
2634 if (publ->upper != seq->upper)
2635 break;
2636 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2637 publ->upper, publ->key);
2638 rc = 0;
2639 break;
2640 }
2641 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2642 publ->upper, publ->key);
2643 rc = 0;
2644 }
2645 if (list_empty(&tsk->publications))
2646 tsk->published = 0;
2647 return rc;
2648 }
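
/* Usage sketch: publication from userspace. bind() with a name sequence
 * drives tipc_sk_publish() above; re-binding the same range with a
 * negative scope (or closing the socket) drives tipc_sk_withdraw(), see
 * tipc_bind() earlier in this file. The helper name is illustrative.
 */
#if 0 /* illustrative example, not compiled with this file */
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int publish_range(int sd, __u32 type, __u32 lower, __u32 upper)
{
	struct sockaddr_tipc sa;

	memset(&sa, 0, sizeof(sa));
	sa.family = AF_TIPC;
	sa.addrtype = TIPC_ADDR_NAMESEQ;
	sa.scope = TIPC_CLUSTER_SCOPE;
	sa.addr.nameseq.type = type;
	sa.addr.nameseq.lower = lower;
	sa.addr.nameseq.upper = upper;
	return bind(sd, (struct sockaddr *)&sa, sizeof(sa));
}
#endif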
2649
2650 /* tipc_sk_reinit: set non-zero address in all existing sockets
2651 * when we go from standalone to network mode.
2652 */
2653 void tipc_sk_reinit(struct net *net)
2654 {
2655 struct tipc_net *tn = net_generic(net, tipc_net_id);
2656 struct rhashtable_iter iter;
2657 struct tipc_sock *tsk;
2658 struct tipc_msg *msg;
2659
2660 rhashtable_walk_enter(&tn->sk_rht, &iter);
2661
2662 do {
2663 rhashtable_walk_start(&iter);
2664
2665 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2666 spin_lock_bh(&tsk->sk.sk_lock.slock);
2667 msg = &tsk->phdr;
2668 msg_set_prevnode(msg, tipc_own_addr(net));
2669 msg_set_orignode(msg, tipc_own_addr(net));
2670 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2671 }
2672
2673 rhashtable_walk_stop(&iter);
2674 } while (tsk == ERR_PTR(-EAGAIN));
2675 }
2676
2677 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2678 {
2679 struct tipc_net *tn = net_generic(net, tipc_net_id);
2680 struct tipc_sock *tsk;
2681
2682 rcu_read_lock();
2683 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2684 if (tsk)
2685 sock_hold(&tsk->sk);
2686 rcu_read_unlock();
2687
2688 return tsk;
2689 }
2690
2691 static int tipc_sk_insert(struct tipc_sock *tsk)
2692 {
2693 struct sock *sk = &tsk->sk;
2694 struct net *net = sock_net(sk);
2695 struct tipc_net *tn = net_generic(net, tipc_net_id);
2696 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2697 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2698
2699 while (remaining--) {
2700 portid++;
2701 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2702 portid = TIPC_MIN_PORT;
2703 tsk->portid = portid;
2704 sock_hold(&tsk->sk);
2705 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2706 tsk_rht_params))
2707 return 0;
2708 sock_put(&tsk->sk);
2709 }
2710
2711 return -1;
2712 }
2713
2714 static void tipc_sk_remove(struct tipc_sock *tsk)
2715 {
2716 struct sock *sk = &tsk->sk;
2717 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2718
2719 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2720 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2721 __sock_put(sk);
2722 }
2723 }
2724
2725 static const struct rhashtable_params tsk_rht_params = {
2726 .nelem_hint = 192,
2727 .head_offset = offsetof(struct tipc_sock, node),
2728 .key_offset = offsetof(struct tipc_sock, portid),
2729 .key_len = sizeof(u32), /* portid */
2730 .max_size = 1048576,
2731 .min_size = 256,
2732 .automatic_shrinking = true,
2733 };
2734
2735 int tipc_sk_rht_init(struct net *net)
2736 {
2737 struct tipc_net *tn = net_generic(net, tipc_net_id);
2738
2739 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2740 }
2741
2742 void tipc_sk_rht_destroy(struct net *net)
2743 {
2744 struct tipc_net *tn = net_generic(net, tipc_net_id);
2745
2746 /* Wait for socket readers to complete */
2747 synchronize_net();
2748
2749 rhashtable_destroy(&tn->sk_rht);
2750 }
2751
2752 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2753 {
2754 struct net *net = sock_net(&tsk->sk);
2755 struct tipc_group *grp = tsk->group;
2756 struct tipc_msg *hdr = &tsk->phdr;
2757 struct tipc_name_seq seq;
2758 int rc;
2759
2760 if (mreq->type < TIPC_RESERVED_TYPES)
2761 return -EACCES;
2762 if (mreq->scope > TIPC_NODE_SCOPE)
2763 return -EINVAL;
2764 if (grp)
2765 return -EACCES;
2766 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
2767 if (!grp)
2768 return -ENOMEM;
2769 tsk->group = grp;
2770 msg_set_lookup_scope(hdr, mreq->scope);
2771 msg_set_nametype(hdr, mreq->type);
2772 msg_set_dest_droppable(hdr, true);
2773 seq.type = mreq->type;
2774 seq.lower = mreq->instance;
2775 seq.upper = seq.lower;
2776 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
2777 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2778 if (rc) {
2779 tipc_group_delete(net, grp);
2780 tsk->group = NULL;
2781 return rc;
2782 }
2783 /* Eliminate any risk that a broadcast overtakes sent JOINs */
2784 tsk->mc_method.rcast = true;
2785 tsk->mc_method.mandatory = true;
2786 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
2787 return rc;
2788 }
2789
2790 static int tipc_sk_leave(struct tipc_sock *tsk)
2791 {
2792 struct net *net = sock_net(&tsk->sk);
2793 struct tipc_group *grp = tsk->group;
2794 struct tipc_name_seq seq;
2795 int scope;
2796
2797 if (!grp)
2798 return -EINVAL;
2799 tipc_group_self(grp, &seq, &scope);
2800 tipc_group_delete(net, grp);
2801 tsk->group = NULL;
2802 tipc_sk_withdraw(tsk, scope, &seq);
2803 return 0;
2804 }
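
/* Usage sketch: joining a group from userspace via the TIPC_GROUP_JOIN
 * option handled by tipc_sk_join() above. The request publishes
 * <type,instance> and subscribes the socket to membership events (which
 * tipc_recvmsg() above flags with MSG_OOB). The helper name and the
 * TIPC_GROUP_LOOPBACK choice are illustrative.
 */
#if 0 /* illustrative example, not compiled with this file */
#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int join_group(int sd, __u32 type, __u32 instance)
{
	struct tipc_group_req req;

	memset(&req, 0, sizeof(req));
	req.type = type;	/* must be >= TIPC_RESERVED_TYPES */
	req.instance = instance;
	req.scope = TIPC_CLUSTER_SCOPE;
	req.flags = TIPC_GROUP_LOOPBACK;	/* also receive own msgs */
	return setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
}
#endif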
2805
2806 /**
2807 * tipc_setsockopt - set socket option
2808 * @sock: socket structure
2809 * @lvl: option level
2810 * @opt: option identifier
2811 * @ov: pointer to new option value
2812 * @ol: length of option value
2813 *
2814 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2815 * (to ease compatibility).
2816 *
2817 * Returns 0 on success, errno otherwise
2818 */
2819 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2820 char __user *ov, unsigned int ol)
2821 {
2822 struct sock *sk = sock->sk;
2823 struct tipc_sock *tsk = tipc_sk(sk);
2824 struct tipc_group_req mreq;
2825 u32 value = 0;
2826 int res = 0;
2827
2828 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2829 return 0;
2830 if (lvl != SOL_TIPC)
2831 return -ENOPROTOOPT;
2832
2833 switch (opt) {
2834 case TIPC_IMPORTANCE:
2835 case TIPC_SRC_DROPPABLE:
2836 case TIPC_DEST_DROPPABLE:
2837 case TIPC_CONN_TIMEOUT:
2838 if (ol < sizeof(value))
2839 return -EINVAL;
2840 if (get_user(value, (u32 __user *)ov))
2841 return -EFAULT;
2842 break;
2843 case TIPC_GROUP_JOIN:
2844 if (ol < sizeof(mreq))
2845 return -EINVAL;
2846 if (copy_from_user(&mreq, ov, sizeof(mreq)))
2847 return -EFAULT;
2848 break;
2849 default:
2850 if (ov || ol)
2851 return -EINVAL;
2852 }
2853
2854 lock_sock(sk);
2855
2856 switch (opt) {
2857 case TIPC_IMPORTANCE:
2858 res = tsk_set_importance(tsk, value);
2859 break;
2860 case TIPC_SRC_DROPPABLE:
2861 if (sock->type != SOCK_STREAM)
2862 tsk_set_unreliable(tsk, value);
2863 else
2864 res = -ENOPROTOOPT;
2865 break;
2866 case TIPC_DEST_DROPPABLE:
2867 tsk_set_unreturnable(tsk, value);
2868 break;
2869 case TIPC_CONN_TIMEOUT:
2870 tipc_sk(sk)->conn_timeout = value;
2871 break;
2872 case TIPC_MCAST_BROADCAST:
2873 tsk->mc_method.rcast = false;
2874 tsk->mc_method.mandatory = true;
2875 break;
2876 case TIPC_MCAST_REPLICAST:
2877 tsk->mc_method.rcast = true;
2878 tsk->mc_method.mandatory = true;
2879 break;
2880 case TIPC_GROUP_JOIN:
2881 res = tipc_sk_join(tsk, &mreq);
2882 break;
2883 case TIPC_GROUP_LEAVE:
2884 res = tipc_sk_leave(tsk);
2885 break;
2886 default:
2887 res = -EINVAL;
2888 }
2889
2890 release_sock(sk);
2891
2892 return res;
2893 }
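
/* Usage sketch: two of the SOL_TIPC options handled above. Importance
 * widens the receiver's rcvbuf_limit() shift for connectionless traffic;
 * TIPC_CONN_TIMEOUT is the connect() timeout in milliseconds used by
 * tipc_connect(). The helper name and values are illustrative.
 */
#if 0 /* illustrative example, not compiled with this file */
#include <sys/socket.h>
#include <linux/tipc.h>

static int tune_sock(int sd)
{
	__u32 imp = TIPC_HIGH_IMPORTANCE;
	__u32 tmo = 3000;	/* ms */

	if (setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp)))
		return -1;
	return setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
}
#endif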
2894
2895 /**
2896 * tipc_getsockopt - get socket option
2897 * @sock: socket structure
2898 * @lvl: option level
2899 * @opt: option identifier
2900 * @ov: receptacle for option value
2901 * @ol: receptacle for length of option value
2902 *
2903 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2904 * (to ease compatibility).
2905 *
2906 * Returns 0 on success, errno otherwise
2907 */
2908 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2909 char __user *ov, int __user *ol)
2910 {
2911 struct sock *sk = sock->sk;
2912 struct tipc_sock *tsk = tipc_sk(sk);
2913 struct tipc_name_seq seq;
2914 int len, scope;
2915 u32 value;
2916 int res;
2917
2918 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2919 return put_user(0, ol);
2920 if (lvl != SOL_TIPC)
2921 return -ENOPROTOOPT;
2922 res = get_user(len, ol);
2923 if (res)
2924 return res;
2925
2926 lock_sock(sk);
2927
2928 switch (opt) {
2929 case TIPC_IMPORTANCE:
2930 value = tsk_importance(tsk);
2931 break;
2932 case TIPC_SRC_DROPPABLE:
2933 value = tsk_unreliable(tsk);
2934 break;
2935 case TIPC_DEST_DROPPABLE:
2936 value = tsk_unreturnable(tsk);
2937 break;
2938 case TIPC_CONN_TIMEOUT:
2939 value = tsk->conn_timeout;
2940 /* no need to set "res", since already 0 at this point */
2941 break;
2942 case TIPC_NODE_RECVQ_DEPTH:
2943 value = 0; /* was tipc_queue_size, now obsolete */
2944 break;
2945 case TIPC_SOCK_RECVQ_DEPTH:
2946 value = skb_queue_len(&sk->sk_receive_queue);
2947 break;
2948 case TIPC_GROUP_JOIN:
2949 seq.type = 0;
2950 if (tsk->group)
2951 tipc_group_self(tsk->group, &seq, &scope);
2952 value = seq.type;
2953 break;
2954 default:
2955 res = -EINVAL;
2956 }
2957
2958 release_sock(sk);
2959
2960 if (res)
2961 return res; /* "get" failed */
2962
2963 if (len < sizeof(value))
2964 return -EINVAL;
2965
2966 if (copy_to_user(ov, &value, sizeof(value)))
2967 return -EFAULT;
2968
2969 return put_user(sizeof(value), ol);
2970 }
2971
2972 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2973 {
2974 struct net *net = sock_net(sock->sk);
2975 struct tipc_sioc_nodeid_req nr = {0};
2976 struct tipc_sioc_ln_req lnr;
2977 void __user *argp = (void __user *)arg;
2978
2979 switch (cmd) {
2980 case SIOCGETLINKNAME:
2981 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2982 return -EFAULT;
2983 if (!tipc_node_get_linkname(net,
2984 lnr.bearer_id & 0xffff, lnr.peer,
2985 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2986 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2987 return -EFAULT;
2988 return 0;
2989 }
2990 return -EADDRNOTAVAIL;
2991 case SIOCGETNODEID:
2992 if (copy_from_user(&nr, argp, sizeof(nr)))
2993 return -EFAULT;
2994 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
2995 return -EADDRNOTAVAIL;
2996 if (copy_to_user(argp, &nr, sizeof(nr)))
2997 return -EFAULT;
2998 return 0;
2999 default:
3000 return -ENOIOCTLCMD;
3001 }
3002 }
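
/* Usage sketch: resolving a peer's 128-bit node identity through the
 * SIOCGETNODEID ioctl above; 'addr' is the peer's hash address as seen
 * in sockaddr_tipc.addr.id.node. The helper name is illustrative.
 */
#if 0 /* illustrative example, not compiled with this file */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/tipc.h>

static int get_node_id(int sd, __u32 addr, char id[TIPC_NODEID_LEN])
{
	struct tipc_sioc_nodeid_req nr;

	memset(&nr, 0, sizeof(nr));
	nr.peer = addr;
	if (ioctl(sd, SIOCGETNODEID, &nr) < 0)
		return -1;
	memcpy(id, nr.node_id, TIPC_NODEID_LEN);
	return 0;
}
#endif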
3003
3004 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3005 {
3006 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3007 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3008 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3009
3010 tsk1->peer.family = AF_TIPC;
3011 tsk1->peer.addrtype = TIPC_ADDR_ID;
3012 tsk1->peer.scope = TIPC_NODE_SCOPE;
3013 tsk1->peer.addr.id.ref = tsk2->portid;
3014 tsk1->peer.addr.id.node = onode;
3015 tsk2->peer.family = AF_TIPC;
3016 tsk2->peer.addrtype = TIPC_ADDR_ID;
3017 tsk2->peer.scope = TIPC_NODE_SCOPE;
3018 tsk2->peer.addr.id.ref = tsk1->portid;
3019 tsk2->peer.addr.id.node = onode;
3020
3021 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3022 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3023 return 0;
3024 }
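
/* Usage sketch: because tipc_socketpair() above cross-connects the two
 * endpoints on the own node, AF_TIPC supports the plain socketpair()
 * call. The helper name is illustrative.
 */
#if 0 /* illustrative example, not compiled with this file */
#include <sys/socket.h>
#include <linux/tipc.h>

static int make_pair(int sv[2])
{
	return socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv);
}
#endif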
3025
3026 /* Protocol switches for the various types of TIPC sockets */
3027
3028 static const struct proto_ops msg_ops = {
3029 .owner = THIS_MODULE,
3030 .family = AF_TIPC,
3031 .release = tipc_release,
3032 .bind = tipc_bind,
3033 .connect = tipc_connect,
3034 .socketpair = tipc_socketpair,
3035 .accept = sock_no_accept,
3036 .getname = tipc_getname,
3037 .poll = tipc_poll,
3038 .ioctl = tipc_ioctl,
3039 .listen = sock_no_listen,
3040 .shutdown = tipc_shutdown,
3041 .setsockopt = tipc_setsockopt,
3042 .getsockopt = tipc_getsockopt,
3043 .sendmsg = tipc_sendmsg,
3044 .recvmsg = tipc_recvmsg,
3045 .mmap = sock_no_mmap,
3046 .sendpage = sock_no_sendpage
3047 };
3048
3049 static const struct proto_ops packet_ops = {
3050 .owner = THIS_MODULE,
3051 .family = AF_TIPC,
3052 .release = tipc_release,
3053 .bind = tipc_bind,
3054 .connect = tipc_connect,
3055 .socketpair = tipc_socketpair,
3056 .accept = tipc_accept,
3057 .getname = tipc_getname,
3058 .poll = tipc_poll,
3059 .ioctl = tipc_ioctl,
3060 .listen = tipc_listen,
3061 .shutdown = tipc_shutdown,
3062 .setsockopt = tipc_setsockopt,
3063 .getsockopt = tipc_getsockopt,
3064 .sendmsg = tipc_send_packet,
3065 .recvmsg = tipc_recvmsg,
3066 .mmap = sock_no_mmap,
3067 .sendpage = sock_no_sendpage
3068 };
3069
3070 static const struct proto_ops stream_ops = {
3071 .owner = THIS_MODULE,
3072 .family = AF_TIPC,
3073 .release = tipc_release,
3074 .bind = tipc_bind,
3075 .connect = tipc_connect,
3076 .socketpair = tipc_socketpair,
3077 .accept = tipc_accept,
3078 .getname = tipc_getname,
3079 .poll = tipc_poll,
3080 .ioctl = tipc_ioctl,
3081 .listen = tipc_listen,
3082 .shutdown = tipc_shutdown,
3083 .setsockopt = tipc_setsockopt,
3084 .getsockopt = tipc_getsockopt,
3085 .sendmsg = tipc_sendstream,
3086 .recvmsg = tipc_recvstream,
3087 .mmap = sock_no_mmap,
3088 .sendpage = sock_no_sendpage
3089 };
3090
3091 static const struct net_proto_family tipc_family_ops = {
3092 .owner = THIS_MODULE,
3093 .family = AF_TIPC,
3094 .create = tipc_sk_create
3095 };
3096
3097 static struct proto tipc_proto = {
3098 .name = "TIPC",
3099 .owner = THIS_MODULE,
3100 .obj_size = sizeof(struct tipc_sock),
3101 .sysctl_rmem = sysctl_tipc_rmem
3102 };
3103
3104 /**
3105 * tipc_socket_init - initialize TIPC socket interface
3106 *
3107 * Returns 0 on success, errno otherwise
3108 */
3109 int tipc_socket_init(void)
3110 {
3111 int res;
3112
3113 res = proto_register(&tipc_proto, 1);
3114 if (res) {
3115 pr_err("Failed to register TIPC protocol type\n");
3116 goto out;
3117 }
3118
3119 res = sock_register(&tipc_family_ops);
3120 if (res) {
3121 pr_err("Failed to register TIPC socket type\n");
3122 proto_unregister(&tipc_proto);
3123 goto out;
3124 }
3125 out:
3126 return res;
3127 }
3128
3129 /**
3130 * tipc_socket_stop - stop TIPC socket interface
3131 */
3132 void tipc_socket_stop(void)
3133 {
3134 sock_unregister(tipc_family_ops.family);
3135 proto_unregister(&tipc_proto);
3136 }
3137
3138 /* Caller should hold socket lock for the passed tipc socket. */
3139 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3140 {
3141 u32 peer_node;
3142 u32 peer_port;
3143 struct nlattr *nest;
3144
3145 peer_node = tsk_peer_node(tsk);
3146 peer_port = tsk_peer_port(tsk);
3147
3148 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
3149
3150 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3151 goto msg_full;
3152 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3153 goto msg_full;
3154
3155 if (tsk->conn_type != 0) {
3156 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3157 goto msg_full;
3158 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3159 goto msg_full;
3160 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3161 goto msg_full;
3162 }
3163 nla_nest_end(skb, nest);
3164
3165 return 0;
3166
3167 msg_full:
3168 nla_nest_cancel(skb, nest);
3169
3170 return -EMSGSIZE;
3171 }
3172
3173 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock *tsk)
3175 {
3176 struct net *net = sock_net(skb->sk);
3177 struct sock *sk = &tsk->sk;
3178
3179 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3180 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3181 return -EMSGSIZE;
3182
3183 if (tipc_sk_connected(sk)) {
3184 if (__tipc_nl_add_sk_con(skb, tsk))
3185 return -EMSGSIZE;
3186 } else if (!list_empty(&tsk->publications)) {
3187 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3188 return -EMSGSIZE;
3189 }
3190 return 0;
3191 }
3192
3193 /* Caller should hold socket lock for the passed tipc socket. */
3194 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3195 struct tipc_sock *tsk)
3196 {
3197 struct nlattr *attrs;
3198 void *hdr;
3199
3200 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3201 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3202 if (!hdr)
3203 goto msg_cancel;
3204
3205 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3206 if (!attrs)
3207 goto genlmsg_cancel;
3208
3209 if (__tipc_nl_add_sk_info(skb, tsk))
3210 goto attr_msg_cancel;
3211
3212 nla_nest_end(skb, attrs);
3213 genlmsg_end(skb, hdr);
3214
3215 return 0;
3216
3217 attr_msg_cancel:
3218 nla_nest_cancel(skb, attrs);
3219 genlmsg_cancel:
3220 genlmsg_cancel(skb, hdr);
3221 msg_cancel:
3222 return -EMSGSIZE;
3223 }
3224
3225 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3226 int (*skb_handler)(struct sk_buff *skb,
3227 struct netlink_callback *cb,
3228 struct tipc_sock *tsk))
3229 {
3230 struct net *net = sock_net(skb->sk);
3231 struct tipc_net *tn = tipc_net(net);
3232 const struct bucket_table *tbl;
3233 u32 prev_portid = cb->args[1];
3234 u32 tbl_id = cb->args[0];
3235 struct rhash_head *pos;
3236 struct tipc_sock *tsk;
3237 int err;
3238
3239 rcu_read_lock();
3240 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
3241 for (; tbl_id < tbl->size; tbl_id++) {
3242 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
3243 spin_lock_bh(&tsk->sk.sk_lock.slock);
3244 if (prev_portid && prev_portid != tsk->portid) {
3245 spin_unlock_bh(&tsk->sk.sk_lock.slock);
3246 continue;
3247 }
3248
3249 err = skb_handler(skb, cb, tsk);
3250 if (err) {
3251 prev_portid = tsk->portid;
3252 spin_unlock_bh(&tsk->sk.sk_lock.slock);
3253 goto out;
3254 }
3255
3256 prev_portid = 0;
3257 spin_unlock_bh(&tsk->sk.sk_lock.slock);
3258 }
3259 }
3260 out:
3261 rcu_read_unlock();
3262 cb->args[0] = tbl_id;
3263 cb->args[1] = prev_portid;
3264
3265 return skb->len;
3266 }
3267 EXPORT_SYMBOL(tipc_nl_sk_walk);
3268
3269 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3270 struct tipc_sock *tsk, u32 sk_filter_state,
3271 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3272 {
3273 struct sock *sk = &tsk->sk;
3274 struct nlattr *attrs;
3275 struct nlattr *stat;
3276
3277 /* filter response w.r.t. sk_state */
3278 if (!(sk_filter_state & (1 << sk->sk_state)))
3279 return 0;
3280
3281 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3282 if (!attrs)
3283 goto msg_cancel;
3284
3285 if (__tipc_nl_add_sk_info(skb, tsk))
3286 goto attr_msg_cancel;
3287
3288 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3289 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3290 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3291 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3292 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3293 sock_i_uid(sk))) ||
3294 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3295 tipc_diag_gen_cookie(sk),
3296 TIPC_NLA_SOCK_PAD))
3297 goto attr_msg_cancel;
3298
3299 stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
3300 if (!stat)
3301 goto attr_msg_cancel;
3302
3303 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3304 skb_queue_len(&sk->sk_receive_queue)) ||
3305 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3306 skb_queue_len(&sk->sk_write_queue)) ||
3307 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3308 atomic_read(&sk->sk_drops)))
3309 goto stat_msg_cancel;
3310
3311 if (tsk->cong_link_cnt &&
3312 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3313 goto stat_msg_cancel;
3314
3315 if (tsk_conn_cong(tsk) &&
3316 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3317 goto stat_msg_cancel;
3318
3319 nla_nest_end(skb, stat);
3320
3321 if (tsk->group)
3322 if (tipc_group_fill_sock_diag(tsk->group, skb))
3323 goto stat_msg_cancel;
3324
3325 nla_nest_end(skb, attrs);
3326
3327 return 0;
3328
3329 stat_msg_cancel:
3330 nla_nest_cancel(skb, stat);
3331 attr_msg_cancel:
3332 nla_nest_cancel(skb, attrs);
3333 msg_cancel:
3334 return -EMSGSIZE;
3335 }
3336 EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3337
3338 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3339 {
3340 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3341 }
3342
3343 /* Caller should hold socket lock for the passed tipc socket. */
3344 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3345 struct netlink_callback *cb,
3346 struct publication *publ)
3347 {
3348 void *hdr;
3349 struct nlattr *attrs;
3350
3351 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3352 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3353 if (!hdr)
3354 goto msg_cancel;
3355
3356 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
3357 if (!attrs)
3358 goto genlmsg_cancel;
3359
3360 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3361 goto attr_msg_cancel;
3362 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3363 goto attr_msg_cancel;
3364 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3365 goto attr_msg_cancel;
3366 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3367 goto attr_msg_cancel;
3368
3369 nla_nest_end(skb, attrs);
3370 genlmsg_end(skb, hdr);
3371
3372 return 0;
3373
3374 attr_msg_cancel:
3375 nla_nest_cancel(skb, attrs);
3376 genlmsg_cancel:
3377 genlmsg_cancel(skb, hdr);
3378 msg_cancel:
3379 return -EMSGSIZE;
3380 }
3381
3382 /* Caller should hold socket lock for the passed tipc socket. */
3383 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3384 struct netlink_callback *cb,
3385 struct tipc_sock *tsk, u32 *last_publ)
3386 {
3387 int err;
3388 struct publication *p;
3389
3390 if (*last_publ) {
3391 list_for_each_entry(p, &tsk->publications, binding_sock) {
3392 if (p->key == *last_publ)
3393 break;
3394 }
3395 if (p->key != *last_publ) {
3396 /* We never set seq or call nl_dump_check_consistent(),
3397 * which means that setting prev_seq here will cause the
3398 * consistency check to fail in the netlink callback
3399 * handler, resulting in the last NLMSG_DONE message
3400 * having the NLM_F_DUMP_INTR flag set.
3401 */
3402 cb->prev_seq = 1;
3403 *last_publ = 0;
3404 return -EPIPE;
3405 }
3406 } else {
3407 p = list_first_entry(&tsk->publications, struct publication,
3408 binding_sock);
3409 }
3410
3411 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3412 err = __tipc_nl_add_sk_publ(skb, cb, p);
3413 if (err) {
3414 *last_publ = p->key;
3415 return err;
3416 }
3417 }
3418 *last_publ = 0;
3419
3420 return 0;
3421 }
3422
3423 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3424 {
3425 int err;
3426 u32 tsk_portid = cb->args[0];
3427 u32 last_publ = cb->args[1];
3428 u32 done = cb->args[2];
3429 struct net *net = sock_net(skb->sk);
3430 struct tipc_sock *tsk;
3431
3432 if (!tsk_portid) {
3433 struct nlattr **attrs;
3434 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3435
3436 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3437 if (err)
3438 return err;
3439
3440 if (!attrs[TIPC_NLA_SOCK])
3441 return -EINVAL;
3442
3443 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
3444 attrs[TIPC_NLA_SOCK],
3445 tipc_nl_sock_policy, NULL);
3446 if (err)
3447 return err;
3448
3449 if (!sock[TIPC_NLA_SOCK_REF])
3450 return -EINVAL;
3451
3452 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3453 }
3454
3455 if (done)
3456 return 0;
3457
3458 tsk = tipc_sk_lookup(net, tsk_portid);
3459 if (!tsk)
3460 return -EINVAL;
3461
3462 lock_sock(&tsk->sk);
3463 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3464 if (!err)
3465 done = 1;
3466 release_sock(&tsk->sk);
3467 sock_put(&tsk->sk);
3468
3469 cb->args[0] = tsk_portid;
3470 cb->args[1] = last_publ;
3471 cb->args[2] = done;
3472
3473 return skb->len;
3474 }