1 /*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/rhashtable.h>
38 #include <linux/sched/signal.h>
39
40 #include "core.h"
41 #include "name_table.h"
42 #include "node.h"
43 #include "link.h"
44 #include "name_distr.h"
45 #include "socket.h"
46 #include "bcast.h"
47 #include "netlink.h"
48 #include "group.h"
49
50 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
51 #define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
52 #define TIPC_FWD_MSG 1
53 #define TIPC_MAX_PORT 0xffffffff
54 #define TIPC_MIN_PORT 1
55 #define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
56
57 enum {
58 TIPC_LISTEN = TCP_LISTEN,
59 TIPC_ESTABLISHED = TCP_ESTABLISHED,
60 TIPC_OPEN = TCP_CLOSE,
61 TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
62 TIPC_CONNECTING = TCP_SYN_SENT,
63 };
64
65 struct sockaddr_pair {
66 struct sockaddr_tipc sock;
67 struct sockaddr_tipc member;
68 };
69
70 /**
71 * struct tipc_sock - TIPC socket structure
72 * @sk: socket - interacts with 'port' and with user via the socket API
73 * @conn_type: TIPC type used when connection was established
74 * @conn_instance: TIPC instance used when connection was established
75 * @published: non-zero if port has one or more associated names
76 * @max_pkt: maximum packet size "hint" used when building messages sent by port
77 * @portid: unique port identity in TIPC socket hash table
78 * @phdr: preformatted message header used when sending messages
79 * @cong_links: list of congested links
80 * @publications: list of publications for port
82 * @pub_count: total # of publications port has made during its lifetime
83 * @probe_unacked: probe message sent but not yet acknowledged by peer
84 * @conn_timeout: the time we can wait for an unresponded setup request
85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
86 * @cong_link_cnt: number of congested links
87 * @snt_unacked: # messages sent by socket, and not yet acked by peer
88 * @rcv_unacked: # messages read by user, but not yet acked back to peer
89 * @peer: 'connected' peer for dgram/rdm
90 * @node: hash table node
91 * @mc_method: cookie for use between socket and broadcast layer
92 * @rcu: rcu struct for tipc_sock
93 */
94 struct tipc_sock {
95 struct sock sk;
96 u32 conn_type;
97 u32 conn_instance;
98 int published;
99 u32 max_pkt;
100 u32 portid;
101 struct tipc_msg phdr;
102 struct list_head cong_links;
103 struct list_head publications;
104 u32 pub_count;
105 uint conn_timeout;
106 atomic_t dupl_rcvcnt;
107 bool probe_unacked;
108 u16 cong_link_cnt;
109 u16 snt_unacked;
110 u16 snd_win;
111 u16 peer_caps;
112 u16 rcv_unacked;
113 u16 rcv_win;
114 struct sockaddr_tipc peer;
115 struct rhash_head node;
116 struct tipc_mc_method mc_method;
117 struct rcu_head rcu;
118 struct tipc_group *group;
119 };
120
121 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
122 static void tipc_data_ready(struct sock *sk);
123 static void tipc_write_space(struct sock *sk);
124 static void tipc_sock_destruct(struct sock *sk);
125 static int tipc_release(struct socket *sock);
126 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
127 bool kern);
128 static void tipc_sk_timeout(struct timer_list *t);
129 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
130 struct tipc_name_seq const *seq);
131 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
132 struct tipc_name_seq const *seq);
133 static int tipc_sk_leave(struct tipc_sock *tsk);
134 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
135 static int tipc_sk_insert(struct tipc_sock *tsk);
136 static void tipc_sk_remove(struct tipc_sock *tsk);
137 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
138 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
139
140 static const struct proto_ops packet_ops;
141 static const struct proto_ops stream_ops;
142 static const struct proto_ops msg_ops;
143 static struct proto tipc_proto;
144 static const struct rhashtable_params tsk_rht_params;
145
146 static u32 tsk_own_node(struct tipc_sock *tsk)
147 {
148 return msg_prevnode(&tsk->phdr);
149 }
150
151 static u32 tsk_peer_node(struct tipc_sock *tsk)
152 {
153 return msg_destnode(&tsk->phdr);
154 }
155
156 static u32 tsk_peer_port(struct tipc_sock *tsk)
157 {
158 return msg_destport(&tsk->phdr);
159 }
160
161 static bool tsk_unreliable(struct tipc_sock *tsk)
162 {
163 return msg_src_droppable(&tsk->phdr) != 0;
164 }
165
166 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
167 {
168 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
169 }
170
171 static bool tsk_unreturnable(struct tipc_sock *tsk)
172 {
173 return msg_dest_droppable(&tsk->phdr) != 0;
174 }
175
176 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
177 {
178 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
179 }
180
181 static int tsk_importance(struct tipc_sock *tsk)
182 {
183 return msg_importance(&tsk->phdr);
184 }
185
186 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
187 {
188 if (imp > TIPC_CRITICAL_IMPORTANCE)
189 return -EINVAL;
190 msg_set_importance(&tsk->phdr, (u32)imp);
191 return 0;
192 }
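
/* Illustrative sketch (not part of the kernel build): from userspace the
 * importance level handled above is set with the TIPC_IMPORTANCE socket
 * option, where sd is an AF_TIPC socket descriptor:
 *
 *	int imp = TIPC_HIGH_IMPORTANCE;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 */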
193
194 static struct tipc_sock *tipc_sk(const struct sock *sk)
195 {
196 return container_of(sk, struct tipc_sock, sk);
197 }
198
199 static bool tsk_conn_cong(struct tipc_sock *tsk)
200 {
201 return tsk->snt_unacked > tsk->snd_win;
202 }
203
204 static u16 tsk_blocks(int len)
205 {
206 return ((len / FLOWCTL_BLK_SZ) + 1);
207 }
208
209 /* tsk_adv_blocks(): translate a buffer size in bytes to number of
210 * advertisable blocks, taking into account the ratio truesize(len)/len
211 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
212 */
213 static u16 tsk_adv_blocks(int len)
214 {
215 return len / FLOWCTL_BLK_SZ / 4;
216 }
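
/* Worked example, assuming the usual FLOWCTL_BLK_SZ of 1024 bytes: a
 * 64 KiB receive buffer advertises 64 * 1024 / 1024 / 4 = 16 blocks,
 * leaving the 4x margin for skb truesize overhead, while tsk_inc() below
 * charges a 3000-byte message as 3000 / 1024 + 1 = 3 blocks.
 */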
217
218 /* tsk_inc(): increment counter for sent or received data
219 * - If block based flow control is not supported by the peer we
220 * fall back to message based flow control, counting one unit per message
221 */
222 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
223 {
224 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
225 return ((msglen / FLOWCTL_BLK_SZ) + 1);
226 return 1;
227 }
228
229 /**
230 * tsk_advance_rx_queue - discard first buffer in socket receive queue
231 *
232 * Caller must hold socket lock
233 */
234 static void tsk_advance_rx_queue(struct sock *sk)
235 {
236 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
237 }
238
239 /* tipc_sk_respond() : send response message back to sender
240 */
241 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
242 {
243 u32 selector;
244 u32 dnode;
245 u32 onode = tipc_own_addr(sock_net(sk));
246
247 if (!tipc_msg_reverse(onode, &skb, err))
248 return;
249
250 dnode = msg_destnode(buf_msg(skb));
251 selector = msg_origport(buf_msg(skb));
252 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
253 }
254
255 /**
256 * tsk_rej_rx_queue - reject all buffers in socket receive queue
257 *
258 * Caller must hold socket lock
259 */
260 static void tsk_rej_rx_queue(struct sock *sk)
261 {
262 struct sk_buff *skb;
263
264 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
265 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
266 }
267
268 static bool tipc_sk_connected(struct sock *sk)
269 {
270 return sk->sk_state == TIPC_ESTABLISHED;
271 }
272
273 /* tipc_sk_type_connectionless - check if the socket is a datagram socket
274 * @sk: socket
275 *
276 * Returns true if connectionless, false otherwise
277 */
278 static bool tipc_sk_type_connectionless(struct sock *sk)
279 {
280 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
281 }
282
283 /* tsk_peer_msg - verify if message was sent by connected port's peer
284 *
285 * Handles cases where the node's network address has changed from
286 * the default of <0.0.0> to its configured setting.
287 */
288 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
289 {
290 struct sock *sk = &tsk->sk;
291 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
292 u32 peer_port = tsk_peer_port(tsk);
293 u32 orig_node;
294 u32 peer_node;
295
296 if (unlikely(!tipc_sk_connected(sk)))
297 return false;
298
299 if (unlikely(msg_origport(msg) != peer_port))
300 return false;
301
302 orig_node = msg_orignode(msg);
303 peer_node = tsk_peer_node(tsk);
304
305 if (likely(orig_node == peer_node))
306 return true;
307
308 if (!orig_node && (peer_node == tn->own_addr))
309 return true;
310
311 if (!peer_node && (orig_node == tn->own_addr))
312 return true;
313
314 return false;
315 }
316
317 /* tipc_set_sk_state - set the sk_state of the socket
318 * @sk: socket
319 *
320 * Caller must hold socket lock
321 *
322 * Returns 0 on success, errno otherwise
323 */
324 static int tipc_set_sk_state(struct sock *sk, int state)
325 {
326 int oldsk_state = sk->sk_state;
327 int res = -EINVAL;
328
329 switch (state) {
330 case TIPC_OPEN:
331 res = 0;
332 break;
333 case TIPC_LISTEN:
334 case TIPC_CONNECTING:
335 if (oldsk_state == TIPC_OPEN)
336 res = 0;
337 break;
338 case TIPC_ESTABLISHED:
339 if (oldsk_state == TIPC_CONNECTING ||
340 oldsk_state == TIPC_OPEN)
341 res = 0;
342 break;
343 case TIPC_DISCONNECTING:
344 if (oldsk_state == TIPC_CONNECTING ||
345 oldsk_state == TIPC_ESTABLISHED)
346 res = 0;
347 break;
348 }
349
350 if (!res)
351 sk->sk_state = state;
352
353 return res;
354 }
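
/* The switch above encodes the legal state transitions:
 *
 *	(any state)      -> TIPC_OPEN
 *	TIPC_OPEN        -> TIPC_LISTEN | TIPC_CONNECTING | TIPC_ESTABLISHED
 *	TIPC_CONNECTING  -> TIPC_ESTABLISHED | TIPC_DISCONNECTING
 *	TIPC_ESTABLISHED -> TIPC_DISCONNECTING
 */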
355
356 static int tipc_sk_sock_err(struct socket *sock, long *timeout)
357 {
358 struct sock *sk = sock->sk;
359 int err = sock_error(sk);
360 int typ = sock->type;
361
362 if (err)
363 return err;
364 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
365 if (sk->sk_state == TIPC_DISCONNECTING)
366 return -EPIPE;
367 else if (!tipc_sk_connected(sk))
368 return -ENOTCONN;
369 }
370 if (!*timeout)
371 return -EAGAIN;
372 if (signal_pending(current))
373 return sock_intr_errno(*timeout);
374
375 return 0;
376 }
377
378 #define tipc_wait_for_cond(sock_, timeo_, condition_) \
379 ({ \
380 struct sock *sk_; \
381 int rc_; \
382 \
383 while ((rc_ = !(condition_))) { \
384 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
385 sk_ = (sock_)->sk; \
386 rc_ = tipc_sk_sock_err((sock_), timeo_); \
387 if (rc_) \
388 break; \
389 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
390 release_sock(sk_); \
391 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
392 sched_annotate_sleep(); \
393 lock_sock(sk_); \
394 remove_wait_queue(sk_sleep(sk_), &wait_); \
395 } \
396 rc_; \
397 })
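
/* Typical use, as in __tipc_shutdown() and the send paths below:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *
 * Note that the macro releases the socket lock while sleeping and re-takes
 * it before re-evaluating the condition, so the condition expression must
 * only rely on state that is revalidated under the lock.
 */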
398
399 /**
400 * tipc_sk_create - create a TIPC socket
401 * @net: network namespace (must be default network)
402 * @sock: pre-allocated socket structure
403 * @protocol: protocol indicator (must be 0)
404 * @kern: caused by kernel or by userspace?
405 *
406 * This routine creates additional data structures used by the TIPC socket,
407 * initializes them, and links them together.
408 *
409 * Returns 0 on success, errno otherwise
410 */
411 static int tipc_sk_create(struct net *net, struct socket *sock,
412 int protocol, int kern)
413 {
414 struct tipc_net *tn;
415 const struct proto_ops *ops;
416 struct sock *sk;
417 struct tipc_sock *tsk;
418 struct tipc_msg *msg;
419
420 /* Validate arguments */
421 if (unlikely(protocol != 0))
422 return -EPROTONOSUPPORT;
423
424 switch (sock->type) {
425 case SOCK_STREAM:
426 ops = &stream_ops;
427 break;
428 case SOCK_SEQPACKET:
429 ops = &packet_ops;
430 break;
431 case SOCK_DGRAM:
432 case SOCK_RDM:
433 ops = &msg_ops;
434 break;
435 default:
436 return -EPROTOTYPE;
437 }
438
439 /* Allocate socket's protocol area */
440 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
441 if (sk == NULL)
442 return -ENOMEM;
443
444 tsk = tipc_sk(sk);
445 tsk->max_pkt = MAX_PKT_DEFAULT;
446 INIT_LIST_HEAD(&tsk->publications);
447 INIT_LIST_HEAD(&tsk->cong_links);
448 msg = &tsk->phdr;
449 tn = net_generic(sock_net(sk), tipc_net_id);
450
451 /* Finish initializing socket data structures */
452 sock->ops = ops;
453 sock_init_data(sock, sk);
454 tipc_set_sk_state(sk, TIPC_OPEN);
455 if (tipc_sk_insert(tsk)) {
456 pr_warn("Socket create failed; port number exhausted\n");
457 return -EINVAL;
458 }
459
460 /* Ensure tsk is visible before we read own_addr. */
461 smp_mb();
462
463 tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
464 NAMED_H_SIZE, 0);
465
466 msg_set_origport(msg, tsk->portid);
467 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
468 sk->sk_shutdown = 0;
469 sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
470 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
471 sk->sk_data_ready = tipc_data_ready;
472 sk->sk_write_space = tipc_write_space;
473 sk->sk_destruct = tipc_sock_destruct;
474 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
475 atomic_set(&tsk->dupl_rcvcnt, 0);
476
477 /* Start out with safe limits until we receive an advertised window */
478 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
479 tsk->rcv_win = tsk->snd_win;
480
481 if (tipc_sk_type_connectionless(sk)) {
482 tsk_set_unreturnable(tsk, true);
483 if (sock->type == SOCK_DGRAM)
484 tsk_set_unreliable(tsk, true);
485 }
486
487 return 0;
488 }
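
/* The ops tables selected above map onto the userspace socket() call
 * (illustrative):
 *
 *	socket(AF_TIPC, SOCK_RDM, 0);        ->  msg_ops, reliable datagrams
 *	socket(AF_TIPC, SOCK_DGRAM, 0);      ->  msg_ops, unreliable datagrams
 *	socket(AF_TIPC, SOCK_SEQPACKET, 0);  ->  packet_ops
 *	socket(AF_TIPC, SOCK_STREAM, 0);     ->  stream_ops
 */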
489
490 static void tipc_sk_callback(struct rcu_head *head)
491 {
492 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
493
494 sock_put(&tsk->sk);
495 }
496
497 /* Caller should hold socket lock for the socket. */
498 static void __tipc_shutdown(struct socket *sock, int error)
499 {
500 struct sock *sk = sock->sk;
501 struct tipc_sock *tsk = tipc_sk(sk);
502 struct net *net = sock_net(sk);
503 long timeout = CONN_TIMEOUT_DEFAULT;
504 u32 dnode = tsk_peer_node(tsk);
505 struct sk_buff *skb;
506
507 /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
508 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
509 !tsk_conn_cong(tsk)));
510
511 /* Reject all unreceived messages, except on an active connection
512 * (which disconnects locally & sends a 'FIN+' to peer).
513 */
514 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
515 if (TIPC_SKB_CB(skb)->bytes_read) {
516 kfree_skb(skb);
517 continue;
518 }
519 if (!tipc_sk_type_connectionless(sk) &&
520 sk->sk_state != TIPC_DISCONNECTING) {
521 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
522 tipc_node_remove_conn(net, dnode, tsk->portid);
523 }
524 tipc_sk_respond(sk, skb, error);
525 }
526
527 if (tipc_sk_type_connectionless(sk))
528 return;
529
530 if (sk->sk_state != TIPC_DISCONNECTING) {
531 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
532 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
533 tsk_own_node(tsk), tsk_peer_port(tsk),
534 tsk->portid, error);
535 if (skb)
536 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
537 tipc_node_remove_conn(net, dnode, tsk->portid);
538 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
539 }
540 }
541
542 /**
543 * tipc_release - destroy a TIPC socket
544 * @sock: socket to destroy
545 *
546 * This routine cleans up any messages that are still queued on the socket.
547 * For DGRAM and RDM socket types, all queued messages are rejected.
548 * For SEQPACKET and STREAM socket types, the first message is rejected
549 * and any others are discarded. (If the first message on a STREAM socket
550 * is partially-read, it is discarded and the next one is rejected instead.)
551 *
552 * NOTE: Rejected messages are not necessarily returned to the sender! They
553 * are returned or discarded according to the "destination droppable" setting
554 * specified for the message by the sender.
555 *
556 * Returns 0 on success, errno otherwise
557 */
558 static int tipc_release(struct socket *sock)
559 {
560 struct sock *sk = sock->sk;
561 struct tipc_sock *tsk;
562
563 /*
564 * Exit if socket isn't fully initialized (occurs when a failed accept()
565 * releases a pre-allocated child socket that was never used)
566 */
567 if (sk == NULL)
568 return 0;
569
570 tsk = tipc_sk(sk);
571 lock_sock(sk);
572
573 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
574 sk->sk_shutdown = SHUTDOWN_MASK;
575 tipc_sk_leave(tsk);
576 tipc_sk_withdraw(tsk, 0, NULL);
577 sk_stop_timer(sk, &sk->sk_timer);
578 tipc_sk_remove(tsk);
579
580 /* Reject any messages that accumulated in backlog queue */
581 release_sock(sk);
582 tipc_dest_list_purge(&tsk->cong_links);
583 tsk->cong_link_cnt = 0;
584 call_rcu(&tsk->rcu, tipc_sk_callback);
585 sock->sk = NULL;
586
587 return 0;
588 }
589
590 /**
591 * tipc_bind - associate or disassociate TIPC name(s) with a socket
592 * @sock: socket structure
593 * @uaddr: socket address describing name(s) and desired operation
594 * @uaddr_len: size of socket address data structure
595 *
596 * Name and name sequence binding is indicated using a positive scope value;
597 * a negative scope value unbinds the specified name. Specifying no name
598 * (i.e. a socket address length of 0) unbinds all names from the socket.
599 *
600 * Returns 0 on success, errno otherwise
604 */
605 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
606 int uaddr_len)
607 {
608 struct sock *sk = sock->sk;
609 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
610 struct tipc_sock *tsk = tipc_sk(sk);
611 int res = -EINVAL;
612
613 lock_sock(sk);
614 if (unlikely(!uaddr_len)) {
615 res = tipc_sk_withdraw(tsk, 0, NULL);
616 goto exit;
617 }
618 if (tsk->group) {
619 res = -EACCES;
620 goto exit;
621 }
622 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
623 res = -EINVAL;
624 goto exit;
625 }
626 if (addr->family != AF_TIPC) {
627 res = -EAFNOSUPPORT;
628 goto exit;
629 }
630
631 if (addr->addrtype == TIPC_ADDR_NAME)
632 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
633 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
634 res = -EAFNOSUPPORT;
635 goto exit;
636 }
637
638 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
639 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
640 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
641 res = -EACCES;
642 goto exit;
643 }
644
645 res = (addr->scope > 0) ?
646 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
647 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
648 exit:
649 release_sock(sk);
650 return res;
651 }
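
/* Illustrative userspace sketch of the bind() path above, publishing the
 * name range {1000, 0, 99} with cluster scope (assumes <linux/tipc.h>):
 *
 *	struct sockaddr_tipc addr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 0, .upper = 99 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));
 */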
652
653 /**
654 * tipc_getname - get port ID of socket or peer socket
655 * @sock: socket structure
656 * @uaddr: area for returned socket address
657 * @uaddr_len: area for returned length of socket address
658 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
659 *
660 * Returns 0 on success, errno otherwise
661 *
662 * NOTE: This routine doesn't need to take the socket lock since it only
663 * accesses socket information that is unchanging (or which changes in
664 * a completely predictable manner).
665 */
666 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
667 int *uaddr_len, int peer)
668 {
669 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
670 struct sock *sk = sock->sk;
671 struct tipc_sock *tsk = tipc_sk(sk);
672 struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
673
674 memset(addr, 0, sizeof(*addr));
675 if (peer) {
676 if ((!tipc_sk_connected(sk)) &&
677 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
678 return -ENOTCONN;
679 addr->addr.id.ref = tsk_peer_port(tsk);
680 addr->addr.id.node = tsk_peer_node(tsk);
681 } else {
682 addr->addr.id.ref = tsk->portid;
683 addr->addr.id.node = tn->own_addr;
684 }
685
686 *uaddr_len = sizeof(*addr);
687 addr->addrtype = TIPC_ADDR_ID;
688 addr->family = AF_TIPC;
689 addr->scope = 0;
690 addr->addr.name.domain = 0;
691
692 return 0;
693 }
694
695 /**
696 * tipc_poll - read and possibly block on pollmask
697 * @file: file structure associated with the socket
698 * @sock: socket for which to calculate the poll bits
699 * @wait: poll table
700 *
701 * Returns pollmask value
702 *
703 * COMMENTARY:
704 * It appears that the usual socket locking mechanisms are not useful here
705 * since the pollmask info is potentially out-of-date the moment this routine
706 * exits. TCP and other protocols seem to rely on higher level poll routines
707 * to handle any preventable race conditions, so TIPC will do the same ...
708 *
709 * IMPORTANT: The fact that a read or write operation is indicated does NOT
710 * imply that the operation will succeed, merely that it should be performed
711 * and will not block.
712 */
713 static unsigned int tipc_poll(struct file *file, struct socket *sock,
714 poll_table *wait)
715 {
716 struct sock *sk = sock->sk;
717 struct tipc_sock *tsk = tipc_sk(sk);
718 struct tipc_group *grp = tsk->group;
719 u32 revents = 0;
720
721 sock_poll_wait(file, sk_sleep(sk), wait);
722
723 if (sk->sk_shutdown & RCV_SHUTDOWN)
724 revents |= POLLRDHUP | POLLIN | POLLRDNORM;
725 if (sk->sk_shutdown == SHUTDOWN_MASK)
726 revents |= POLLHUP;
727
728 switch (sk->sk_state) {
729 case TIPC_ESTABLISHED:
730 case TIPC_CONNECTING:
731 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
732 revents |= POLLOUT;
733 /* fall thru' */
734 case TIPC_LISTEN:
735 if (!skb_queue_empty(&sk->sk_receive_queue))
736 revents |= POLLIN | POLLRDNORM;
737 break;
738 case TIPC_OPEN:
739 if (!grp || tipc_group_size(grp))
740 if (!tsk->cong_link_cnt)
741 revents |= POLLOUT;
742 if (!tipc_sk_type_connectionless(sk))
743 break;
744 if (skb_queue_empty(&sk->sk_receive_queue))
745 break;
746 revents |= POLLIN | POLLRDNORM;
747 break;
748 case TIPC_DISCONNECTING:
749 revents = POLLIN | POLLRDNORM | POLLHUP;
750 break;
751 }
752 return revents;
753 }
754
755 /**
756 * tipc_sendmcast - send multicast message
757 * @sock: socket structure
758 * @seq: destination address
759 * @msg: message to send
760 * @dlen: length of data to send
761 * @timeout: timeout to wait for wakeup
762 *
763 * Called from function tipc_sendmsg(), which has done all sanity checks
764 * Returns the number of bytes sent on success, or errno
765 */
766 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
767 struct msghdr *msg, size_t dlen, long timeout)
768 {
769 struct sock *sk = sock->sk;
770 struct tipc_sock *tsk = tipc_sk(sk);
771 struct tipc_msg *hdr = &tsk->phdr;
772 struct net *net = sock_net(sk);
773 int mtu = tipc_bcast_get_mtu(net);
774 struct tipc_mc_method *method = &tsk->mc_method;
775 u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
776 struct sk_buff_head pkts;
777 struct tipc_nlist dsts;
778 int rc;
779
780 if (tsk->group)
781 return -EACCES;
782
783 /* Block or return if any destination link is congested */
784 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
785 if (unlikely(rc))
786 return rc;
787
788 /* Lookup destination nodes */
789 tipc_nlist_init(&dsts, tipc_own_addr(net));
790 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
791 seq->upper, domain, &dsts);
792 if (!dsts.local && !dsts.remote)
793 return -EHOSTUNREACH;
794
795 /* Build message header */
796 msg_set_type(hdr, TIPC_MCAST_MSG);
797 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
798 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
799 msg_set_destport(hdr, 0);
800 msg_set_destnode(hdr, 0);
801 msg_set_nametype(hdr, seq->type);
802 msg_set_namelower(hdr, seq->lower);
803 msg_set_nameupper(hdr, seq->upper);
804
805 /* Build message as chain of buffers */
806 skb_queue_head_init(&pkts);
807 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
808
809 /* Send message if build was successful */
810 if (unlikely(rc == dlen))
811 rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
812 &tsk->cong_link_cnt);
813
814 tipc_nlist_purge(&dsts);
815
816 return rc ? rc : dlen;
817 }
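
/* Illustrative userspace counterpart: a multicast send reaches every
 * socket bound to an overlapping name range, e.g.:
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 1000, .lower = 10, .upper = 20 },
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */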
818
819 /**
820 * tipc_send_group_msg - send a message to a member in the group
821 * @net: network namespace
822 * @m: message to send
823 * @mb: group member
824 * @dnode: destination node
825 * @dport: destination port
826 * @dlen: total length of message data
827 */
828 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
829 struct msghdr *m, struct tipc_member *mb,
830 u32 dnode, u32 dport, int dlen)
831 {
832 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
833 struct tipc_mc_method *method = &tsk->mc_method;
834 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
835 struct tipc_msg *hdr = &tsk->phdr;
836 struct sk_buff_head pkts;
837 int mtu, rc;
838
839 /* Complete message header */
840 msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
841 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
842 msg_set_destport(hdr, dport);
843 msg_set_destnode(hdr, dnode);
844 msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
845
846 /* Build message as chain of buffers */
847 skb_queue_head_init(&pkts);
848 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
849 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
850 if (unlikely(rc != dlen))
851 return rc;
852
853 /* Send message */
854 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
855 if (unlikely(rc == -ELINKCONG)) {
856 tipc_dest_push(&tsk->cong_links, dnode, 0);
857 tsk->cong_link_cnt++;
858 }
859
860 /* Update send window */
861 tipc_group_update_member(mb, blks);
862
863 /* A broadcast sent within next EXPIRE period must follow same path */
864 method->rcast = true;
865 method->mandatory = true;
866 return dlen;
867 }
868
869 /**
870 * tipc_send_group_unicast - send message to a member in the group
871 * @sock: socket structure
872 * @m: message to send
873 * @dlen: total length of message data
874 * @timeout: timeout to wait for wakeup
875 *
876 * Called from function tipc_sendmsg(), which has done all sanity checks
877 * Returns the number of bytes sent on success, or errno
878 */
879 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
880 int dlen, long timeout)
881 {
882 struct sock *sk = sock->sk;
883 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
884 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
885 struct tipc_sock *tsk = tipc_sk(sk);
886 struct tipc_group *grp = tsk->group;
887 struct net *net = sock_net(sk);
888 struct tipc_member *mb = NULL;
889 u32 node, port;
890 int rc;
891
892 node = dest->addr.id.node;
893 port = dest->addr.id.ref;
894 if (!port && !node)
895 return -EHOSTUNREACH;
896
897 /* Block or return if destination link or member is congested */
898 rc = tipc_wait_for_cond(sock, &timeout,
899 !tipc_dest_find(&tsk->cong_links, node, 0) &&
900 !tipc_group_cong(grp, node, port, blks, &mb));
901 if (unlikely(rc))
902 return rc;
903
904 if (unlikely(!mb))
905 return -EHOSTUNREACH;
906
907 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
908
909 return rc ? rc : dlen;
910 }
911
912 /**
913 * tipc_send_group_anycast - send message to any member with given identity
914 * @sock: socket structure
915 * @m: message to send
916 * @dlen: total length of message data
917 * @timeout: timeout to wait for wakeup
918 *
919 * Called from function tipc_sendmsg(), which has done all sanity checks
920 * Returns the number of bytes sent on success, or errno
921 */
922 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
923 int dlen, long timeout)
924 {
925 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
926 struct sock *sk = sock->sk;
927 struct tipc_sock *tsk = tipc_sk(sk);
928 struct list_head *cong_links = &tsk->cong_links;
929 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
930 struct tipc_group *grp = tsk->group;
931 struct tipc_member *first = NULL;
932 struct tipc_member *mbr = NULL;
933 struct net *net = sock_net(sk);
934 u32 node, port, exclude;
935 u32 type, inst, domain;
936 struct list_head dsts;
937 int lookups = 0;
938 int dstcnt, rc;
939 bool cong;
940
941 INIT_LIST_HEAD(&dsts);
942
943 type = dest->addr.name.name.type;
944 inst = dest->addr.name.name.instance;
945 domain = addr_domain(net, dest->scope);
946 exclude = tipc_group_exclude(grp);
947
948 while (++lookups < 4) {
949 first = NULL;
950
951 /* Look for a non-congested destination member, if any */
952 while (1) {
953 if (!tipc_nametbl_lookup(net, type, inst, domain, &dsts,
954 &dstcnt, exclude, false))
955 return -EHOSTUNREACH;
956 tipc_dest_pop(&dsts, &node, &port);
957 cong = tipc_group_cong(grp, node, port, blks, &mbr);
958 if (!cong)
959 break;
960 if (mbr == first)
961 break;
962 if (!first)
963 first = mbr;
964 }
965
966 /* Start over if destination was not in member list */
967 if (unlikely(!mbr))
968 continue;
969
970 if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
971 break;
972
973 /* Block or return if destination link or member is congested */
974 rc = tipc_wait_for_cond(sock, &timeout,
975 !tipc_dest_find(cong_links, node, 0) &&
976 !tipc_group_cong(grp, node, port,
977 blks, &mbr));
978 if (unlikely(rc))
979 return rc;
980
981 /* Send, unless destination disappeared while waiting */
982 if (likely(mbr))
983 break;
984 }
985
986 if (unlikely(lookups >= 4))
987 return -EHOSTUNREACH;
988
989 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
990
991 return rc ? rc : dlen;
992 }
993
994 /**
995 * tipc_send_group_bcast - send message to all members in communication group
996 * @sk: socket structure
997 * @m: message to send
998 * @dlen: total length of message data
999 * @timeout: timeout to wait for wakeup
1000 *
1001 * Called from function tipc_sendmsg(), which has done all sanity checks
1002 * Returns the number of bytes sent on success, or errno
1003 */
1004 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1005 int dlen, long timeout)
1006 {
1007 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1008 struct sock *sk = sock->sk;
1009 struct net *net = sock_net(sk);
1010 struct tipc_sock *tsk = tipc_sk(sk);
1011 struct tipc_group *grp = tsk->group;
1012 struct tipc_nlist *dsts = tipc_group_dests(grp);
1013 struct tipc_mc_method *method = &tsk->mc_method;
1014 bool ack = method->mandatory && method->rcast;
1015 int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1016 struct tipc_msg *hdr = &tsk->phdr;
1017 int mtu = tipc_bcast_get_mtu(net);
1018 struct sk_buff_head pkts;
1019 int rc = -EHOSTUNREACH;
1020
1021 if (!dsts->local && !dsts->remote)
1022 return -EHOSTUNREACH;
1023
1024 /* Block or return if any destination link or member is congested */
1025 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
1026 !tipc_group_bc_cong(grp, blks));
1027 if (unlikely(rc))
1028 return rc;
1029
1030 /* Complete message header */
1031 if (dest) {
1032 msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1033 msg_set_nameinst(hdr, dest->addr.name.name.instance);
1034 } else {
1035 msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1036 msg_set_nameinst(hdr, 0);
1037 }
1038 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1039 msg_set_destport(hdr, 0);
1040 msg_set_destnode(hdr, 0);
1041 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
1042
1043 /* Avoid getting stuck with repeated forced replicasts */
1044 msg_set_grp_bc_ack_req(hdr, ack);
1045
1046 /* Build message as chain of buffers */
1047 skb_queue_head_init(&pkts);
1048 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1049 if (unlikely(rc != dlen))
1050 return rc;
1051
1052 /* Send message */
1053 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1054 if (unlikely(rc))
1055 return rc;
1056
1057 /* Update broadcast sequence number and send windows */
1058 tipc_group_update_bc_members(tsk->group, blks, ack);
1059
1060 /* Broadcast link is now free to choose method for next broadcast */
1061 method->mandatory = false;
1062 method->expires = jiffies;
1063
1064 return dlen;
1065 }
1066
1067 /**
1068 * tipc_send_group_mcast - send message to all members with given identity
1069 * @sock: socket structure
1070 * @m: message to send
1071 * @dlen: total length of message data
1072 * @timeout: timeout to wait for wakeup
1073 *
1074 * Called from function tipc_sendmsg(), which has done all sanity checks
1075 * Returns the number of bytes sent on success, or errno
1076 */
1077 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1078 int dlen, long timeout)
1079 {
1080 struct sock *sk = sock->sk;
1081 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1082 struct tipc_name_seq *seq = &dest->addr.nameseq;
1083 struct tipc_sock *tsk = tipc_sk(sk);
1084 struct tipc_group *grp = tsk->group;
1085 struct net *net = sock_net(sk);
1086 u32 domain, exclude, dstcnt;
1087 struct list_head dsts;
1088
1089 INIT_LIST_HEAD(&dsts);
1090
1091 if (seq->lower != seq->upper)
1092 return -ENOTSUPP;
1093
1094 domain = addr_domain(net, dest->scope);
1095 exclude = tipc_group_exclude(grp);
1096 if (!tipc_nametbl_lookup(net, seq->type, seq->lower, domain,
1097 &dsts, &dstcnt, exclude, true))
1098 return -EHOSTUNREACH;
1099
1100 if (dstcnt == 1) {
1101 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
1102 return tipc_send_group_unicast(sock, m, dlen, timeout);
1103 }
1104
1105 tipc_dest_list_purge(&dsts);
1106 return tipc_send_group_bcast(sock, m, dlen, timeout);
1107 }
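
/* All of the group send variants above require that the socket has first
 * joined a communication group; a minimal userspace sketch:
 *
 *	struct tipc_group_req req = {
 *		.type = 4711,
 *		.instance = 17,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req));
 */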
1108
1109 /**
1110 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
1111 * @arrvq: queue with arriving messages, to be cloned after destination lookup
1112 * @inputq: queue with cloned messages, delivered to socket after dest lookup
1113 *
1114 * Multi-threaded: parallel calls with reference to same queues may occur
1115 */
1116 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1117 struct sk_buff_head *inputq)
1118 {
1119 u32 scope = TIPC_CLUSTER_SCOPE;
1120 u32 self = tipc_own_addr(net);
1121 struct sk_buff *skb, *_skb;
1122 u32 lower = 0, upper = ~0;
1123 struct sk_buff_head tmpq;
1124 u32 portid, oport, onode;
1125 struct list_head dports;
1126 struct tipc_msg *msg;
1127 int user, mtyp, hsz;
1128
1129 __skb_queue_head_init(&tmpq);
1130 INIT_LIST_HEAD(&dports);
1131
1132 skb = tipc_skb_peek(arrvq, &inputq->lock);
1133 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1134 msg = buf_msg(skb);
1135 user = msg_user(msg);
1136 mtyp = msg_type(msg);
1137 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1138 spin_lock_bh(&inputq->lock);
1139 if (skb_peek(arrvq) == skb) {
1140 __skb_dequeue(arrvq);
1141 __skb_queue_tail(inputq, skb);
1142 }
1143 kfree_skb(skb);
1144 spin_unlock_bh(&inputq->lock);
1145 continue;
1146 }
1147 hsz = skb_headroom(skb) + msg_hdr_sz(msg);
1148 oport = msg_origport(msg);
1149 onode = msg_orignode(msg);
1150 if (onode == self)
1151 scope = TIPC_NODE_SCOPE;
1152
1153 /* Create destination port list and message clones: */
1154 if (!msg_in_group(msg)) {
1155 lower = msg_namelower(msg);
1156 upper = msg_nameupper(msg);
1157 }
1158 tipc_nametbl_mc_translate(net, msg_nametype(msg), lower, upper,
1159 scope, &dports);
1160 while (tipc_dest_pop(&dports, NULL, &portid)) {
1161 _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
1162 if (_skb) {
1163 msg_set_destport(buf_msg(_skb), portid);
1164 __skb_queue_tail(&tmpq, _skb);
1165 continue;
1166 }
1167 pr_warn("Failed to clone mcast rcv buffer\n");
1168 }
1169 /* Append to inputq if not already done by other thread */
1170 spin_lock_bh(&inputq->lock);
1171 if (skb_peek(arrvq) == skb) {
1172 skb_queue_splice_tail_init(&tmpq, inputq);
1173 kfree_skb(__skb_dequeue(arrvq));
1174 }
1175 spin_unlock_bh(&inputq->lock);
1176 __skb_queue_purge(&tmpq);
1177 kfree_skb(skb);
1178 }
1179 tipc_sk_rcv(net, inputq);
1180 }
1181
1182 /**
1183 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
1184 * @tsk: receiving socket
1185 * @skb: pointer to message buffer.
1186 */
1187 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1188 struct sk_buff_head *xmitq)
1189 {
1190 struct tipc_msg *hdr = buf_msg(skb);
1191 u32 onode = tsk_own_node(tsk);
1192 struct sock *sk = &tsk->sk;
1193 int mtyp = msg_type(hdr);
1194 bool conn_cong;
1195
1196 /* Ignore if connection cannot be validated: */
1197 if (!tsk_peer_msg(tsk, hdr))
1198 goto exit;
1199
1200 if (unlikely(msg_errcode(hdr))) {
1201 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1202 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1203 tsk_peer_port(tsk));
1204 sk->sk_state_change(sk);
1205 goto exit;
1206 }
1207
1208 tsk->probe_unacked = false;
1209
1210 if (mtyp == CONN_PROBE) {
1211 msg_set_type(hdr, CONN_PROBE_REPLY);
1212 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1213 __skb_queue_tail(xmitq, skb);
1214 return;
1215 } else if (mtyp == CONN_ACK) {
1216 conn_cong = tsk_conn_cong(tsk);
1217 tsk->snt_unacked -= msg_conn_ack(hdr);
1218 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1219 tsk->snd_win = msg_adv_win(hdr);
1220 if (conn_cong)
1221 sk->sk_write_space(sk);
1222 } else if (mtyp != CONN_PROBE_REPLY) {
1223 pr_warn("Received unknown CONN_PROTO msg\n");
1224 }
1225 exit:
1226 kfree_skb(skb);
1227 }
1228
1229 /**
1230 * tipc_sendmsg - send message in connectionless manner
1231 * @sock: socket structure
1232 * @m: message to send
1233 * @dsz: amount of user data to be sent
1234 *
1235 * Message must have a destination specified explicitly.
1236 * Used for SOCK_RDM and SOCK_DGRAM messages,
1237 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1238 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
1239 *
1240 * Returns the number of bytes sent on success, or errno otherwise
1241 */
1242 static int tipc_sendmsg(struct socket *sock,
1243 struct msghdr *m, size_t dsz)
1244 {
1245 struct sock *sk = sock->sk;
1246 int ret;
1247
1248 lock_sock(sk);
1249 ret = __tipc_sendmsg(sock, m, dsz);
1250 release_sock(sk);
1251
1252 return ret;
1253 }
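
/* Illustrative userspace sketch of an addressed datagram send; it lands in
 * __tipc_sendmsg() below with addrtype TIPC_ADDR_NAME (domain 0 means the
 * name lookup may consider the whole zone):
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 42 },
 *		.addr.name.domain = 0,
 *	};
 *
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */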
1254
1255 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1256 {
1257 struct sock *sk = sock->sk;
1258 struct net *net = sock_net(sk);
1259 struct tipc_sock *tsk = tipc_sk(sk);
1260 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1261 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1262 struct list_head *clinks = &tsk->cong_links;
1263 bool syn = !tipc_sk_type_connectionless(sk);
1264 struct tipc_group *grp = tsk->group;
1265 struct tipc_msg *hdr = &tsk->phdr;
1266 struct tipc_name_seq *seq;
1267 struct sk_buff_head pkts;
1268 u32 type, inst, domain;
1269 u32 dnode, dport;
1270 int mtu, rc;
1271
1272 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1273 return -EMSGSIZE;
1274
1275 if (likely(dest)) {
1276 if (unlikely(m->msg_namelen < sizeof(*dest)))
1277 return -EINVAL;
1278 if (unlikely(dest->family != AF_TIPC))
1279 return -EINVAL;
1280 }
1281
1282 if (grp) {
1283 if (!dest)
1284 return tipc_send_group_bcast(sock, m, dlen, timeout);
1285 if (dest->addrtype == TIPC_ADDR_NAME)
1286 return tipc_send_group_anycast(sock, m, dlen, timeout);
1287 if (dest->addrtype == TIPC_ADDR_ID)
1288 return tipc_send_group_unicast(sock, m, dlen, timeout);
1289 if (dest->addrtype == TIPC_ADDR_MCAST)
1290 return tipc_send_group_mcast(sock, m, dlen, timeout);
1291 return -EINVAL;
1292 }
1293
1294 if (unlikely(!dest)) {
1295 dest = &tsk->peer;
1296 if (!syn || dest->family != AF_TIPC)
1297 return -EDESTADDRREQ;
1298 }
1299
1300 if (unlikely(syn)) {
1301 if (sk->sk_state == TIPC_LISTEN)
1302 return -EPIPE;
1303 if (sk->sk_state != TIPC_OPEN)
1304 return -EISCONN;
1305 if (tsk->published)
1306 return -EOPNOTSUPP;
1307 if (dest->addrtype == TIPC_ADDR_NAME) {
1308 tsk->conn_type = dest->addr.name.name.type;
1309 tsk->conn_instance = dest->addr.name.name.instance;
1310 }
1311 }
1312
1313 seq = &dest->addr.nameseq;
1314 if (dest->addrtype == TIPC_ADDR_MCAST)
1315 return tipc_sendmcast(sock, seq, m, dlen, timeout);
1316
1317 if (dest->addrtype == TIPC_ADDR_NAME) {
1318 type = dest->addr.name.name.type;
1319 inst = dest->addr.name.name.instance;
1320 domain = dest->addr.name.domain;
1321 dnode = domain;
1322 msg_set_type(hdr, TIPC_NAMED_MSG);
1323 msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1324 msg_set_nametype(hdr, type);
1325 msg_set_nameinst(hdr, inst);
1326 msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
1327 dport = tipc_nametbl_translate(net, type, inst, &dnode);
1328 msg_set_destnode(hdr, dnode);
1329 msg_set_destport(hdr, dport);
1330 if (unlikely(!dport && !dnode))
1331 return -EHOSTUNREACH;
1332 } else if (dest->addrtype == TIPC_ADDR_ID) {
1333 dnode = dest->addr.id.node;
1334 msg_set_type(hdr, TIPC_DIRECT_MSG);
1335 msg_set_lookup_scope(hdr, 0);
1336 msg_set_destnode(hdr, dnode);
1337 msg_set_destport(hdr, dest->addr.id.ref);
1338 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1339 }
1340
1341 /* Block or return if destination link is congested */
1342 rc = tipc_wait_for_cond(sock, &timeout,
1343 !tipc_dest_find(clinks, dnode, 0));
1344 if (unlikely(rc))
1345 return rc;
1346
1347 skb_queue_head_init(&pkts);
1348 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
1349 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1350 if (unlikely(rc != dlen))
1351 return rc;
1352
1353 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1354 if (unlikely(rc == -ELINKCONG)) {
1355 tipc_dest_push(clinks, dnode, 0);
1356 tsk->cong_link_cnt++;
1357 rc = 0;
1358 }
1359
1360 if (unlikely(syn && !rc))
1361 tipc_set_sk_state(sk, TIPC_CONNECTING);
1362
1363 return rc ? rc : dlen;
1364 }
1365
1366 /**
1367 * tipc_sendstream - send stream-oriented data
1368 * @sock: socket structure
1369 * @m: data to send
1370 * @dsz: total length of data to be transmitted
1371 *
1372 * Used for SOCK_STREAM data.
1373 *
1374 * Returns the number of bytes sent on success (or partial success),
1375 * or errno if no data sent
1376 */
1377 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1378 {
1379 struct sock *sk = sock->sk;
1380 int ret;
1381
1382 lock_sock(sk);
1383 ret = __tipc_sendstream(sock, m, dsz);
1384 release_sock(sk);
1385
1386 return ret;
1387 }
1388
1389 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1390 {
1391 struct sock *sk = sock->sk;
1392 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1393 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1394 struct tipc_sock *tsk = tipc_sk(sk);
1395 struct tipc_msg *hdr = &tsk->phdr;
1396 struct net *net = sock_net(sk);
1397 struct sk_buff_head pkts;
1398 u32 dnode = tsk_peer_node(tsk);
1399 int send, sent = 0;
1400 int rc = 0;
1401
1402 skb_queue_head_init(&pkts);
1403
1404 if (unlikely(dlen > INT_MAX))
1405 return -EMSGSIZE;
1406
1407 /* Handle implicit connection setup */
1408 if (unlikely(dest)) {
1409 rc = __tipc_sendmsg(sock, m, dlen);
1410 if (dlen && (dlen == rc))
1411 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1412 return rc;
1413 }
1414
1415 do {
1416 rc = tipc_wait_for_cond(sock, &timeout,
1417 (!tsk->cong_link_cnt &&
1418 !tsk_conn_cong(tsk) &&
1419 tipc_sk_connected(sk)));
1420 if (unlikely(rc))
1421 break;
1422
1423 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1424 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
1425 if (unlikely(rc != send))
1426 break;
1427
1428 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1429 if (unlikely(rc == -ELINKCONG)) {
1430 tsk->cong_link_cnt = 1;
1431 rc = 0;
1432 }
1433 if (likely(!rc)) {
1434 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
1435 sent += send;
1436 }
1437 } while (sent < dlen && !rc);
1438
1439 return sent ? sent : rc;
1440 }
1441
1442 /**
1443 * tipc_send_packet - send a connection-oriented message
1444 * @sock: socket structure
1445 * @m: message to send
1446 * @dsz: length of data to be transmitted
1447 *
1448 * Used for SOCK_SEQPACKET messages.
1449 *
1450 * Returns the number of bytes sent on success, or errno otherwise
1451 */
1452 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1453 {
1454 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1455 return -EMSGSIZE;
1456
1457 return tipc_sendstream(sock, m, dsz);
1458 }
1459
1460 /* tipc_sk_finish_conn - complete the setup of a connection
1461 */
1462 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1463 u32 peer_node)
1464 {
1465 struct sock *sk = &tsk->sk;
1466 struct net *net = sock_net(sk);
1467 struct tipc_msg *msg = &tsk->phdr;
1468
1469 msg_set_destnode(msg, peer_node);
1470 msg_set_destport(msg, peer_port);
1471 msg_set_type(msg, TIPC_CONN_MSG);
1472 msg_set_lookup_scope(msg, 0);
1473 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1474
1475 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1476 tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1477 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1478 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1479 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1480 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1481 return;
1482
1483 /* Fall back to message based flow control */
1484 tsk->rcv_win = FLOWCTL_MSG_WIN;
1485 tsk->snd_win = FLOWCTL_MSG_WIN;
1486 }
1487
1488 /**
1489 * tipc_sk_set_orig_addr - capture sender's address for received message
1490 * @m: descriptor for message info
1491 * @skb: received message buffer
1492 *
1493 * Note: Address is not captured if not requested by receiver.
1494 */
1495 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1496 {
1497 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1498 struct tipc_msg *hdr = buf_msg(skb);
1499
1500 if (!srcaddr)
1501 return;
1502
1503 srcaddr->sock.family = AF_TIPC;
1504 srcaddr->sock.addrtype = TIPC_ADDR_ID;
1505 srcaddr->sock.scope = 0;
1506 srcaddr->sock.addr.id.ref = msg_origport(hdr);
1507 srcaddr->sock.addr.id.node = msg_orignode(hdr);
1508 srcaddr->sock.addr.name.domain = 0;
1509 m->msg_namelen = sizeof(struct sockaddr_tipc);
1510
1511 if (!msg_in_group(hdr))
1512 return;
1513
1514 /* Group message users may also want to know sending member's id */
1515 srcaddr->member.family = AF_TIPC;
1516 srcaddr->member.addrtype = TIPC_ADDR_NAME;
1517 srcaddr->member.scope = 0;
1518 srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1519 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1520 srcaddr->member.addr.name.domain = 0;
1521 m->msg_namelen = sizeof(*srcaddr);
1522 }
1523
1524 /**
1525 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1526 * @m: descriptor for message info
1527 * @msg: received message header
1528 * @tsk: TIPC port associated with message
1529 *
1530 * Note: Ancillary data is not captured if not requested by receiver.
1531 *
1532 * Returns 0 if successful, otherwise errno
1533 */
1534 static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1535 struct tipc_sock *tsk)
1536 {
1537 u32 anc_data[3];
1538 u32 err;
1539 u32 dest_type;
1540 int has_name;
1541 int res;
1542
1543 if (likely(m->msg_controllen == 0))
1544 return 0;
1545
1546 /* Optionally capture errored message object(s) */
1547 err = msg ? msg_errcode(msg) : 0;
1548 if (unlikely(err)) {
1549 anc_data[0] = err;
1550 anc_data[1] = msg_data_sz(msg);
1551 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1552 if (res)
1553 return res;
1554 if (anc_data[1]) {
1555 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1556 msg_data(msg));
1557 if (res)
1558 return res;
1559 }
1560 }
1561
1562 /* Optionally capture message destination object */
1563 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1564 switch (dest_type) {
1565 case TIPC_NAMED_MSG:
1566 has_name = 1;
1567 anc_data[0] = msg_nametype(msg);
1568 anc_data[1] = msg_namelower(msg);
1569 anc_data[2] = msg_namelower(msg);
1570 break;
1571 case TIPC_MCAST_MSG:
1572 has_name = 1;
1573 anc_data[0] = msg_nametype(msg);
1574 anc_data[1] = msg_namelower(msg);
1575 anc_data[2] = msg_nameupper(msg);
1576 break;
1577 case TIPC_CONN_MSG:
1578 has_name = (tsk->conn_type != 0);
1579 anc_data[0] = tsk->conn_type;
1580 anc_data[1] = tsk->conn_instance;
1581 anc_data[2] = tsk->conn_instance;
1582 break;
1583 default:
1584 has_name = 0;
1585 }
1586 if (has_name) {
1587 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1588 if (res)
1589 return res;
1590 }
1591
1592 return 0;
1593 }
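
/* A userspace receiver that supplies msg_control can pick these objects
 * out with the standard CMSG macros (sketch); TIPC_DESTNAME carries three
 * u32s (type, lower, upper) and TIPC_ERRINFO two (error, data length):
 *
 *	struct cmsghdr *cm;
 *	__u32 *dname;
 *
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
 *		if (cm->cmsg_level == SOL_TIPC &&
 *		    cm->cmsg_type == TIPC_DESTNAME)
 *			dname = (__u32 *)CMSG_DATA(cm);
 *	}
 */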
1594
1595 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1596 {
1597 struct sock *sk = &tsk->sk;
1598 struct net *net = sock_net(sk);
1599 struct sk_buff *skb = NULL;
1600 struct tipc_msg *msg;
1601 u32 peer_port = tsk_peer_port(tsk);
1602 u32 dnode = tsk_peer_node(tsk);
1603
1604 if (!tipc_sk_connected(sk))
1605 return;
1606 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1607 dnode, tsk_own_node(tsk), peer_port,
1608 tsk->portid, TIPC_OK);
1609 if (!skb)
1610 return;
1611 msg = buf_msg(skb);
1612 msg_set_conn_ack(msg, tsk->rcv_unacked);
1613 tsk->rcv_unacked = 0;
1614
1615 /* Adjust to and advertise the correct window limit */
1616 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1617 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1618 msg_set_adv_win(msg, tsk->rcv_win);
1619 }
1620 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1621 }
1622
1623 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1624 {
1625 struct sock *sk = sock->sk;
1626 DEFINE_WAIT(wait);
1627 long timeo = *timeop;
1628 int err = sock_error(sk);
1629
1630 if (err)
1631 return err;
1632
1633 for (;;) {
1634 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1635 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1636 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1637 err = -ENOTCONN;
1638 break;
1639 }
1640 release_sock(sk);
1641 timeo = schedule_timeout(timeo);
1642 lock_sock(sk);
1643 }
1644 err = 0;
1645 if (!skb_queue_empty(&sk->sk_receive_queue))
1646 break;
1647 err = -EAGAIN;
1648 if (!timeo)
1649 break;
1650 err = sock_intr_errno(timeo);
1651 if (signal_pending(current))
1652 break;
1653
1654 err = sock_error(sk);
1655 if (err)
1656 break;
1657 }
1658 finish_wait(sk_sleep(sk), &wait);
1659 *timeop = timeo;
1660 return err;
1661 }
1662
1663 /**
1664 * tipc_recvmsg - receive packet-oriented message
1665 * @m: descriptor for message info
1666 * @buflen: length of user buffer area
1667 * @flags: receive flags
1668 *
1669 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1670 * If the complete message doesn't fit in user area, truncate it.
1671 *
1672 * Returns size of returned message data, errno otherwise
1673 */
1674 static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1675 size_t buflen, int flags)
1676 {
1677 struct sock *sk = sock->sk;
1678 bool connected = !tipc_sk_type_connectionless(sk);
1679 struct tipc_sock *tsk = tipc_sk(sk);
1680 int rc, err, hlen, dlen, copy;
1681 struct sk_buff_head xmitq;
1682 struct tipc_msg *hdr;
1683 struct sk_buff *skb;
1684 bool grp_evt;
1685 long timeout;
1686
1687 /* Catch invalid receive requests */
1688 if (unlikely(!buflen))
1689 return -EINVAL;
1690
1691 lock_sock(sk);
1692 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1693 rc = -ENOTCONN;
1694 goto exit;
1695 }
1696 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1697
1698 /* Step rcv queue to first msg with data or error; wait if necessary */
1699 do {
1700 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1701 if (unlikely(rc))
1702 goto exit;
1703 skb = skb_peek(&sk->sk_receive_queue);
1704 hdr = buf_msg(skb);
1705 dlen = msg_data_sz(hdr);
1706 hlen = msg_hdr_sz(hdr);
1707 err = msg_errcode(hdr);
1708 grp_evt = msg_is_grp_evt(hdr);
1709 if (likely(dlen || err))
1710 break;
1711 tsk_advance_rx_queue(sk);
1712 } while (1);
1713
1714 /* Collect msg meta data, including error code and rejected data */
1715 tipc_sk_set_orig_addr(m, skb);
1716 rc = tipc_sk_anc_data_recv(m, hdr, tsk);
1717 if (unlikely(rc))
1718 goto exit;
1719
1720 /* Capture data if non-error msg, otherwise just set return value */
1721 if (likely(!err)) {
1722 copy = min_t(int, dlen, buflen);
1723 if (unlikely(copy != dlen))
1724 m->msg_flags |= MSG_TRUNC;
1725 rc = skb_copy_datagram_msg(skb, hlen, m, copy);
1726 } else {
1727 copy = 0;
1728 rc = 0;
1729 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
1730 rc = -ECONNRESET;
1731 }
1732 if (unlikely(rc))
1733 goto exit;
1734
1735 /* Mark message as group event if applicable */
1736 if (unlikely(grp_evt)) {
1737 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1738 m->msg_flags |= MSG_EOR;
1739 m->msg_flags |= MSG_OOB;
1740 copy = 0;
1741 }
1742
1743 /* Capture of data or error code/rejected data was successful */
1744 if (unlikely(flags & MSG_PEEK))
1745 goto exit;
1746
1747 /* Send group flow control advertisement when applicable */
1748 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1749 skb_queue_head_init(&xmitq);
1750 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1751 msg_orignode(hdr), msg_origport(hdr),
1752 &xmitq);
1753 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1754 }
1755
1756 tsk_advance_rx_queue(sk);
1757
1758 if (likely(!connected))
1759 goto exit;
1760
1761 /* Send connection flow control advertisement when applicable */
1762 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1763 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1764 tipc_sk_send_ack(tsk);
1765 exit:
1766 release_sock(sk);
1767 return rc ? rc : copy;
1768 }
1769
1770 /**
1771 * tipc_recvstream - receive stream-oriented data
1772 * @m: descriptor for message info
1773 * @buflen: total size of user buffer area
1774 * @flags: receive flags
1775 *
1776 * Used for SOCK_STREAM messages only. If not enough data is available,
1777 * it will optionally wait for more; it never truncates data.
1778 *
1779 * Returns size of returned message data, errno otherwise
1780 */
1781 static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1782 size_t buflen, int flags)
1783 {
1784 struct sock *sk = sock->sk;
1785 struct tipc_sock *tsk = tipc_sk(sk);
1786 struct sk_buff *skb;
1787 struct tipc_msg *hdr;
1788 struct tipc_skb_cb *skb_cb;
1789 bool peek = flags & MSG_PEEK;
1790 int offset, required, copy, copied = 0;
1791 int hlen, dlen, err, rc;
1792 long timeout;
1793
1794 /* Catch invalid receive attempts */
1795 if (unlikely(!buflen))
1796 return -EINVAL;
1797
1798 lock_sock(sk);
1799
1800 if (unlikely(sk->sk_state == TIPC_OPEN)) {
1801 rc = -ENOTCONN;
1802 goto exit;
1803 }
1804 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1805 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1806
1807 do {
1808 /* Look at first msg in receive queue; wait if necessary */
1809 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1810 if (unlikely(rc))
1811 break;
1812 skb = skb_peek(&sk->sk_receive_queue);
1813 skb_cb = TIPC_SKB_CB(skb);
1814 hdr = buf_msg(skb);
1815 dlen = msg_data_sz(hdr);
1816 hlen = msg_hdr_sz(hdr);
1817 err = msg_errcode(hdr);
1818
1819 /* Discard any empty non-errored (SYN-) message */
1820 if (unlikely(!dlen && !err)) {
1821 tsk_advance_rx_queue(sk);
1822 continue;
1823 }
1824
1825 /* Collect msg meta data, incl. error code and rejected data */
1826 if (!copied) {
1827 tipc_sk_set_orig_addr(m, skb);
1828 rc = tipc_sk_anc_data_recv(m, hdr, tsk);
1829 if (rc)
1830 break;
1831 }
1832
1833 /* Copy data if msg ok, otherwise return error/partial data */
1834 if (likely(!err)) {
1835 offset = skb_cb->bytes_read;
1836 copy = min_t(int, dlen - offset, buflen - copied);
1837 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1838 if (unlikely(rc))
1839 break;
1840 copied += copy;
1841 offset += copy;
1842 if (unlikely(offset < dlen)) {
1843 if (!peek)
1844 skb_cb->bytes_read = offset;
1845 break;
1846 }
1847 } else {
1848 rc = 0;
1849 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1850 rc = -ECONNRESET;
1851 if (copied || rc)
1852 break;
1853 }
1854
1855 if (unlikely(peek))
1856 break;
1857
1858 tsk_advance_rx_queue(sk);
1859
1860 /* Send connection flow control advertisement when applicable */
1861 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1862 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1863 tipc_sk_send_ack(tsk);
1864
1865 /* Exit if all requested data or FIN/error received */
1866 if (copied == buflen || err)
1867 break;
1868
1869 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1870 exit:
1871 release_sock(sk);
1872 return copied ? copied : rc;
1873 }
1874
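/*
 * Example (userspace sketch, not part of this kernel source) of driving the
 * stream receive path above: SO_RCVLOWAT maps to the 'required' byte count,
 * so a blocking recv() only returns once at least that much data has arrived
 * (or on error/shutdown); any surplus stays queued for the next call.
 * Assumes an already connected AF_TIPC SOCK_STREAM descriptor; the helper
 * name is illustrative.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	static ssize_t stream_recv_at_least(int sd, void *buf, size_t len,
 *					    int lowat)
 *	{
 *		if (setsockopt(sd, SOL_SOCKET, SO_RCVLOWAT,
 *			       &lowat, sizeof(lowat)) < 0)
 *			return -1;
 *		return recv(sd, buf, len, 0);	// 0 means the peer shut down
 *	}
 */
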
1875 /**
1876 * tipc_write_space - wake up thread if port congestion is released
1877 * @sk: socket
1878 */
1879 static void tipc_write_space(struct sock *sk)
1880 {
1881 struct socket_wq *wq;
1882
1883 rcu_read_lock();
1884 wq = rcu_dereference(sk->sk_wq);
1885 if (skwq_has_sleeper(wq))
1886 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1887 POLLWRNORM | POLLWRBAND);
1888 rcu_read_unlock();
1889 }
1890
1891 /**
1892 * tipc_data_ready - wake up threads to indicate messages have been received
1893 * @sk: socket
1895 */
1896 static void tipc_data_ready(struct sock *sk)
1897 {
1898 struct socket_wq *wq;
1899
1900 rcu_read_lock();
1901 wq = rcu_dereference(sk->sk_wq);
1902 if (skwq_has_sleeper(wq))
1903 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1904 POLLRDNORM | POLLRDBAND);
1905 rcu_read_unlock();
1906 }
1907
1908 static void tipc_sock_destruct(struct sock *sk)
1909 {
1910 __skb_queue_purge(&sk->sk_receive_queue);
1911 }
1912
1913 static void tipc_sk_proto_rcv(struct sock *sk,
1914 struct sk_buff_head *inputq,
1915 struct sk_buff_head *xmitq)
1916 {
1917 struct sk_buff *skb = __skb_dequeue(inputq);
1918 struct tipc_sock *tsk = tipc_sk(sk);
1919 struct tipc_msg *hdr = buf_msg(skb);
1920 struct tipc_group *grp = tsk->group;
1921 bool wakeup = false;
1922
1923 switch (msg_user(hdr)) {
1924 case CONN_MANAGER:
1925 tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
1926 return;
1927 case SOCK_WAKEUP:
1928 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1929 tsk->cong_link_cnt--;
1930 wakeup = true;
1931 break;
1932 case GROUP_PROTOCOL:
1933 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
1934 break;
1935 case TOP_SRV:
1936 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
1937 skb, inputq, xmitq);
1938 skb = NULL;
1939 break;
1940 default:
1941 break;
1942 }
1943
1944 if (wakeup)
1945 sk->sk_write_space(sk);
1946
1947 kfree_skb(skb);
1948 }
1949
1950 /**
1951 * tipc_sk_filter_connect - handle incoming message for a connection-based socket
1952 * @tsk: TIPC socket
1953 * @skb: pointer to message buffer
1954 *
1955 * Returns true if the message should be added to the receive queue, false otherwise
1956 */
1957 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1958 {
1959 struct sock *sk = &tsk->sk;
1960 struct net *net = sock_net(sk);
1961 struct tipc_msg *hdr = buf_msg(skb);
1962 u32 pport = msg_origport(hdr);
1963 u32 pnode = msg_orignode(hdr);
1964
1965 if (unlikely(msg_mcast(hdr)))
1966 return false;
1967
1968 switch (sk->sk_state) {
1969 case TIPC_CONNECTING:
1970 /* Accept only ACK or NACK message */
1971 if (unlikely(!msg_connected(hdr))) {
1972 if (pport != tsk_peer_port(tsk) ||
1973 pnode != tsk_peer_node(tsk))
1974 return false;
1975
1976 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1977 sk->sk_err = ECONNREFUSED;
1978 sk->sk_state_change(sk);
1979 return true;
1980 }
1981
1982 if (unlikely(msg_errcode(hdr))) {
1983 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1984 sk->sk_err = ECONNREFUSED;
1985 sk->sk_state_change(sk);
1986 return true;
1987 }
1988
1989 if (unlikely(!msg_isdata(hdr))) {
1990 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1991 sk->sk_err = EINVAL;
1992 sk->sk_state_change(sk);
1993 return true;
1994 }
1995
1996 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
1997 msg_set_importance(&tsk->phdr, msg_importance(hdr));
1998
1999 /* If 'ACK+' message, add to socket receive queue */
2000 if (msg_data_sz(hdr))
2001 return true;
2002
2003 /* If empty 'ACK-' message, wake up sleeping connect() */
2004 sk->sk_data_ready(sk);
2005
2006 /* 'ACK-' message is neither accepted nor rejected: */
2007 msg_set_dest_droppable(hdr, 1);
2008 return false;
2009
2010 case TIPC_OPEN:
2011 case TIPC_DISCONNECTING:
2012 break;
2013 case TIPC_LISTEN:
2014 /* Accept only SYN message */
2015 if (!msg_connected(hdr) && !(msg_errcode(hdr)))
2016 return true;
2017 break;
2018 case TIPC_ESTABLISHED:
2019 /* Accept only connection-based messages sent by peer */
2020 if (unlikely(!tsk_peer_msg(tsk, hdr)))
2021 return false;
2022
2023 if (unlikely(msg_errcode(hdr))) {
2024 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2025 /* Let timer expire on its own */
2026 tipc_node_remove_conn(net, tsk_peer_node(tsk),
2027 tsk->portid);
2028 sk->sk_state_change(sk);
2029 }
2030 return true;
2031 default:
2032 pr_err("Unknown sk_state %u\n", sk->sk_state);
2033 }
2034
2035 return false;
2036 }
2037
2038 /**
2039 * rcvbuf_limit - get proper overload limit of socket receive queue
2040 * @sk: socket
2041 * @skb: message
2042 *
2043 * For connection-oriented messages, irrespective of importance,
2044 * default queue limit is 2 MB.
2045 *
2046 * For connectionless messages, queue limits are based on message
2047 * importance as follows:
2048 *
2049 * TIPC_LOW_IMPORTANCE (2 MB)
2050 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2051 * TIPC_HIGH_IMPORTANCE (8 MB)
2052 * TIPC_CRITICAL_IMPORTANCE (16 MB)
2053 *
2054 * Returns overload limit according to corresponding message importance
2055 */
2056 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2057 {
2058 struct tipc_sock *tsk = tipc_sk(sk);
2059 struct tipc_msg *hdr = buf_msg(skb);
2060
2061 if (unlikely(msg_in_group(hdr)))
2062 return sk->sk_rcvbuf;
2063
2064 if (unlikely(!msg_connected(hdr)))
2065 return sk->sk_rcvbuf << msg_importance(hdr);
2066
2067 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2068 return sk->sk_rcvbuf;
2069
2070 return FLOWCTL_MSG_LIM;
2071 }
2072
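/*
 * Example (sketch, not part of this kernel source) of the arithmetic used
 * above for connectionless messages: the limit is sk_rcvbuf shifted left by
 * the message importance, which with a default sk_rcvbuf of roughly 2 MB
 * yields the 2/4/8/16 MB ladder documented in the comment.  The helper name
 * is illustrative.
 *
 *	static unsigned int example_rcvbuf_limit(unsigned int rcvbuf,
 *						 int importance)
 *	{
 *		// importance is TIPC_LOW..TIPC_CRITICAL_IMPORTANCE (0..3)
 *		return rcvbuf << importance;	// e.g. ~2 MB << 3 == ~16 MB
 *	}
 */
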
2073 /**
2074 * tipc_sk_filter_rcv - validate incoming message
2075 * @sk: socket
2076 * @skb: pointer to message.
2077 *
2078 * Enqueues message on receive queue if acceptable; optionally handles
2079 * disconnect indication for a connected socket.
2080 *
2081 * Called with socket lock already taken
2082 *
2083 */
2084 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2085 struct sk_buff_head *xmitq)
2086 {
2087 bool sk_conn = !tipc_sk_type_connectionless(sk);
2088 struct tipc_sock *tsk = tipc_sk(sk);
2089 struct tipc_group *grp = tsk->group;
2090 struct tipc_msg *hdr = buf_msg(skb);
2091 struct net *net = sock_net(sk);
2092 struct sk_buff_head inputq;
2093 int limit, err = TIPC_OK;
2094
2095 TIPC_SKB_CB(skb)->bytes_read = 0;
2096 __skb_queue_head_init(&inputq);
2097 __skb_queue_tail(&inputq, skb);
2098
2099 if (unlikely(!msg_isdata(hdr)))
2100 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2101
2102 if (unlikely(grp))
2103 tipc_group_filter_msg(grp, &inputq, xmitq);
2104
2105 /* Validate and add to receive buffer if there is space */
2106 while ((skb = __skb_dequeue(&inputq))) {
2107 hdr = buf_msg(skb);
2108 limit = rcvbuf_limit(sk, skb);
2109 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2110 (!sk_conn && msg_connected(hdr)) ||
2111 (!grp && msg_in_group(hdr)))
2112 err = TIPC_ERR_NO_PORT;
2113 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit)
2114 err = TIPC_ERR_OVERLOAD;
2115
2116 if (unlikely(err)) {
2117 tipc_skb_reject(net, err, skb, xmitq);
2118 err = TIPC_OK;
2119 continue;
2120 }
2121 __skb_queue_tail(&sk->sk_receive_queue, skb);
2122 skb_set_owner_r(skb, sk);
2123 sk->sk_data_ready(sk);
2124 }
2125 }
2126
2127 /**
2128 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2129 * @sk: socket
2130 * @skb: message
2131 *
2132 * Caller must hold socket lock
2133 */
2134 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2135 {
2136 unsigned int before = sk_rmem_alloc_get(sk);
2137 struct sk_buff_head xmitq;
2138 unsigned int added;
2139
2140 __skb_queue_head_init(&xmitq);
2141
2142 tipc_sk_filter_rcv(sk, skb, &xmitq);
2143 added = sk_rmem_alloc_get(sk) - before;
2144 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2145
2146 /* Send pending response/rejected messages, if any */
2147 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2148 return 0;
2149 }
2150
2151 /**
2152 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2153 * inputq and try adding them to socket or backlog queue
2154 * @inputq: list of incoming buffers with potentially different destinations
2155 * @sk: socket where the buffers should be enqueued
2156 * @dport: port number for the socket
2157 *
2158 * Caller must hold socket lock
2159 */
2160 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2161 u32 dport, struct sk_buff_head *xmitq)
2162 {
2163 unsigned long time_limit = jiffies + 2;
2164 struct sk_buff *skb;
2165 unsigned int lim;
2166 atomic_t *dcnt;
2167 u32 onode;
2168
2169 while (skb_queue_len(inputq)) {
2170 if (unlikely(time_after_eq(jiffies, time_limit)))
2171 return;
2172
2173 skb = tipc_skb_dequeue(inputq, dport);
2174 if (unlikely(!skb))
2175 return;
2176
2177 /* Add message directly to receive queue if possible */
2178 if (!sock_owned_by_user(sk)) {
2179 tipc_sk_filter_rcv(sk, skb, xmitq);
2180 continue;
2181 }
2182
2183 /* Try backlog, compensating for double-counted bytes */
2184 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2185 if (!sk->sk_backlog.len)
2186 atomic_set(dcnt, 0);
2187 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2188 if (likely(!sk_add_backlog(sk, skb, lim)))
2189 continue;
2190
2191 /* Overload => reject message back to sender */
2192 onode = tipc_own_addr(sock_net(sk));
2193 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
2194 __skb_queue_tail(xmitq, skb);
2195 break;
2196 }
2197 }
2198
2199 /**
2200 * tipc_sk_rcv - handle a chain of incoming buffers
2201 * @inputq: buffer list containing the buffers
2202 * Consumes all buffers in list until inputq is empty
2203 * Note: may be called in multiple threads referring to the same queue
2204 */
2205 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2206 {
2207 struct sk_buff_head xmitq;
2208 u32 dnode, dport = 0;
2209 int err;
2210 struct tipc_sock *tsk;
2211 struct sock *sk;
2212 struct sk_buff *skb;
2213
2214 __skb_queue_head_init(&xmitq);
2215 while (skb_queue_len(inputq)) {
2216 dport = tipc_skb_peek_port(inputq, dport);
2217 tsk = tipc_sk_lookup(net, dport);
2218
2219 if (likely(tsk)) {
2220 sk = &tsk->sk;
2221 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2222 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2223 spin_unlock_bh(&sk->sk_lock.slock);
2224 }
2225 /* Send pending response/rejected messages, if any */
2226 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2227 sock_put(sk);
2228 continue;
2229 }
2230 /* No destination socket => dequeue skb if still there */
2231 skb = tipc_skb_dequeue(inputq, dport);
2232 if (!skb)
2233 return;
2234
2235 /* Try secondary lookup if unresolved named message */
2236 err = TIPC_ERR_NO_PORT;
2237 if (tipc_msg_lookup_dest(net, skb, &err))
2238 goto xmit;
2239
2240 /* Prepare for message rejection */
2241 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2242 continue;
2243 xmit:
2244 dnode = msg_destnode(buf_msg(skb));
2245 tipc_node_xmit_skb(net, skb, dnode, dport);
2246 }
2247 }
2248
2249 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2250 {
2251 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2252 struct sock *sk = sock->sk;
2253 int done;
2254
2255 do {
2256 int err = sock_error(sk);
2257 if (err)
2258 return err;
2259 if (!*timeo_p)
2260 return -ETIMEDOUT;
2261 if (signal_pending(current))
2262 return sock_intr_errno(*timeo_p);
2263
2264 add_wait_queue(sk_sleep(sk), &wait);
2265 done = sk_wait_event(sk, timeo_p,
2266 sk->sk_state != TIPC_CONNECTING, &wait);
2267 remove_wait_queue(sk_sleep(sk), &wait);
2268 } while (!done);
2269 return 0;
2270 }
2271
2272 /**
2273 * tipc_connect - establish a connection to another TIPC port
2274 * @sock: socket structure
2275 * @dest: socket address for destination port
2276 * @destlen: size of socket address data structure
2277 * @flags: file-related flags associated with socket
2278 *
2279 * Returns 0 on success, errno otherwise
2280 */
2281 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2282 int destlen, int flags)
2283 {
2284 struct sock *sk = sock->sk;
2285 struct tipc_sock *tsk = tipc_sk(sk);
2286 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2287 struct msghdr m = {NULL,};
2288 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2289 int previous;
2290 int res = 0;
2291
2292 if (destlen != sizeof(struct sockaddr_tipc))
2293 return -EINVAL;
2294
2295 lock_sock(sk);
2296
2297 if (tsk->group) {
2298 res = -EINVAL;
2299 goto exit;
2300 }
2301
2302 if (dst->family == AF_UNSPEC) {
2303 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2304 if (!tipc_sk_type_connectionless(sk))
2305 res = -EINVAL;
2306 goto exit;
2307 } else if (dst->family != AF_TIPC) {
2308 res = -EINVAL;
2309 }
2310 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
2311 res = -EINVAL;
2312 if (res)
2313 goto exit;
2314
2315 /* DGRAM/RDM connect(), just save the destaddr */
2316 if (tipc_sk_type_connectionless(sk)) {
2317 memcpy(&tsk->peer, dest, destlen);
2318 goto exit;
2319 }
2320
2321 previous = sk->sk_state;
2322
2323 switch (sk->sk_state) {
2324 case TIPC_OPEN:
2325 /* Send a 'SYN-' to destination */
2326 m.msg_name = dest;
2327 m.msg_namelen = destlen;
2328
2329 /* If connect() is non-blocking, set MSG_DONTWAIT so that
2330 * __tipc_sendmsg() never blocks.
2331 */
2332 if (!timeout)
2333 m.msg_flags = MSG_DONTWAIT;
2334
2335 res = __tipc_sendmsg(sock, &m, 0);
2336 if ((res < 0) && (res != -EWOULDBLOCK))
2337 goto exit;
2338
2339 /* Just entered TIPC_CONNECTING state; the only
2340 * difference is that the return value in the non-blocking
2341 * case is -EINPROGRESS rather than -EALREADY.
2342 */
2343 res = -EINPROGRESS;
2344 /* fall thru' */
2345 case TIPC_CONNECTING:
2346 if (!timeout) {
2347 if (previous == TIPC_CONNECTING)
2348 res = -EALREADY;
2349 goto exit;
2350 }
2351 timeout = msecs_to_jiffies(timeout);
2352 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2353 res = tipc_wait_for_connect(sock, &timeout);
2354 break;
2355 case TIPC_ESTABLISHED:
2356 res = -EISCONN;
2357 break;
2358 default:
2359 res = -EINVAL;
2360 }
2361
2362 exit:
2363 release_sock(sk);
2364 return res;
2365 }
2366
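/*
 * Example (userspace sketch, not part of this kernel source) of a blocking
 * connect() to a TIPC service address, the path handled above.  A
 * non-blocking socket would instead see -EINPROGRESS and complete via
 * poll()/getsockopt(SO_ERROR).  The helper name and service type/instance
 * values are illustrative only.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *	#include <unistd.h>
 *
 *	static int connect_to_service(__u32 type, __u32 instance)
 *	{
 *		struct sockaddr_tipc srv = {
 *			.family = AF_TIPC,
 *			.addrtype = TIPC_ADDR_NAME,
 *			.addr.name.name.type = type,
 *			.addr.name.name.instance = instance,
 *			.addr.name.domain = 0,	// look up in own zone
 *		};
 *		int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *
 *		if (sd < 0)
 *			return -1;
 *		if (connect(sd, (struct sockaddr *)&srv, sizeof(srv)) < 0) {
 *			close(sd);
 *			return -1;
 *		}
 *		return sd;
 *	}
 */
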
2367 /**
2368 * tipc_listen - allow socket to listen for incoming connections
2369 * @sock: socket structure
2370 * @len: (unused)
2371 *
2372 * Returns 0 on success, errno otherwise
2373 */
2374 static int tipc_listen(struct socket *sock, int len)
2375 {
2376 struct sock *sk = sock->sk;
2377 int res;
2378
2379 lock_sock(sk);
2380 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2381 release_sock(sk);
2382
2383 return res;
2384 }
2385
2386 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2387 {
2388 struct sock *sk = sock->sk;
2389 DEFINE_WAIT(wait);
2390 int err;
2391
2392 /* True wake-one mechanism for incoming connections: only
2393 * one process gets woken up, not the 'whole herd'.
2394 * Since we do not 'race & poll' for established sockets
2395 * anymore, the common case will execute the loop only once.
2396 */
2397 for (;;) {
2398 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2399 TASK_INTERRUPTIBLE);
2400 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2401 release_sock(sk);
2402 timeo = schedule_timeout(timeo);
2403 lock_sock(sk);
2404 }
2405 err = 0;
2406 if (!skb_queue_empty(&sk->sk_receive_queue))
2407 break;
2408 err = -EAGAIN;
2409 if (!timeo)
2410 break;
2411 err = sock_intr_errno(timeo);
2412 if (signal_pending(current))
2413 break;
2414 }
2415 finish_wait(sk_sleep(sk), &wait);
2416 return err;
2417 }
2418
2419 /**
2420 * tipc_accept - wait for connection request
2421 * @sock: listening socket
2422 * @new_sock: new socket that is to be connected
2423 * @flags: file-related flags associated with socket
2424 *
2425 * Returns 0 on success, errno otherwise
2426 */
2427 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2428 bool kern)
2429 {
2430 struct sock *new_sk, *sk = sock->sk;
2431 struct sk_buff *buf;
2432 struct tipc_sock *new_tsock;
2433 struct tipc_msg *msg;
2434 long timeo;
2435 int res;
2436
2437 lock_sock(sk);
2438
2439 if (sk->sk_state != TIPC_LISTEN) {
2440 res = -EINVAL;
2441 goto exit;
2442 }
2443 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2444 res = tipc_wait_for_accept(sock, timeo);
2445 if (res)
2446 goto exit;
2447
2448 buf = skb_peek(&sk->sk_receive_queue);
2449
2450 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2451 if (res)
2452 goto exit;
2453 security_sk_clone(sock->sk, new_sock->sk);
2454
2455 new_sk = new_sock->sk;
2456 new_tsock = tipc_sk(new_sk);
2457 msg = buf_msg(buf);
2458
2459 /* we lock on new_sk; but lockdep sees the lock on sk */
2460 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2461
2462 /*
2463 * Reject any stray messages received by new socket
2464 * before the socket lock was taken (very, very unlikely)
2465 */
2466 tsk_rej_rx_queue(new_sk);
2467
2468 /* Connect new socket to its peer */
2469 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2470
2471 tsk_set_importance(new_tsock, msg_importance(msg));
2472 if (msg_named(msg)) {
2473 new_tsock->conn_type = msg_nametype(msg);
2474 new_tsock->conn_instance = msg_nameinst(msg);
2475 }
2476
2477 /*
2478 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2479 * Respond to 'SYN+' by queuing it on new socket.
2480 */
2481 if (!msg_data_sz(msg)) {
2482 struct msghdr m = {NULL,};
2483
2484 tsk_advance_rx_queue(sk);
2485 __tipc_sendstream(new_sock, &m, 0);
2486 } else {
2487 __skb_dequeue(&sk->sk_receive_queue);
2488 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2489 skb_set_owner_r(buf, new_sk);
2490 }
2491 release_sock(new_sk);
2492 exit:
2493 release_sock(sk);
2494 return res;
2495 }
2496
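/*
 * Example (userspace sketch, not part of this kernel source) of the
 * listen()/accept() path above.  The listening socket is assumed to have
 * been bound to a TIPC name beforehand (tipc_bind() is outside this
 * excerpt); the helper name is illustrative.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static int serve_one(int listener)
 *	{
 *		int peer;
 *
 *		if (listen(listener, 0) < 0)	// backlog length is unused
 *			return -1;
 *		peer = accept(listener, NULL, NULL);
 *		if (peer < 0)
 *			return -1;
 *		// ... exchange data on 'peer', then ...
 *		close(peer);
 *		return 0;
 *	}
 */
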
2497 /**
2498 * tipc_shutdown - shutdown socket connection
2499 * @sock: socket structure
2500 * @how: direction to close (must be SHUT_RDWR)
2501 *
2502 * Terminates connection (if necessary), then purges socket's receive queue.
2503 *
2504 * Returns 0 on success, errno otherwise
2505 */
2506 static int tipc_shutdown(struct socket *sock, int how)
2507 {
2508 struct sock *sk = sock->sk;
2509 int res;
2510
2511 if (how != SHUT_RDWR)
2512 return -EINVAL;
2513
2514 lock_sock(sk);
2515
2516 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2517 sk->sk_shutdown = SEND_SHUTDOWN;
2518
2519 if (sk->sk_state == TIPC_DISCONNECTING) {
2520 /* Discard any unreceived messages */
2521 __skb_queue_purge(&sk->sk_receive_queue);
2522
2523 /* Wake up anyone sleeping in poll */
2524 sk->sk_state_change(sk);
2525 res = 0;
2526 } else {
2527 res = -ENOTCONN;
2528 }
2529
2530 release_sock(sk);
2531 return res;
2532 }
2533
2534 static void tipc_sk_timeout(struct timer_list *t)
2535 {
2536 struct sock *sk = from_timer(sk, t, sk_timer);
2537 struct tipc_sock *tsk = tipc_sk(sk);
2538 u32 peer_port = tsk_peer_port(tsk);
2539 u32 peer_node = tsk_peer_node(tsk);
2540 u32 own_node = tsk_own_node(tsk);
2541 u32 own_port = tsk->portid;
2542 struct net *net = sock_net(sk);
2543 struct sk_buff *skb = NULL;
2544
2545 bh_lock_sock(sk);
2546 if (!tipc_sk_connected(sk))
2547 goto exit;
2548
2549 /* Try again later if socket is busy */
2550 if (sock_owned_by_user(sk)) {
2551 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2552 goto exit;
2553 }
2554
2555 if (tsk->probe_unacked) {
2556 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2557 tipc_node_remove_conn(net, peer_node, peer_port);
2558 sk->sk_state_change(sk);
2559 goto exit;
2560 }
2561 /* Send new probe */
2562 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2563 peer_node, own_node, peer_port, own_port,
2564 TIPC_OK);
2565 tsk->probe_unacked = true;
2566 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2567 exit:
2568 bh_unlock_sock(sk);
2569 if (skb)
2570 tipc_node_xmit_skb(net, skb, peer_node, own_port);
2571 sock_put(sk);
2572 }
2573
2574 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2575 struct tipc_name_seq const *seq)
2576 {
2577 struct sock *sk = &tsk->sk;
2578 struct net *net = sock_net(sk);
2579 struct publication *publ;
2580 u32 key;
2581
2582 if (tipc_sk_connected(sk))
2583 return -EINVAL;
2584 key = tsk->portid + tsk->pub_count + 1;
2585 if (key == tsk->portid)
2586 return -EADDRINUSE;
2587
2588 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2589 scope, tsk->portid, key);
2590 if (unlikely(!publ))
2591 return -EINVAL;
2592
2593 list_add(&publ->pport_list, &tsk->publications);
2594 tsk->pub_count++;
2595 tsk->published = 1;
2596 return 0;
2597 }
2598
2599 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2600 struct tipc_name_seq const *seq)
2601 {
2602 struct net *net = sock_net(&tsk->sk);
2603 struct publication *publ;
2604 struct publication *safe;
2605 int rc = -EINVAL;
2606
2607 list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
2608 if (seq) {
2609 if (publ->scope != scope)
2610 continue;
2611 if (publ->type != seq->type)
2612 continue;
2613 if (publ->lower != seq->lower)
2614 continue;
2615 if (publ->upper != seq->upper)
2616 break;
2617 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2618 publ->ref, publ->key);
2619 rc = 0;
2620 break;
2621 }
2622 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2623 publ->ref, publ->key);
2624 rc = 0;
2625 }
2626 if (list_empty(&tsk->publications))
2627 tsk->published = 0;
2628 return rc;
2629 }
2630
2631 /* tipc_sk_reinit: set non-zero address in all existing sockets
2632 * when we go from standalone to network mode.
2633 */
2634 void tipc_sk_reinit(struct net *net)
2635 {
2636 struct tipc_net *tn = net_generic(net, tipc_net_id);
2637 struct rhashtable_iter iter;
2638 struct tipc_sock *tsk;
2639 struct tipc_msg *msg;
2640
2641 rhashtable_walk_enter(&tn->sk_rht, &iter);
2642
2643 do {
2644 tsk = ERR_PTR(rhashtable_walk_start(&iter));
2645 if (IS_ERR(tsk))
2646 goto walk_stop;
2647
2648 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2649 spin_lock_bh(&tsk->sk.sk_lock.slock);
2650 msg = &tsk->phdr;
2651 msg_set_prevnode(msg, tn->own_addr);
2652 msg_set_orignode(msg, tn->own_addr);
2653 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2654 }
2655 walk_stop:
2656 rhashtable_walk_stop(&iter);
2657 } while (tsk == ERR_PTR(-EAGAIN));
2658 }
2659
2660 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2661 {
2662 struct tipc_net *tn = net_generic(net, tipc_net_id);
2663 struct tipc_sock *tsk;
2664
2665 rcu_read_lock();
2666 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2667 if (tsk)
2668 sock_hold(&tsk->sk);
2669 rcu_read_unlock();
2670
2671 return tsk;
2672 }
2673
2674 static int tipc_sk_insert(struct tipc_sock *tsk)
2675 {
2676 struct sock *sk = &tsk->sk;
2677 struct net *net = sock_net(sk);
2678 struct tipc_net *tn = net_generic(net, tipc_net_id);
2679 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2680 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2681
2682 while (remaining--) {
2683 portid++;
2684 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2685 portid = TIPC_MIN_PORT;
2686 tsk->portid = portid;
2687 sock_hold(&tsk->sk);
2688 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2689 tsk_rht_params))
2690 return 0;
2691 sock_put(&tsk->sk);
2692 }
2693
2694 return -1;
2695 }
2696
2697 static void tipc_sk_remove(struct tipc_sock *tsk)
2698 {
2699 struct sock *sk = &tsk->sk;
2700 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2701
2702 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2703 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2704 __sock_put(sk);
2705 }
2706 }
2707
2708 static const struct rhashtable_params tsk_rht_params = {
2709 .nelem_hint = 192,
2710 .head_offset = offsetof(struct tipc_sock, node),
2711 .key_offset = offsetof(struct tipc_sock, portid),
2712 .key_len = sizeof(u32), /* portid */
2713 .max_size = 1048576,
2714 .min_size = 256,
2715 .automatic_shrinking = true,
2716 };
2717
2718 int tipc_sk_rht_init(struct net *net)
2719 {
2720 struct tipc_net *tn = net_generic(net, tipc_net_id);
2721
2722 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2723 }
2724
2725 void tipc_sk_rht_destroy(struct net *net)
2726 {
2727 struct tipc_net *tn = net_generic(net, tipc_net_id);
2728
2729 /* Wait for socket readers to complete */
2730 synchronize_net();
2731
2732 rhashtable_destroy(&tn->sk_rht);
2733 }
2734
2735 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2736 {
2737 struct net *net = sock_net(&tsk->sk);
2738 u32 domain = addr_domain(net, mreq->scope);
2739 struct tipc_group *grp = tsk->group;
2740 struct tipc_msg *hdr = &tsk->phdr;
2741 struct tipc_name_seq seq;
2742 int rc;
2743
2744 if (mreq->type < TIPC_RESERVED_TYPES)
2745 return -EACCES;
2746 if (grp)
2747 return -EACCES;
2748 grp = tipc_group_create(net, tsk->portid, mreq);
2749 if (!grp)
2750 return -ENOMEM;
2751 tsk->group = grp;
2752 msg_set_lookup_scope(hdr, mreq->scope);
2753 msg_set_nametype(hdr, mreq->type);
2754 msg_set_dest_droppable(hdr, true);
2755 seq.type = mreq->type;
2756 seq.lower = mreq->instance;
2757 seq.upper = seq.lower;
2758 tipc_nametbl_build_group(net, grp, mreq->type, domain);
2759 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2760 if (rc) {
2761 tipc_group_delete(net, grp);
2762 tsk->group = NULL;
2763 }
2764
2765 /* Eliminate any risk that a broadcast overtakes the sent JOIN */
2766 tsk->mc_method.rcast = true;
2767 tsk->mc_method.mandatory = true;
2768 return rc;
2769 }
2770
2771 static int tipc_sk_leave(struct tipc_sock *tsk)
2772 {
2773 struct net *net = sock_net(&tsk->sk);
2774 struct tipc_group *grp = tsk->group;
2775 struct tipc_name_seq seq;
2776 int scope;
2777
2778 if (!grp)
2779 return -EINVAL;
2780 tipc_group_self(grp, &seq, &scope);
2781 tipc_group_delete(net, grp);
2782 tsk->group = NULL;
2783 tipc_sk_withdraw(tsk, scope, &seq);
2784 return 0;
2785 }
2786
2787 /**
2788 * tipc_setsockopt - set socket option
2789 * @sock: socket structure
2790 * @lvl: option level
2791 * @opt: option identifier
2792 * @ov: pointer to new option value
2793 * @ol: length of option value
2794 *
2795 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2796 * (to ease compatibility).
2797 *
2798 * Returns 0 on success, errno otherwise
2799 */
2800 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2801 char __user *ov, unsigned int ol)
2802 {
2803 struct sock *sk = sock->sk;
2804 struct tipc_sock *tsk = tipc_sk(sk);
2805 struct tipc_group_req mreq;
2806 u32 value = 0;
2807 int res = 0;
2808
2809 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2810 return 0;
2811 if (lvl != SOL_TIPC)
2812 return -ENOPROTOOPT;
2813
2814 switch (opt) {
2815 case TIPC_IMPORTANCE:
2816 case TIPC_SRC_DROPPABLE:
2817 case TIPC_DEST_DROPPABLE:
2818 case TIPC_CONN_TIMEOUT:
2819 if (ol < sizeof(value))
2820 return -EINVAL;
2821 if (get_user(value, (u32 __user *)ov))
2822 return -EFAULT;
2823 break;
2824 case TIPC_GROUP_JOIN:
2825 if (ol < sizeof(mreq))
2826 return -EINVAL;
2827 if (copy_from_user(&mreq, ov, sizeof(mreq)))
2828 return -EFAULT;
2829 break;
2830 default:
2831 if (ov || ol)
2832 return -EINVAL;
2833 }
2834
2835 lock_sock(sk);
2836
2837 switch (opt) {
2838 case TIPC_IMPORTANCE:
2839 res = tsk_set_importance(tsk, value);
2840 break;
2841 case TIPC_SRC_DROPPABLE:
2842 if (sock->type != SOCK_STREAM)
2843 tsk_set_unreliable(tsk, value);
2844 else
2845 res = -ENOPROTOOPT;
2846 break;
2847 case TIPC_DEST_DROPPABLE:
2848 tsk_set_unreturnable(tsk, value);
2849 break;
2850 case TIPC_CONN_TIMEOUT:
2851 tipc_sk(sk)->conn_timeout = value;
2852 break;
2853 case TIPC_MCAST_BROADCAST:
2854 tsk->mc_method.rcast = false;
2855 tsk->mc_method.mandatory = true;
2856 break;
2857 case TIPC_MCAST_REPLICAST:
2858 tsk->mc_method.rcast = true;
2859 tsk->mc_method.mandatory = true;
2860 break;
2861 case TIPC_GROUP_JOIN:
2862 res = tipc_sk_join(tsk, &mreq);
2863 break;
2864 case TIPC_GROUP_LEAVE:
2865 res = tipc_sk_leave(tsk);
2866 break;
2867 default:
2868 res = -EINVAL;
2869 }
2870
2871 release_sock(sk);
2872
2873 return res;
2874 }
2875
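/*
 * Example (userspace sketch, not part of this kernel source) of joining a
 * communication group through the TIPC_GROUP_JOIN option handled above.
 * The helper name and group type/instance values are illustrative;
 * TIPC_GROUP_MEMBER_EVTS requests membership events on the same socket.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	static int join_group(int sd, __u32 type, __u32 instance)
 *	{
 *		struct tipc_group_req mreq = {
 *			.type = type,	// must be >= TIPC_RESERVED_TYPES
 *			.instance = instance,
 *			.scope = TIPC_CLUSTER_SCOPE,
 *			.flags = TIPC_GROUP_MEMBER_EVTS,
 *		};
 *
 *		return setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN,
 *				  &mreq, sizeof(mreq));
 *	}
 */
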
2876 /**
2877 * tipc_getsockopt - get socket option
2878 * @sock: socket structure
2879 * @lvl: option level
2880 * @opt: option identifier
2881 * @ov: receptacle for option value
2882 * @ol: receptacle for length of option value
2883 *
2884 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2885 * (to ease compatibility).
2886 *
2887 * Returns 0 on success, errno otherwise
2888 */
2889 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2890 char __user *ov, int __user *ol)
2891 {
2892 struct sock *sk = sock->sk;
2893 struct tipc_sock *tsk = tipc_sk(sk);
2894 struct tipc_name_seq seq;
2895 int len, scope;
2896 u32 value;
2897 int res;
2898
2899 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2900 return put_user(0, ol);
2901 if (lvl != SOL_TIPC)
2902 return -ENOPROTOOPT;
2903 res = get_user(len, ol);
2904 if (res)
2905 return res;
2906
2907 lock_sock(sk);
2908
2909 switch (opt) {
2910 case TIPC_IMPORTANCE:
2911 value = tsk_importance(tsk);
2912 break;
2913 case TIPC_SRC_DROPPABLE:
2914 value = tsk_unreliable(tsk);
2915 break;
2916 case TIPC_DEST_DROPPABLE:
2917 value = tsk_unreturnable(tsk);
2918 break;
2919 case TIPC_CONN_TIMEOUT:
2920 value = tsk->conn_timeout;
2921 /* no need to set "res", since already 0 at this point */
2922 break;
2923 case TIPC_NODE_RECVQ_DEPTH:
2924 value = 0; /* was tipc_queue_size, now obsolete */
2925 break;
2926 case TIPC_SOCK_RECVQ_DEPTH:
2927 value = skb_queue_len(&sk->sk_receive_queue);
2928 break;
2929 case TIPC_GROUP_JOIN:
2930 seq.type = 0;
2931 if (tsk->group)
2932 tipc_group_self(tsk->group, &seq, &scope);
2933 value = seq.type;
2934 break;
2935 default:
2936 res = -EINVAL;
2937 }
2938
2939 release_sock(sk);
2940
2941 if (res)
2942 return res; /* "get" failed */
2943
2944 if (len < sizeof(value))
2945 return -EINVAL;
2946
2947 if (copy_to_user(ov, &value, sizeof(value)))
2948 return -EFAULT;
2949
2950 return put_user(sizeof(value), ol);
2951 }
2952
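/*
 * Example (userspace sketch, not part of this kernel source) of reading a
 * socket option through the path above; all SOL_TIPC options handled here
 * return a single u32 value.  The helper name is illustrative.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	static int get_conn_timeout(int sd, __u32 *ms)
 *	{
 *		socklen_t len = sizeof(*ms);
 *
 *		return getsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, ms, &len);
 *	}
 */
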
2953 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2954 {
2955 struct sock *sk = sock->sk;
2956 struct tipc_sioc_ln_req lnr;
2957 void __user *argp = (void __user *)arg;
2958
2959 switch (cmd) {
2960 case SIOCGETLINKNAME:
2961 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2962 return -EFAULT;
2963 if (!tipc_node_get_linkname(sock_net(sk),
2964 lnr.bearer_id & 0xffff, lnr.peer,
2965 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2966 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2967 return -EFAULT;
2968 return 0;
2969 }
2970 return -EADDRNOTAVAIL;
2971 default:
2972 return -ENOIOCTLCMD;
2973 }
2974 }
2975
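/*
 * Example (userspace sketch, not part of this kernel source) of the
 * SIOCGETLINKNAME ioctl handled above: resolve the link name towards a
 * given peer node over a given bearer.  The helper name is illustrative.
 *
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *	#include <linux/tipc.h>
 *
 *	static int get_link_name(int sd, __u32 peer_node, __u32 bearer_id,
 *				 char name[TIPC_MAX_LINK_NAME])
 *	{
 *		struct tipc_sioc_ln_req lnr = {
 *			.peer = peer_node,
 *			.bearer_id = bearer_id,
 *		};
 *
 *		if (ioctl(sd, SIOCGETLINKNAME, &lnr) < 0)
 *			return -1;
 *		memcpy(name, lnr.linkname, TIPC_MAX_LINK_NAME);
 *		return 0;
 *	}
 */
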
2976 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
2977 {
2978 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
2979 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
2980 u32 onode = tipc_own_addr(sock_net(sock1->sk));
2981
2982 tsk1->peer.family = AF_TIPC;
2983 tsk1->peer.addrtype = TIPC_ADDR_ID;
2984 tsk1->peer.scope = TIPC_NODE_SCOPE;
2985 tsk1->peer.addr.id.ref = tsk2->portid;
2986 tsk1->peer.addr.id.node = onode;
2987 tsk2->peer.family = AF_TIPC;
2988 tsk2->peer.addrtype = TIPC_ADDR_ID;
2989 tsk2->peer.scope = TIPC_NODE_SCOPE;
2990 tsk2->peer.addr.id.ref = tsk1->portid;
2991 tsk2->peer.addr.id.node = onode;
2992
2993 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
2994 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
2995 return 0;
2996 }
2997
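/*
 * Example (userspace sketch, not part of this kernel source) of the
 * socketpair() support implemented above: the two descriptors come back
 * already connected to each other on the local node.  The helper name is
 * illustrative.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *
 *	static int make_local_pair(int sv[2])
 *	{
 *		return socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv);
 *	}
 */
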
2998 /* Protocol switches for the various types of TIPC sockets */
2999
3000 static const struct proto_ops msg_ops = {
3001 .owner = THIS_MODULE,
3002 .family = AF_TIPC,
3003 .release = tipc_release,
3004 .bind = tipc_bind,
3005 .connect = tipc_connect,
3006 .socketpair = tipc_socketpair,
3007 .accept = sock_no_accept,
3008 .getname = tipc_getname,
3009 .poll = tipc_poll,
3010 .ioctl = tipc_ioctl,
3011 .listen = sock_no_listen,
3012 .shutdown = tipc_shutdown,
3013 .setsockopt = tipc_setsockopt,
3014 .getsockopt = tipc_getsockopt,
3015 .sendmsg = tipc_sendmsg,
3016 .recvmsg = tipc_recvmsg,
3017 .mmap = sock_no_mmap,
3018 .sendpage = sock_no_sendpage
3019 };
3020
3021 static const struct proto_ops packet_ops = {
3022 .owner = THIS_MODULE,
3023 .family = AF_TIPC,
3024 .release = tipc_release,
3025 .bind = tipc_bind,
3026 .connect = tipc_connect,
3027 .socketpair = tipc_socketpair,
3028 .accept = tipc_accept,
3029 .getname = tipc_getname,
3030 .poll = tipc_poll,
3031 .ioctl = tipc_ioctl,
3032 .listen = tipc_listen,
3033 .shutdown = tipc_shutdown,
3034 .setsockopt = tipc_setsockopt,
3035 .getsockopt = tipc_getsockopt,
3036 .sendmsg = tipc_send_packet,
3037 .recvmsg = tipc_recvmsg,
3038 .mmap = sock_no_mmap,
3039 .sendpage = sock_no_sendpage
3040 };
3041
3042 static const struct proto_ops stream_ops = {
3043 .owner = THIS_MODULE,
3044 .family = AF_TIPC,
3045 .release = tipc_release,
3046 .bind = tipc_bind,
3047 .connect = tipc_connect,
3048 .socketpair = tipc_socketpair,
3049 .accept = tipc_accept,
3050 .getname = tipc_getname,
3051 .poll = tipc_poll,
3052 .ioctl = tipc_ioctl,
3053 .listen = tipc_listen,
3054 .shutdown = tipc_shutdown,
3055 .setsockopt = tipc_setsockopt,
3056 .getsockopt = tipc_getsockopt,
3057 .sendmsg = tipc_sendstream,
3058 .recvmsg = tipc_recvstream,
3059 .mmap = sock_no_mmap,
3060 .sendpage = sock_no_sendpage
3061 };
3062
3063 static const struct net_proto_family tipc_family_ops = {
3064 .owner = THIS_MODULE,
3065 .family = AF_TIPC,
3066 .create = tipc_sk_create
3067 };
3068
3069 static struct proto tipc_proto = {
3070 .name = "TIPC",
3071 .owner = THIS_MODULE,
3072 .obj_size = sizeof(struct tipc_sock),
3073 .sysctl_rmem = sysctl_tipc_rmem
3074 };
3075
3076 /**
3077 * tipc_socket_init - initialize TIPC socket interface
3078 *
3079 * Returns 0 on success, errno otherwise
3080 */
3081 int tipc_socket_init(void)
3082 {
3083 int res;
3084
3085 res = proto_register(&tipc_proto, 1);
3086 if (res) {
3087 pr_err("Failed to register TIPC protocol type\n");
3088 goto out;
3089 }
3090
3091 res = sock_register(&tipc_family_ops);
3092 if (res) {
3093 pr_err("Failed to register TIPC socket type\n");
3094 proto_unregister(&tipc_proto);
3095 goto out;
3096 }
3097 out:
3098 return res;
3099 }
3100
3101 /**
3102 * tipc_socket_stop - stop TIPC socket interface
3103 */
3104 void tipc_socket_stop(void)
3105 {
3106 sock_unregister(tipc_family_ops.family);
3107 proto_unregister(&tipc_proto);
3108 }
3109
3110 /* Caller should hold socket lock for the passed tipc socket. */
3111 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3112 {
3113 u32 peer_node;
3114 u32 peer_port;
3115 struct nlattr *nest;
3116
3117 peer_node = tsk_peer_node(tsk);
3118 peer_port = tsk_peer_port(tsk);
3119
3120 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
3121
3122 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3123 goto msg_full;
3124 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3125 goto msg_full;
3126
3127 if (tsk->conn_type != 0) {
3128 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3129 goto msg_full;
3130 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3131 goto msg_full;
3132 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3133 goto msg_full;
3134 }
3135 nla_nest_end(skb, nest);
3136
3137 return 0;
3138
3139 msg_full:
3140 nla_nest_cancel(skb, nest);
3141
3142 return -EMSGSIZE;
3143 }
3144
3145 /* Caller should hold socket lock for the passed tipc socket. */
3146 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3147 struct tipc_sock *tsk)
3148 {
3149 int err;
3150 void *hdr;
3151 struct nlattr *attrs;
3152 struct net *net = sock_net(skb->sk);
3153 struct tipc_net *tn = net_generic(net, tipc_net_id);
3154 struct sock *sk = &tsk->sk;
3155
3156 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3157 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3158 if (!hdr)
3159 goto msg_cancel;
3160
3161 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3162 if (!attrs)
3163 goto genlmsg_cancel;
3164 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
3165 goto attr_msg_cancel;
3166 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
3167 goto attr_msg_cancel;
3168
3169 if (tipc_sk_connected(sk)) {
3170 err = __tipc_nl_add_sk_con(skb, tsk);
3171 if (err)
3172 goto attr_msg_cancel;
3173 } else if (!list_empty(&tsk->publications)) {
3174 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3175 goto attr_msg_cancel;
3176 }
3177 nla_nest_end(skb, attrs);
3178 genlmsg_end(skb, hdr);
3179
3180 return 0;
3181
3182 attr_msg_cancel:
3183 nla_nest_cancel(skb, attrs);
3184 genlmsg_cancel:
3185 genlmsg_cancel(skb, hdr);
3186 msg_cancel:
3187 return -EMSGSIZE;
3188 }
3189
3190 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3191 {
3192 int err;
3193 struct tipc_sock *tsk;
3194 const struct bucket_table *tbl;
3195 struct rhash_head *pos;
3196 struct net *net = sock_net(skb->sk);
3197 struct tipc_net *tn = net_generic(net, tipc_net_id);
3198 u32 tbl_id = cb->args[0];
3199 u32 prev_portid = cb->args[1];
3200
3201 rcu_read_lock();
3202 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
3203 for (; tbl_id < tbl->size; tbl_id++) {
3204 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
3205 spin_lock_bh(&tsk->sk.sk_lock.slock);
3206 if (prev_portid && prev_portid != tsk->portid) {
3207 spin_unlock_bh(&tsk->sk.sk_lock.slock);
3208 continue;
3209 }
3210
3211 err = __tipc_nl_add_sk(skb, cb, tsk);
3212 if (err) {
3213 prev_portid = tsk->portid;
3214 spin_unlock_bh(&tsk->sk.sk_lock.slock);
3215 goto out;
3216 }
3217 prev_portid = 0;
3218 spin_unlock_bh(&tsk->sk.sk_lock.slock);
3219 }
3220 }
3221 out:
3222 rcu_read_unlock();
3223 cb->args[0] = tbl_id;
3224 cb->args[1] = prev_portid;
3225
3226 return skb->len;
3227 }
3228
3229 /* Caller should hold socket lock for the passed tipc socket. */
3230 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3231 struct netlink_callback *cb,
3232 struct publication *publ)
3233 {
3234 void *hdr;
3235 struct nlattr *attrs;
3236
3237 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3238 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3239 if (!hdr)
3240 goto msg_cancel;
3241
3242 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
3243 if (!attrs)
3244 goto genlmsg_cancel;
3245
3246 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3247 goto attr_msg_cancel;
3248 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3249 goto attr_msg_cancel;
3250 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3251 goto attr_msg_cancel;
3252 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3253 goto attr_msg_cancel;
3254
3255 nla_nest_end(skb, attrs);
3256 genlmsg_end(skb, hdr);
3257
3258 return 0;
3259
3260 attr_msg_cancel:
3261 nla_nest_cancel(skb, attrs);
3262 genlmsg_cancel:
3263 genlmsg_cancel(skb, hdr);
3264 msg_cancel:
3265 return -EMSGSIZE;
3266 }
3267
3268 /* Caller should hold socket lock for the passed tipc socket. */
3269 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3270 struct netlink_callback *cb,
3271 struct tipc_sock *tsk, u32 *last_publ)
3272 {
3273 int err;
3274 struct publication *p;
3275
3276 if (*last_publ) {
3277 list_for_each_entry(p, &tsk->publications, pport_list) {
3278 if (p->key == *last_publ)
3279 break;
3280 }
3281 if (p->key != *last_publ) {
3282 /* We never set seq or call nl_dump_check_consistent(),
3283 * which means that setting prev_seq here will cause the
3284 * consistency check to fail in the netlink callback
3285 * handler, resulting in the last NLMSG_DONE message
3286 * having the NLM_F_DUMP_INTR flag set.
3287 */
3288 cb->prev_seq = 1;
3289 *last_publ = 0;
3290 return -EPIPE;
3291 }
3292 } else {
3293 p = list_first_entry(&tsk->publications, struct publication,
3294 pport_list);
3295 }
3296
3297 list_for_each_entry_from(p, &tsk->publications, pport_list) {
3298 err = __tipc_nl_add_sk_publ(skb, cb, p);
3299 if (err) {
3300 *last_publ = p->key;
3301 return err;
3302 }
3303 }
3304 *last_publ = 0;
3305
3306 return 0;
3307 }
3308
3309 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3310 {
3311 int err;
3312 u32 tsk_portid = cb->args[0];
3313 u32 last_publ = cb->args[1];
3314 u32 done = cb->args[2];
3315 struct net *net = sock_net(skb->sk);
3316 struct tipc_sock *tsk;
3317
3318 if (!tsk_portid) {
3319 struct nlattr **attrs;
3320 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3321
3322 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3323 if (err)
3324 return err;
3325
3326 if (!attrs[TIPC_NLA_SOCK])
3327 return -EINVAL;
3328
3329 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
3330 attrs[TIPC_NLA_SOCK],
3331 tipc_nl_sock_policy, NULL);
3332 if (err)
3333 return err;
3334
3335 if (!sock[TIPC_NLA_SOCK_REF])
3336 return -EINVAL;
3337
3338 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3339 }
3340
3341 if (done)
3342 return 0;
3343
3344 tsk = tipc_sk_lookup(net, tsk_portid);
3345 if (!tsk)
3346 return -EINVAL;
3347
3348 lock_sock(&tsk->sk);
3349 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3350 if (!err)
3351 done = 1;
3352 release_sock(&tsk->sk);
3353 sock_put(&tsk->sk);
3354
3355 cb->args[0] = tsk_portid;
3356 cb->args[1] = last_publ;
3357 cb->args[2] = done;
3358
3359 return skb->len;
3360 }