/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"

#define CONN_TIMEOUT_DEFAULT    8000    /* default connect timeout = 8s */
#define CONN_PROBING_INTV       msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG            1
#define TIPC_MAX_PORT           0xffffffff
#define TIPC_MIN_PORT           1
#define TIPC_ACK_RATE           4       /* ACK at 1/4 of rcv window size */

enum {
        TIPC_LISTEN = TCP_LISTEN,
        TIPC_ESTABLISHED = TCP_ESTABLISHED,
        TIPC_OPEN = TCP_CLOSE,
        TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
        TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
        struct sockaddr_tipc sock;
        struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @blocking_link: address of the congested link we are currently sleeping on
 * @pub_count: total # of publications port has made during its lifetime
 * @probe_unacked: probe has not yet been acknowledged by peer
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
        struct sock sk;
        u32 conn_type;
        u32 conn_instance;
        int published;
        u32 max_pkt;
        u32 portid;
        struct tipc_msg phdr;
        struct list_head cong_links;
        struct list_head publications;
        u32 pub_count;
        uint conn_timeout;
        atomic_t dupl_rcvcnt;
        bool probe_unacked;
        u16 cong_link_cnt;
        u16 snt_unacked;
        u16 snd_win;
        u16 peer_caps;
        u16 rcv_unacked;
        u16 rcv_win;
        struct sockaddr_tipc peer;
        struct rhash_head node;
        struct tipc_mc_method mc_method;
        struct rcu_head rcu;
        struct tipc_group *group;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
                       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
                           struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
                            struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

static u32 tsk_own_node(struct tipc_sock *tsk)
{
        return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
        return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
        return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
        return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
        msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
        return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
        msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
        return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
        if (imp > TIPC_CRITICAL_IMPORTANCE)
                return -EINVAL;
        msg_set_importance(&tsk->phdr, (u32)imp);
        return 0;
}
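
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * importance level set via tsk_set_importance() above is normally driven
 * by the TIPC_IMPORTANCE socket option, e.g.:
 *
 *      #include <sys/socket.h>
 *      #include <linux/tipc.h>
 *
 *      int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *      int imp = TIPC_HIGH_IMPORTANCE;
 *      setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 *
 * Values above TIPC_CRITICAL_IMPORTANCE are rejected with -EINVAL.
 */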

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
        return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
        return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
        return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
        return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based flow control, incrementing the counter
 *   by one unit per message
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
        if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
                return ((msglen / FLOWCTL_BLK_SZ) + 1);
        return 1;
}
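
/*
 * Worked example (illustrative): with FLOWCTL_BLK_SZ at its default of
 * 1024 bytes, a 5000 byte message advances the counter by
 * 5000 / 1024 + 1 = 5 blocks when the peer advertises TIPC_BLOCK_FLOWCTL,
 * but by exactly 1 unit (one message) when it does not. tsk_adv_blocks()
 * is deliberately conservative: for a 64 KB receive buffer it advertises
 * 65536 / 1024 / 4 = 16 blocks, absorbing a truesize(len)/len ratio of
 * up to 4.
 */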

/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
        kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond(): send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
        u32 selector;
        u32 dnode;
        u32 onode = tipc_own_addr(sock_net(sk));

        if (!tipc_msg_reverse(onode, &skb, err))
                return;

        dnode = msg_destnode(buf_msg(skb));
        selector = msg_origport(buf_msg(skb));
        tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
                tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}

static bool tipc_sk_connected(struct sock *sk)
{
        return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is a datagram socket
 * @sk: socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
        return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
        struct sock *sk = &tsk->sk;
        struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
        u32 peer_port = tsk_peer_port(tsk);
        u32 orig_node;
        u32 peer_node;

        if (unlikely(!tipc_sk_connected(sk)))
                return false;

        if (unlikely(msg_origport(msg) != peer_port))
                return false;

        orig_node = msg_orignode(msg);
        peer_node = tsk_peer_node(tsk);

        if (likely(orig_node == peer_node))
                return true;

        if (!orig_node && (peer_node == tn->own_addr))
                return true;

        if (!peer_node && (orig_node == tn->own_addr))
                return true;

        return false;
}

/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
        int oldsk_state = sk->sk_state;
        int res = -EINVAL;

        switch (state) {
        case TIPC_OPEN:
                res = 0;
                break;
        case TIPC_LISTEN:
        case TIPC_CONNECTING:
                if (oldsk_state == TIPC_OPEN)
                        res = 0;
                break;
        case TIPC_ESTABLISHED:
                if (oldsk_state == TIPC_CONNECTING ||
                    oldsk_state == TIPC_OPEN)
                        res = 0;
                break;
        case TIPC_DISCONNECTING:
                if (oldsk_state == TIPC_CONNECTING ||
                    oldsk_state == TIPC_ESTABLISHED)
                        res = 0;
                break;
        }

        if (!res)
                sk->sk_state = state;

        return res;
}

static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
        struct sock *sk = sock->sk;
        int err = sock_error(sk);
        int typ = sock->type;

        if (err)
                return err;
        if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
                if (sk->sk_state == TIPC_DISCONNECTING)
                        return -EPIPE;
                else if (!tipc_sk_connected(sk))
                        return -ENOTCONN;
        }
        if (!*timeout)
                return -EAGAIN;
        if (signal_pending(current))
                return sock_intr_errno(*timeout);

        return 0;
}

#define tipc_wait_for_cond(sock_, timeo_, condition_)                          \
({                                                                             \
        struct sock *sk_;                                                      \
        int rc_;                                                               \
                                                                               \
        while ((rc_ = !(condition_))) {                                        \
                DEFINE_WAIT_FUNC(wait_, woken_wake_function);                  \
                sk_ = (sock_)->sk;                                             \
                rc_ = tipc_sk_sock_err((sock_), timeo_);                       \
                if (rc_)                                                       \
                        break;                                                 \
                prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE);    \
                release_sock(sk_);                                             \
                *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
                sched_annotate_sleep();                                        \
                lock_sock(sk_);                                                \
                remove_wait_queue(sk_sleep(sk_), &wait_);                      \
        }                                                                      \
        rc_;                                                                   \
})
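
/*
 * Typical use (illustrative sketch, mirroring the senders below): the
 * macro is invoked with the socket lock held; it drops the lock while
 * sleeping and re-takes it before re-evaluating the condition, so the
 * condition may safely refer to lock-protected socket state:
 *
 *      rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *      if (rc)
 *              return rc;
 */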

/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
                          int protocol, int kern)
{
        struct tipc_net *tn;
        const struct proto_ops *ops;
        struct sock *sk;
        struct tipc_sock *tsk;
        struct tipc_msg *msg;

        /* Validate arguments */
        if (unlikely(protocol != 0))
                return -EPROTONOSUPPORT;

        switch (sock->type) {
        case SOCK_STREAM:
                ops = &stream_ops;
                break;
        case SOCK_SEQPACKET:
                ops = &packet_ops;
                break;
        case SOCK_DGRAM:
        case SOCK_RDM:
                ops = &msg_ops;
                break;
        default:
                return -EPROTOTYPE;
        }

        /* Allocate socket's protocol area */
        sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
        if (sk == NULL)
                return -ENOMEM;

        tsk = tipc_sk(sk);
        tsk->max_pkt = MAX_PKT_DEFAULT;
        INIT_LIST_HEAD(&tsk->publications);
        INIT_LIST_HEAD(&tsk->cong_links);
        msg = &tsk->phdr;
        tn = net_generic(sock_net(sk), tipc_net_id);

        /* Finish initializing socket data structures */
        sock->ops = ops;
        sock_init_data(sock, sk);
        tipc_set_sk_state(sk, TIPC_OPEN);
        if (tipc_sk_insert(tsk)) {
                pr_warn("Socket create failed; port number exhausted\n");
                return -EINVAL;
        }

        /* Ensure tsk is visible before we read own_addr. */
        smp_mb();

        tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
                      NAMED_H_SIZE, 0);

        msg_set_origport(msg, tsk->portid);
        timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
        sk->sk_shutdown = 0;
        sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
        sk->sk_rcvbuf = sysctl_tipc_rmem[1];
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
        sk->sk_destruct = tipc_sock_destruct;
        tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
        atomic_set(&tsk->dupl_rcvcnt, 0);

        /* Start out with safe limits until we receive an advertised window */
        tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
        tsk->rcv_win = tsk->snd_win;

        if (tipc_sk_type_connectionless(sk)) {
                tsk_set_unreturnable(tsk, true);
                if (sock->type == SOCK_DGRAM)
                        tsk_set_unreliable(tsk, true);
        }

        return 0;
}
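
/*
 * Userspace view (illustrative, not part of the kernel build): the three
 * proto_ops variants above map onto the standard socket types:
 *
 *      int rdm = socket(AF_TIPC, SOCK_RDM, 0);          // msg_ops
 *      int dgram = socket(AF_TIPC, SOCK_DGRAM, 0);      // msg_ops
 *      int stream = socket(AF_TIPC, SOCK_STREAM, 0);    // stream_ops
 *      int seqpkt = socket(AF_TIPC, SOCK_SEQPACKET, 0); // packet_ops
 *
 * Any non-zero protocol argument fails with -EPROTONOSUPPORT.
 */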

static void tipc_sk_callback(struct rcu_head *head)
{
        struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

        sock_put(&tsk->sk);
}

/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct net *net = sock_net(sk);
        long timeout = CONN_TIMEOUT_DEFAULT;
        u32 dnode = tsk_peer_node(tsk);
        struct sk_buff *skb;

        /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
        tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
                                            !tsk_conn_cong(tsk)));

        /* Reject all unreceived messages, except on an active connection
         * (which disconnects locally & sends a 'FIN+' to peer).
         */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                if (TIPC_SKB_CB(skb)->bytes_read) {
                        kfree_skb(skb);
                        continue;
                }
                if (!tipc_sk_type_connectionless(sk) &&
                    sk->sk_state != TIPC_DISCONNECTING) {
                        tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                        tipc_node_remove_conn(net, dnode, tsk->portid);
                }
                tipc_sk_respond(sk, skb, error);
        }

        if (tipc_sk_type_connectionless(sk))
                return;

        if (sk->sk_state != TIPC_DISCONNECTING) {
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
                                      tsk_own_node(tsk), tsk_peer_port(tsk),
                                      tsk->portid, error);
                if (skb)
                        tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
                tipc_node_remove_conn(net, dnode, tsk->portid);
                tipc_set_sk_state(sk, TIPC_DISCONNECTING);
        }
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk;

        /*
         * Exit if socket isn't fully initialized (occurs when a failed accept()
         * releases a pre-allocated child socket that was never used)
         */
        if (sk == NULL)
                return 0;

        tsk = tipc_sk(sk);
        lock_sock(sk);

        __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
        sk->sk_shutdown = SHUTDOWN_MASK;
        tipc_sk_leave(tsk);
        tipc_sk_withdraw(tsk, 0, NULL);
        sk_stop_timer(sk, &sk->sk_timer);
        tipc_sk_remove(tsk);

        /* Reject any messages that accumulated in backlog queue */
        release_sock(sk);
        tipc_dest_list_purge(&tsk->cong_links);
        tsk->cong_link_cnt = 0;
        call_rcu(&tsk->rcu, tipc_sk_callback);
        sock->sk = NULL;

        return 0;
}

/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
                     int uaddr_len)
{
        struct sock *sk = sock->sk;
        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
        struct tipc_sock *tsk = tipc_sk(sk);
        int res = -EINVAL;

        lock_sock(sk);
        if (unlikely(!uaddr_len)) {
                res = tipc_sk_withdraw(tsk, 0, NULL);
                goto exit;
        }
        if (tsk->group) {
                res = -EACCES;
                goto exit;
        }
        if (uaddr_len < sizeof(struct sockaddr_tipc)) {
                res = -EINVAL;
                goto exit;
        }
        if (addr->family != AF_TIPC) {
                res = -EAFNOSUPPORT;
                goto exit;
        }

        if (addr->addrtype == TIPC_ADDR_NAME)
                addr->addr.nameseq.upper = addr->addr.nameseq.lower;
        else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
                res = -EAFNOSUPPORT;
                goto exit;
        }

        if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
            (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
            (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
                res = -EACCES;
                goto exit;
        }

        res = (addr->scope > 0) ?
                tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
                tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
        release_sock(sk);
        return res;
}
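
/*
 * Illustrative sketch of the userspace side of tipc_bind() (values are
 * arbitrary): publishing a server under service type 18888, instances
 * 0-99, visible cluster-wide:
 *
 *      struct sockaddr_tipc srv = {
 *              .family = AF_TIPC,
 *              .addrtype = TIPC_ADDR_NAMESEQ,
 *              .scope = TIPC_CLUSTER_SCOPE,
 *              .addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *      };
 *      bind(sd, (struct sockaddr *)&srv, sizeof(srv));
 *
 * Passing a zero-length address instead withdraws all names bound to sd.
 */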

/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
                        int *uaddr_len, int peer)
{
        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);

        memset(addr, 0, sizeof(*addr));
        if (peer) {
                if ((!tipc_sk_connected(sk)) &&
                    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
                        return -ENOTCONN;
                addr->addr.id.ref = tsk_peer_port(tsk);
                addr->addr.id.node = tsk_peer_node(tsk);
        } else {
                addr->addr.id.ref = tsk->portid;
                addr->addr.id.node = tn->own_addr;
        }

        *uaddr_len = sizeof(*addr);
        addr->addrtype = TIPC_ADDR_ID;
        addr->family = AF_TIPC;
        addr->scope = 0;
        addr->addr.name.domain = 0;

        return 0;
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static unsigned int tipc_poll(struct file *file, struct socket *sock,
                              poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_group *grp = tsk->group;
        u32 revents = 0;

        sock_poll_wait(file, sk_sleep(sk), wait);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                revents |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                revents |= POLLHUP;

        switch (sk->sk_state) {
        case TIPC_ESTABLISHED:
        case TIPC_CONNECTING:
                if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
                        revents |= POLLOUT;
                /* fall thru' */
        case TIPC_LISTEN:
                if (!skb_queue_empty(&sk->sk_receive_queue))
                        revents |= POLLIN | POLLRDNORM;
                break;
        case TIPC_OPEN:
                if (!grp || tipc_group_size(grp))
                        if (!tsk->cong_link_cnt)
                                revents |= POLLOUT;
                if (!tipc_sk_type_connectionless(sk))
                        break;
                if (skb_queue_empty(&sk->sk_receive_queue))
                        break;
                revents |= POLLIN | POLLRDNORM;
                break;
        case TIPC_DISCONNECTING:
                revents = POLLIN | POLLRDNORM | POLLHUP;
                break;
        }
        return revents;
}

/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
                          struct msghdr *msg, size_t dlen, long timeout)
{
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *hdr = &tsk->phdr;
        struct net *net = sock_net(sk);
        int mtu = tipc_bcast_get_mtu(net);
        struct tipc_mc_method *method = &tsk->mc_method;
        u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
        struct sk_buff_head pkts;
        struct tipc_nlist dsts;
        int rc;

        if (tsk->group)
                return -EACCES;

        /* Block or return if any destination link is congested */
        rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
        if (unlikely(rc))
                return rc;

        /* Lookup destination nodes */
        tipc_nlist_init(&dsts, tipc_own_addr(net));
        tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
                                      seq->upper, domain, &dsts);
        if (!dsts.local && !dsts.remote)
                return -EHOSTUNREACH;

        /* Build message header */
        msg_set_type(hdr, TIPC_MCAST_MSG);
        msg_set_hdr_sz(hdr, MCAST_H_SIZE);
        msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
        msg_set_destport(hdr, 0);
        msg_set_destnode(hdr, 0);
        msg_set_nametype(hdr, seq->type);
        msg_set_namelower(hdr, seq->lower);
        msg_set_nameupper(hdr, seq->upper);

        /* Build message as chain of buffers */
        skb_queue_head_init(&pkts);
        rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

        /* Send message if build was successful */
        if (unlikely(rc == dlen))
                rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
                                     &tsk->cong_link_cnt);

        tipc_nlist_purge(&dsts);

        return rc ? rc : dlen;
}
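
/*
 * Illustrative userspace counterpart (hypothetical service type 18888,
 * matching the bind example above): a multicast covering instances 0-99
 * reaches every socket bound anywhere in that range:
 *
 *      struct sockaddr_tipc dst = {
 *              .family = AF_TIPC,
 *              .addrtype = TIPC_ADDR_MCAST,
 *              .addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *      };
 *      sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */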

/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @tsk: sending socket
 * @m: message to send
 * @mb: group member
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 */
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
                               struct msghdr *m, struct tipc_member *mb,
                               u32 dnode, u32 dport, int dlen)
{
        u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
        struct tipc_mc_method *method = &tsk->mc_method;
        int blks = tsk_blocks(GROUP_H_SIZE + dlen);
        struct tipc_msg *hdr = &tsk->phdr;
        struct sk_buff_head pkts;
        int mtu, rc;

        /* Complete message header */
        msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
        msg_set_hdr_sz(hdr, GROUP_H_SIZE);
        msg_set_destport(hdr, dport);
        msg_set_destnode(hdr, dnode);
        msg_set_grp_bc_seqno(hdr, bc_snd_nxt);

        /* Build message as chain of buffers */
        skb_queue_head_init(&pkts);
        mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
        rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
        if (unlikely(rc != dlen))
                return rc;

        /* Send message */
        rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
        if (unlikely(rc == -ELINKCONG)) {
                tipc_dest_push(&tsk->cong_links, dnode, 0);
                tsk->cong_link_cnt++;
        }

        /* Update send window */
        tipc_group_update_member(mb, blks);

        /* A broadcast sent within next EXPIRE period must follow same path */
        method->rcast = true;
        method->mandatory = true;
        return dlen;
}

/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
                                   int dlen, long timeout)
{
        struct sock *sk = sock->sk;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        int blks = tsk_blocks(GROUP_H_SIZE + dlen);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_group *grp = tsk->group;
        struct net *net = sock_net(sk);
        struct tipc_member *mb = NULL;
        u32 node, port;
        int rc;

        node = dest->addr.id.node;
        port = dest->addr.id.ref;
        if (!port && !node)
                return -EHOSTUNREACH;

        /* Block or return if destination link or member is congested */
        rc = tipc_wait_for_cond(sock, &timeout,
                                !tipc_dest_find(&tsk->cong_links, node, 0) &&
                                !tipc_group_cong(grp, node, port, blks, &mb));
        if (unlikely(rc))
                return rc;

        if (unlikely(!mb))
                return -EHOSTUNREACH;

        rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

        return rc ? rc : dlen;
}

/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
                                   int dlen, long timeout)
{
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct list_head *cong_links = &tsk->cong_links;
        int blks = tsk_blocks(GROUP_H_SIZE + dlen);
        struct tipc_group *grp = tsk->group;
        struct tipc_member *first = NULL;
        struct tipc_member *mbr = NULL;
        struct net *net = sock_net(sk);
        u32 node, port, exclude;
        u32 type, inst, domain;
        struct list_head dsts;
        int lookups = 0;
        int dstcnt, rc;
        bool cong;

        INIT_LIST_HEAD(&dsts);

        type = dest->addr.name.name.type;
        inst = dest->addr.name.name.instance;
        domain = addr_domain(net, dest->scope);
        exclude = tipc_group_exclude(grp);

        while (++lookups < 4) {
                first = NULL;

                /* Look for a non-congested destination member, if any */
                while (1) {
                        if (!tipc_nametbl_lookup(net, type, inst, domain, &dsts,
                                                 &dstcnt, exclude, false))
                                return -EHOSTUNREACH;
                        tipc_dest_pop(&dsts, &node, &port);
                        cong = tipc_group_cong(grp, node, port, blks, &mbr);
                        if (!cong)
                                break;
                        if (mbr == first)
                                break;
                        if (!first)
                                first = mbr;
                }

                /* Start over if destination was not in member list */
                if (unlikely(!mbr))
                        continue;

                if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
                        break;

                /* Block or return if destination link or member is congested */
                rc = tipc_wait_for_cond(sock, &timeout,
                                        !tipc_dest_find(cong_links, node, 0) &&
                                        !tipc_group_cong(grp, node, port,
                                                         blks, &mbr));
                if (unlikely(rc))
                        return rc;

                /* Send, unless destination disappeared while waiting */
                if (likely(mbr))
                        break;
        }

        if (unlikely(lookups >= 4))
                return -EHOSTUNREACH;

        rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

        return rc ? rc : dlen;
}

/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
                                 int dlen, long timeout)
{
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_group *grp = tsk->group;
        struct tipc_nlist *dsts = tipc_group_dests(grp);
        struct tipc_mc_method *method = &tsk->mc_method;
        bool ack = method->mandatory && method->rcast;
        int blks = tsk_blocks(MCAST_H_SIZE + dlen);
        struct tipc_msg *hdr = &tsk->phdr;
        int mtu = tipc_bcast_get_mtu(net);
        struct sk_buff_head pkts;
        int rc = -EHOSTUNREACH;

        if (!dsts->local && !dsts->remote)
                return -EHOSTUNREACH;

        /* Block or return if any destination link or member is congested */
        rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
                                !tipc_group_bc_cong(grp, blks));
        if (unlikely(rc))
                return rc;

        /* Complete message header */
        if (dest) {
                msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
                msg_set_nameinst(hdr, dest->addr.name.name.instance);
        } else {
                msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
                msg_set_nameinst(hdr, 0);
        }
        msg_set_hdr_sz(hdr, GROUP_H_SIZE);
        msg_set_destport(hdr, 0);
        msg_set_destnode(hdr, 0);
        msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));

        /* Avoid getting stuck with repeated forced replicasts */
        msg_set_grp_bc_ack_req(hdr, ack);

        /* Build message as chain of buffers */
        skb_queue_head_init(&pkts);
        rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
        if (unlikely(rc != dlen))
                return rc;

        /* Send message */
        rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
        if (unlikely(rc))
                return rc;

        /* Update broadcast sequence number and send windows */
        tipc_group_update_bc_members(tsk->group, blks, ack);

        /* Broadcast link is now free to choose method for next broadcast */
        method->mandatory = false;
        method->expires = jiffies;

        return dlen;
}

/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
                                 int dlen, long timeout)
{
        struct sock *sk = sock->sk;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        struct tipc_name_seq *seq = &dest->addr.nameseq;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_group *grp = tsk->group;
        struct net *net = sock_net(sk);
        u32 domain, exclude, dstcnt;
        struct list_head dsts;

        INIT_LIST_HEAD(&dsts);

        if (seq->lower != seq->upper)
                return -ENOTSUPP;

        domain = addr_domain(net, dest->scope);
        exclude = tipc_group_exclude(grp);
        if (!tipc_nametbl_lookup(net, seq->type, seq->lower, domain,
                                 &dsts, &dstcnt, exclude, true))
                return -EHOSTUNREACH;

        if (dstcnt == 1) {
                tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
                return tipc_send_group_unicast(sock, m, dlen, timeout);
        }

        tipc_dest_list_purge(&dsts);
        return tipc_send_group_bcast(sock, m, dlen, timeout);
}
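
/*
 * Illustrative sketch (values hypothetical): the group send variants above
 * are reached only after a socket has joined a group via TIPC_GROUP_JOIN:
 *
 *      struct tipc_group_req grp = {
 *              .type = 4711,              // arbitrary group identity
 *              .instance = 17,            // this member's instance
 *              .scope = TIPC_CLUSTER_SCOPE,
 *      };
 *      setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &grp, sizeof(grp));
 *
 * A plain send() then broadcasts to all members; a TIPC_ADDR_NAME
 * destination anycasts, TIPC_ADDR_ID unicasts, and TIPC_ADDR_MCAST
 * multicasts to the members with matching instance.
 */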

/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @net: network namespace
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                       struct sk_buff_head *inputq)
{
        u32 scope = TIPC_CLUSTER_SCOPE;
        u32 self = tipc_own_addr(net);
        struct sk_buff *skb, *_skb;
        u32 lower = 0, upper = ~0;
        struct sk_buff_head tmpq;
        u32 portid, oport, onode;
        struct list_head dports;
        struct tipc_msg *msg;
        int user, mtyp, hsz;

        __skb_queue_head_init(&tmpq);
        INIT_LIST_HEAD(&dports);

        skb = tipc_skb_peek(arrvq, &inputq->lock);
        for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
                msg = buf_msg(skb);
                user = msg_user(msg);
                mtyp = msg_type(msg);
                if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
                        spin_lock_bh(&inputq->lock);
                        if (skb_peek(arrvq) == skb) {
                                __skb_dequeue(arrvq);
                                __skb_queue_tail(inputq, skb);
                        }
                        kfree_skb(skb);
                        spin_unlock_bh(&inputq->lock);
                        continue;
                }
                hsz = skb_headroom(skb) + msg_hdr_sz(msg);
                oport = msg_origport(msg);
                onode = msg_orignode(msg);
                if (onode == self)
                        scope = TIPC_NODE_SCOPE;

                /* Create destination port list and message clones: */
                if (!msg_in_group(msg)) {
                        lower = msg_namelower(msg);
                        upper = msg_nameupper(msg);
                }
                tipc_nametbl_mc_translate(net, msg_nametype(msg), lower, upper,
                                          scope, &dports);
                while (tipc_dest_pop(&dports, NULL, &portid)) {
                        _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
                        if (_skb) {
                                msg_set_destport(buf_msg(_skb), portid);
                                __skb_queue_tail(&tmpq, _skb);
                                continue;
                        }
                        pr_warn("Failed to clone mcast rcv buffer\n");
                }
                /* Append to inputq if not already done by other thread */
                spin_lock_bh(&inputq->lock);
                if (skb_peek(arrvq) == skb) {
                        skb_queue_splice_tail_init(&tmpq, inputq);
                        kfree_skb(__skb_dequeue(arrvq));
                }
                spin_unlock_bh(&inputq->lock);
                __skb_queue_purge(&tmpq);
                kfree_skb(skb);
        }
        tipc_sk_rcv(net, inputq);
}

/**
 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 * @xmitq: queue for outgoing messages, e.g. probe replies
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
                                   struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb);
        u32 onode = tsk_own_node(tsk);
        struct sock *sk = &tsk->sk;
        int mtyp = msg_type(hdr);
        bool conn_cong;

        /* Ignore if connection cannot be validated: */
        if (!tsk_peer_msg(tsk, hdr))
                goto exit;

        if (unlikely(msg_errcode(hdr))) {
                tipc_set_sk_state(sk, TIPC_DISCONNECTING);
                tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
                                      tsk_peer_port(tsk));
                sk->sk_state_change(sk);
                goto exit;
        }

        tsk->probe_unacked = false;

        if (mtyp == CONN_PROBE) {
                msg_set_type(hdr, CONN_PROBE_REPLY);
                if (tipc_msg_reverse(onode, &skb, TIPC_OK))
                        __skb_queue_tail(xmitq, skb);
                return;
        } else if (mtyp == CONN_ACK) {
                conn_cong = tsk_conn_cong(tsk);
                tsk->snt_unacked -= msg_conn_ack(hdr);
                if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
                        tsk->snd_win = msg_adv_win(hdr);
                if (conn_cong)
                        sk->sk_write_space(sk);
        } else if (mtyp != CONN_PROBE_REPLY) {
                pr_warn("Received unknown CONN_PROTO msg\n");
        }
exit:
        kfree_skb(skb);
}

/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
                        struct msghdr *m, size_t dsz)
{
        struct sock *sk = sock->sk;
        int ret;

        lock_sock(sk);
        ret = __tipc_sendmsg(sock, m, dsz);
        release_sock(sk);

        return ret;
}

static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
        struct list_head *clinks = &tsk->cong_links;
        bool syn = !tipc_sk_type_connectionless(sk);
        struct tipc_group *grp = tsk->group;
        struct tipc_msg *hdr = &tsk->phdr;
        struct tipc_name_seq *seq;
        struct sk_buff_head pkts;
        u32 type, inst, domain;
        u32 dnode, dport;
        int mtu, rc;

        if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
                return -EMSGSIZE;

        if (likely(dest)) {
                if (unlikely(m->msg_namelen < sizeof(*dest)))
                        return -EINVAL;
                if (unlikely(dest->family != AF_TIPC))
                        return -EINVAL;
        }

        if (grp) {
                if (!dest)
                        return tipc_send_group_bcast(sock, m, dlen, timeout);
                if (dest->addrtype == TIPC_ADDR_NAME)
                        return tipc_send_group_anycast(sock, m, dlen, timeout);
                if (dest->addrtype == TIPC_ADDR_ID)
                        return tipc_send_group_unicast(sock, m, dlen, timeout);
                if (dest->addrtype == TIPC_ADDR_MCAST)
                        return tipc_send_group_mcast(sock, m, dlen, timeout);
                return -EINVAL;
        }

        if (unlikely(!dest)) {
                dest = &tsk->peer;
                if (!syn || dest->family != AF_TIPC)
                        return -EDESTADDRREQ;
        }

        if (unlikely(syn)) {
                if (sk->sk_state == TIPC_LISTEN)
                        return -EPIPE;
                if (sk->sk_state != TIPC_OPEN)
                        return -EISCONN;
                if (tsk->published)
                        return -EOPNOTSUPP;
                if (dest->addrtype == TIPC_ADDR_NAME) {
                        tsk->conn_type = dest->addr.name.name.type;
                        tsk->conn_instance = dest->addr.name.name.instance;
                }
        }

        seq = &dest->addr.nameseq;
        if (dest->addrtype == TIPC_ADDR_MCAST)
                return tipc_sendmcast(sock, seq, m, dlen, timeout);

        if (dest->addrtype == TIPC_ADDR_NAME) {
                type = dest->addr.name.name.type;
                inst = dest->addr.name.name.instance;
                domain = dest->addr.name.domain;
                dnode = domain;
                msg_set_type(hdr, TIPC_NAMED_MSG);
                msg_set_hdr_sz(hdr, NAMED_H_SIZE);
                msg_set_nametype(hdr, type);
                msg_set_nameinst(hdr, inst);
                msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
                dport = tipc_nametbl_translate(net, type, inst, &dnode);
                msg_set_destnode(hdr, dnode);
                msg_set_destport(hdr, dport);
                if (unlikely(!dport && !dnode))
                        return -EHOSTUNREACH;
        } else if (dest->addrtype == TIPC_ADDR_ID) {
                dnode = dest->addr.id.node;
                msg_set_type(hdr, TIPC_DIRECT_MSG);
                msg_set_lookup_scope(hdr, 0);
                msg_set_destnode(hdr, dnode);
                msg_set_destport(hdr, dest->addr.id.ref);
                msg_set_hdr_sz(hdr, BASIC_H_SIZE);
        }

        /* Block or return if destination link is congested */
        rc = tipc_wait_for_cond(sock, &timeout,
                                !tipc_dest_find(clinks, dnode, 0));
        if (unlikely(rc))
                return rc;

        skb_queue_head_init(&pkts);
        mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
        rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
        if (unlikely(rc != dlen))
                return rc;

        rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
        if (unlikely(rc == -ELINKCONG)) {
                tipc_dest_push(clinks, dnode, 0);
                tsk->cong_link_cnt++;
                rc = 0;
        }

        if (unlikely(syn && !rc))
                tipc_set_sk_state(sk, TIPC_CONNECTING);

        return rc ? rc : dlen;
}
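
/*
 * Illustrative client-side sketch (hypothetical service type 18888,
 * instance 42): a datagram addressed by service name lets the name table
 * lookup above pick the destination socket:
 *
 *      struct sockaddr_tipc dst = {
 *              .family = AF_TIPC,
 *              .addrtype = TIPC_ADDR_NAME,
 *              .addr.name.name = { .type = 18888, .instance = 42 },
 *      };
 *      sendto(sd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * If no matching publication exists, the send fails with -EHOSTUNREACH.
 */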

/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
        struct sock *sk = sock->sk;
        int ret;

        lock_sock(sk);
        ret = __tipc_sendstream(sock, m, dsz);
        release_sock(sk);

        return ret;
}

static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
        struct sock *sk = sock->sk;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *hdr = &tsk->phdr;
        struct net *net = sock_net(sk);
        struct sk_buff_head pkts;
        u32 dnode = tsk_peer_node(tsk);
        int send, sent = 0;
        int rc = 0;

        skb_queue_head_init(&pkts);

        if (unlikely(dlen > INT_MAX))
                return -EMSGSIZE;

        /* Handle implicit connection setup */
        if (unlikely(dest)) {
                rc = __tipc_sendmsg(sock, m, dlen);
                if (dlen && (dlen == rc))
                        tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
                return rc;
        }

        do {
                rc = tipc_wait_for_cond(sock, &timeout,
                                        (!tsk->cong_link_cnt &&
                                         !tsk_conn_cong(tsk) &&
                                         tipc_sk_connected(sk)));
                if (unlikely(rc))
                        break;

                send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
                rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
                if (unlikely(rc != send))
                        break;

                rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
                if (unlikely(rc == -ELINKCONG)) {
                        tsk->cong_link_cnt = 1;
                        rc = 0;
                }
                if (likely(!rc)) {
                        tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
                        sent += send;
                }
        } while (sent < dlen && !rc);

        return sent ? sent : rc;
}

/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
        if (dsz > TIPC_MAX_USER_MSG_SIZE)
                return -EMSGSIZE;

        return tipc_sendstream(sock, m, dsz);
}

/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
                                u32 peer_node)
{
        struct sock *sk = &tsk->sk;
        struct net *net = sock_net(sk);
        struct tipc_msg *msg = &tsk->phdr;

        msg_set_destnode(msg, peer_node);
        msg_set_destport(msg, peer_port);
        msg_set_type(msg, TIPC_CONN_MSG);
        msg_set_lookup_scope(msg, 0);
        msg_set_hdr_sz(msg, SHORT_H_SIZE);

        sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
        tipc_set_sk_state(sk, TIPC_ESTABLISHED);
        tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
        tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
        tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
        if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
                return;

        /* Fall back to message based flow control */
        tsk->rcv_win = FLOWCTL_MSG_WIN;
        tsk->snd_win = FLOWCTL_MSG_WIN;
}

/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
        DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
        struct tipc_msg *hdr = buf_msg(skb);

        if (!srcaddr)
                return;

        srcaddr->sock.family = AF_TIPC;
        srcaddr->sock.addrtype = TIPC_ADDR_ID;
        srcaddr->sock.addr.id.ref = msg_origport(hdr);
        srcaddr->sock.addr.id.node = msg_orignode(hdr);
        srcaddr->sock.addr.name.domain = 0;
        srcaddr->sock.scope = 0;
        m->msg_namelen = sizeof(struct sockaddr_tipc);

        if (!msg_in_group(hdr))
                return;

        /* Group message users may also want to know sending member's id */
        srcaddr->member.family = AF_TIPC;
        srcaddr->member.addrtype = TIPC_ADDR_NAME;
        srcaddr->member.addr.name.name.type = msg_nametype(hdr);
        srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
        srcaddr->member.addr.name.domain = 0;
        m->msg_namelen = sizeof(*srcaddr);
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
                                 struct tipc_sock *tsk)
{
        u32 anc_data[3];
        u32 err;
        u32 dest_type;
        int has_name;
        int res;

        if (likely(m->msg_controllen == 0))
                return 0;

        /* Optionally capture errored message object(s) */
        err = msg ? msg_errcode(msg) : 0;
        if (unlikely(err)) {
                anc_data[0] = err;
                anc_data[1] = msg_data_sz(msg);
                res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
                if (res)
                        return res;
                if (anc_data[1]) {
                        res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
                                       msg_data(msg));
                        if (res)
                                return res;
                }
        }

        /* Optionally capture message destination object */
        dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
        switch (dest_type) {
        case TIPC_NAMED_MSG:
                has_name = 1;
                anc_data[0] = msg_nametype(msg);
                anc_data[1] = msg_namelower(msg);
                anc_data[2] = msg_namelower(msg);
                break;
        case TIPC_MCAST_MSG:
                has_name = 1;
                anc_data[0] = msg_nametype(msg);
                anc_data[1] = msg_namelower(msg);
                anc_data[2] = msg_nameupper(msg);
                break;
        case TIPC_CONN_MSG:
                has_name = (tsk->conn_type != 0);
                anc_data[0] = tsk->conn_type;
                anc_data[1] = tsk->conn_instance;
                anc_data[2] = tsk->conn_instance;
                break;
        default:
                has_name = 0;
        }
        if (has_name) {
                res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
                if (res)
                        return res;
        }

        return 0;
}
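
/*
 * Illustrative receiver-side sketch (sd is assumed to be a TIPC socket):
 * the objects assembled above arrive as control messages on SOL_TIPC.
 * TIPC_ERRINFO carries 8 bytes (error code + returned-data length),
 * TIPC_DESTNAME 12 bytes (type/lower/upper of the destination name):
 *
 *      char data[256], cbuf[64];
 *      __u32 destname[3];
 *      struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *      struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *                          .msg_control = cbuf,
 *                          .msg_controllen = sizeof(cbuf) };
 *      struct cmsghdr *c;
 *
 *      recvmsg(sd, &m, 0);
 *      for (c = CMSG_FIRSTHDR(&m); c; c = CMSG_NXTHDR(&m, c))
 *              if (c->cmsg_level == SOL_TIPC &&
 *                  c->cmsg_type == TIPC_DESTNAME)
 *                      memcpy(destname, CMSG_DATA(c), sizeof(destname));
 */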

static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
        struct sock *sk = &tsk->sk;
        struct net *net = sock_net(sk);
        struct sk_buff *skb = NULL;
        struct tipc_msg *msg;
        u32 peer_port = tsk_peer_port(tsk);
        u32 dnode = tsk_peer_node(tsk);

        if (!tipc_sk_connected(sk))
                return;
        skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
                              dnode, tsk_own_node(tsk), peer_port,
                              tsk->portid, TIPC_OK);
        if (!skb)
                return;
        msg = buf_msg(skb);
        msg_set_conn_ack(msg, tsk->rcv_unacked);
        tsk->rcv_unacked = 0;

        /* Adjust to and advertise the correct window limit */
        if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
                tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
                msg_set_adv_win(msg, tsk->rcv_win);
        }
        tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}

static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
        struct sock *sk = sock->sk;
        DEFINE_WAIT(wait);
        long timeo = *timeop;
        int err = sock_error(sk);

        if (err)
                return err;

        for (;;) {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
                        if (sk->sk_shutdown & RCV_SHUTDOWN) {
                                err = -ENOTCONN;
                                break;
                        }
                        release_sock(sk);
                        timeo = schedule_timeout(timeo);
                        lock_sock(sk);
                }
                err = 0;
                if (!skb_queue_empty(&sk->sk_receive_queue))
                        break;
                err = -EAGAIN;
                if (!timeo)
                        break;
                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        break;

                err = sock_error(sk);
                if (err)
                        break;
        }
        finish_wait(sk_sleep(sk), &wait);
        *timeop = timeo;
        return err;
}

/**
 * tipc_recvmsg - receive packet-oriented message
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
                        size_t buflen, int flags)
{
        struct sock *sk = sock->sk;
        bool connected = !tipc_sk_type_connectionless(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
        int rc, err, hlen, dlen, copy;
        struct sk_buff_head xmitq;
        struct tipc_msg *hdr;
        struct sk_buff *skb;
        bool grp_evt;
        long timeout;

        /* Catch invalid receive requests */
        if (unlikely(!buflen))
                return -EINVAL;

        lock_sock(sk);
        if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
                rc = -ENOTCONN;
                goto exit;
        }
        timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        /* Step rcv queue to first msg with data or error; wait if necessary */
        do {
                rc = tipc_wait_for_rcvmsg(sock, &timeout);
                if (unlikely(rc))
                        goto exit;
                skb = skb_peek(&sk->sk_receive_queue);
                hdr = buf_msg(skb);
                dlen = msg_data_sz(hdr);
                hlen = msg_hdr_sz(hdr);
                err = msg_errcode(hdr);
                grp_evt = msg_is_grp_evt(hdr);
                if (likely(dlen || err))
                        break;
                tsk_advance_rx_queue(sk);
        } while (1);

        /* Collect msg meta data, including error code and rejected data */
        tipc_sk_set_orig_addr(m, skb);
        rc = tipc_sk_anc_data_recv(m, hdr, tsk);
        if (unlikely(rc))
                goto exit;

        /* Capture data if non-error msg, otherwise just set return value */
        if (likely(!err)) {
                copy = min_t(int, dlen, buflen);
                if (unlikely(copy != dlen))
                        m->msg_flags |= MSG_TRUNC;
                rc = skb_copy_datagram_msg(skb, hlen, m, copy);
        } else {
                copy = 0;
                rc = 0;
                if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
                        rc = -ECONNRESET;
        }
        if (unlikely(rc))
                goto exit;

        /* Mark message as group event if applicable */
        if (unlikely(grp_evt)) {
                if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
                        m->msg_flags |= MSG_EOR;
                m->msg_flags |= MSG_OOB;
                copy = 0;
        }

        /* Capture of data or error code/rejected data was successful */
1743 if (unlikely(flags & MSG_PEEK))
1744 goto exit;
1745
1746 /* Send group flow control advertisement when applicable */
1747 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1748 skb_queue_head_init(&xmitq);
1749 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1750 msg_orignode(hdr), msg_origport(hdr),
1751 &xmitq);
1752 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1753 }
1754
1755 tsk_advance_rx_queue(sk);
1756
1757 if (likely(!connected))
1758 goto exit;
1759
1760 /* Send connection flow control advertisement when applicable */
1761 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1762 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1763 tipc_sk_send_ack(tsk);
1764 exit:
1765 release_sock(sk);
1766 return rc ? rc : copy;
1767 }
1768
1769 /**
1770 * tipc_recvstream - receive stream-oriented data
1771 * @m: descriptor for message info
1772 * @buflen: total size of user buffer area
1773 * @flags: receive flags
1774 *
1775 * Used for SOCK_STREAM messages only. If not enough data is available
1776 * will optionally wait for more; never truncates data.
1777 *
1778 * Returns size of returned message data, errno otherwise
1779 */
1780 static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1781 size_t buflen, int flags)
1782 {
1783 struct sock *sk = sock->sk;
1784 struct tipc_sock *tsk = tipc_sk(sk);
1785 struct sk_buff *skb;
1786 struct tipc_msg *hdr;
1787 struct tipc_skb_cb *skb_cb;
1788 bool peek = flags & MSG_PEEK;
1789 int offset, required, copy, copied = 0;
1790 int hlen, dlen, err, rc;
1791 long timeout;
1792
1793 /* Catch invalid receive attempts */
1794 if (unlikely(!buflen))
1795 return -EINVAL;
1796
1797 lock_sock(sk);
1798
1799 if (unlikely(sk->sk_state == TIPC_OPEN)) {
1800 rc = -ENOTCONN;
1801 goto exit;
1802 }
1803 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1804 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1805
1806 do {
1807 /* Look at first msg in receive queue; wait if necessary */
1808 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1809 if (unlikely(rc))
1810 break;
1811 skb = skb_peek(&sk->sk_receive_queue);
1812 skb_cb = TIPC_SKB_CB(skb);
1813 hdr = buf_msg(skb);
1814 dlen = msg_data_sz(hdr);
1815 hlen = msg_hdr_sz(hdr);
1816 err = msg_errcode(hdr);
1817
1818 /* Discard any empty non-errored (SYN-) message */
1819 if (unlikely(!dlen && !err)) {
1820 tsk_advance_rx_queue(sk);
1821 continue;
1822 }
1823
1824 /* Collect msg meta data, incl. error code and rejected data */
1825 if (!copied) {
1826 tipc_sk_set_orig_addr(m, skb);
1827 rc = tipc_sk_anc_data_recv(m, hdr, tsk);
1828 if (rc)
1829 break;
1830 }
1831
1832 /* Copy data if msg ok, otherwise return error/partial data */
1833 if (likely(!err)) {
1834 offset = skb_cb->bytes_read;
1835 copy = min_t(int, dlen - offset, buflen - copied);
1836 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1837 if (unlikely(rc))
1838 break;
1839 copied += copy;
1840 offset += copy;
1841 if (unlikely(offset < dlen)) {
1842 if (!peek)
1843 skb_cb->bytes_read = offset;
1844 break;
1845 }
1846 } else {
1847 rc = 0;
1848 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1849 rc = -ECONNRESET;
1850 if (copied || rc)
1851 break;
1852 }
1853
1854 if (unlikely(peek))
1855 break;
1856
1857 tsk_advance_rx_queue(sk);
1858
1859 /* Send connection flow control advertisement when applicable */
1860 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1861 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1862 tipc_sk_send_ack(tsk);
1863
1864 /* Exit if all requested data or FIN/error received */
1865 if (copied == buflen || err)
1866 break;
1867
1868 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1869 exit:
1870 release_sock(sk);
1871 return copied ? copied : rc;
1872 }
1873
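/* The user-space view of the loop above: on a SOCK_STREAM TIPC socket,
 * recv() may legitimately return fewer bytes than requested, so callers
 * either loop or pass MSG_WAITALL (which feeds sock_rcvlowat() above).
 * A sketch, assuming an already connected descriptor 'sd':
 *
 *	static ssize_t read_full(int sd, void *buf, size_t want)
 *	{
 *		size_t got = 0;
 *
 *		while (got < want) {
 *			ssize_t n = recv(sd, (char *)buf + got, want - got, 0);
 *
 *			if (n <= 0)		// error or orderly shutdown
 *				return n;
 *			got += n;
 *		}
 *		return got;
 *	}
 */
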
1874 /**
1875 * tipc_write_space - wake up thread if port congestion is released
1876 * @sk: socket
1877 */
1878 static void tipc_write_space(struct sock *sk)
1879 {
1880 struct socket_wq *wq;
1881
1882 rcu_read_lock();
1883 wq = rcu_dereference(sk->sk_wq);
1884 if (skwq_has_sleeper(wq))
1885 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1886 POLLWRNORM | POLLWRBAND);
1887 rcu_read_unlock();
1888 }
1889
1890 /**
1891 * tipc_data_ready - wake up threads to indicate messages have been received
1892 * @sk: socket
1894 */
1895 static void tipc_data_ready(struct sock *sk)
1896 {
1897 struct socket_wq *wq;
1898
1899 rcu_read_lock();
1900 wq = rcu_dereference(sk->sk_wq);
1901 if (skwq_has_sleeper(wq))
1902 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1903 POLLRDNORM | POLLRDBAND);
1904 rcu_read_unlock();
1905 }
1906
1907 static void tipc_sock_destruct(struct sock *sk)
1908 {
1909 __skb_queue_purge(&sk->sk_receive_queue);
1910 }
1911
1912 static void tipc_sk_proto_rcv(struct sock *sk,
1913 struct sk_buff_head *inputq,
1914 struct sk_buff_head *xmitq)
1915 {
1916 struct sk_buff *skb = __skb_dequeue(inputq);
1917 struct tipc_sock *tsk = tipc_sk(sk);
1918 struct tipc_msg *hdr = buf_msg(skb);
1919 struct tipc_group *grp = tsk->group;
1920 bool wakeup = false;
1921
1922 switch (msg_user(hdr)) {
1923 case CONN_MANAGER:
1924 tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
1925 return;
1926 case SOCK_WAKEUP:
1927 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1928 tsk->cong_link_cnt--;
1929 wakeup = true;
1930 break;
1931 case GROUP_PROTOCOL:
1932 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
1933 break;
1934 case TOP_SRV:
1935 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
1936 skb, inputq, xmitq);
1937 skb = NULL;
1938 break;
1939 default:
1940 break;
1941 }
1942
1943 if (wakeup)
1944 sk->sk_write_space(sk);
1945
1946 kfree_skb(skb);
1947 }
1948
1949 /**
1950 * tipc_sk_filter_connect - Handle incoming message for a connection-based socket
1951 * @tsk: TIPC socket
1952 * @skb: pointer to message buffer
1953 *
1954 * Returns true if the message should be added to the receive queue, false if it is to be rejected or dropped
1955 */
1956 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
1957 {
1958 struct sock *sk = &tsk->sk;
1959 struct net *net = sock_net(sk);
1960 struct tipc_msg *hdr = buf_msg(skb);
1961 u32 pport = msg_origport(hdr);
1962 u32 pnode = msg_orignode(hdr);
1963
1964 if (unlikely(msg_mcast(hdr)))
1965 return false;
1966
1967 switch (sk->sk_state) {
1968 case TIPC_CONNECTING:
1969 /* Accept only ACK or NACK message */
1970 if (unlikely(!msg_connected(hdr))) {
1971 if (pport != tsk_peer_port(tsk) ||
1972 pnode != tsk_peer_node(tsk))
1973 return false;
1974
1975 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1976 sk->sk_err = ECONNREFUSED;
1977 sk->sk_state_change(sk);
1978 return true;
1979 }
1980
1981 if (unlikely(msg_errcode(hdr))) {
1982 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1983 sk->sk_err = ECONNREFUSED;
1984 sk->sk_state_change(sk);
1985 return true;
1986 }
1987
1988 if (unlikely(!msg_isdata(hdr))) {
1989 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1990 sk->sk_err = EINVAL;
1991 sk->sk_state_change(sk);
1992 return true;
1993 }
1994
1995 tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
1996 msg_set_importance(&tsk->phdr, msg_importance(hdr));
1997
1998 /* If 'ACK+' message, add to socket receive queue */
1999 if (msg_data_sz(hdr))
2000 return true;
2001
2002 /* If empty 'ACK-' message, wake up sleeping connect() */
2003 sk->sk_data_ready(sk);
2004
2005 /* 'ACK-' message is neither accepted nor rejected: */
2006 msg_set_dest_droppable(hdr, 1);
2007 return false;
2008
2009 case TIPC_OPEN:
2010 case TIPC_DISCONNECTING:
2011 break;
2012 case TIPC_LISTEN:
2013 /* Accept only SYN message */
2014 if (!msg_connected(hdr) && !(msg_errcode(hdr)))
2015 return true;
2016 break;
2017 case TIPC_ESTABLISHED:
2018 /* Accept only connection-based messages sent by peer */
2019 if (unlikely(!tsk_peer_msg(tsk, hdr)))
2020 return false;
2021
2022 if (unlikely(msg_errcode(hdr))) {
2023 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2024 /* Let timer expire on its own */
2025 tipc_node_remove_conn(net, tsk_peer_node(tsk),
2026 tsk->portid);
2027 sk->sk_state_change(sk);
2028 }
2029 return true;
2030 default:
2031 pr_err("Unknown sk_state %u\n", sk->sk_state);
2032 }
2033
2034 return false;
2035 }
2036
2037 /**
2038 * rcvbuf_limit - get proper overload limit of socket receive queue
2039 * @sk: socket
2040 * @skb: message
2041 *
2042 * For connection oriented messages, irrespective of importance,
2043 * default queue limit is 2 MB.
2044 *
2045 * For connectionless messages, queue limits are based on message
2046 * importance as follows:
2047 *
2048 * TIPC_LOW_IMPORTANCE (2 MB)
2049 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2050 * TIPC_HIGH_IMPORTANCE (8 MB)
2051 * TIPC_CRITICAL_IMPORTANCE (16 MB)
2052 *
2053 * Returns overload limit according to corresponding message importance
2054 */
2055 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2056 {
2057 struct tipc_sock *tsk = tipc_sk(sk);
2058 struct tipc_msg *hdr = buf_msg(skb);
2059
2060 if (unlikely(msg_in_group(hdr)))
2061 return sk->sk_rcvbuf;
2062
2063 if (unlikely(!msg_connected(hdr)))
2064 return sk->sk_rcvbuf << msg_importance(hdr);
2065
2066 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2067 return sk->sk_rcvbuf;
2068
2069 return FLOWCTL_MSG_LIM;
2070 }
2071
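/* The shift above is what produces the doubling table in the comment:
 * with the default sk_rcvbuf of roughly 2 MB, importance levels 0..3
 * yield the connectionless limits
 *
 *	limit = sk->sk_rcvbuf << msg_importance(hdr);	// 2, 4, 8, 16 MB
 */
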
2072 /**
2073 * tipc_sk_filter_rcv - validate incoming message
2074 * @sk: socket
2075 * @skb: pointer to message buffer
 * @xmitq: output queue for messages to be sent in response
2076 *
2077 * Enqueues message on receive queue if acceptable; optionally handles
2078 * disconnect indication for a connected socket.
2079 *
2080 * Called with socket lock already taken
2081 *
2082 */
2083 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2084 struct sk_buff_head *xmitq)
2085 {
2086 bool sk_conn = !tipc_sk_type_connectionless(sk);
2087 struct tipc_sock *tsk = tipc_sk(sk);
2088 struct tipc_group *grp = tsk->group;
2089 struct tipc_msg *hdr = buf_msg(skb);
2090 struct net *net = sock_net(sk);
2091 struct sk_buff_head inputq;
2092 int limit, err = TIPC_OK;
2093
2094 TIPC_SKB_CB(skb)->bytes_read = 0;
2095 __skb_queue_head_init(&inputq);
2096 __skb_queue_tail(&inputq, skb);
2097
2098 if (unlikely(!msg_isdata(hdr)))
2099 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2100
2101 if (unlikely(grp))
2102 tipc_group_filter_msg(grp, &inputq, xmitq);
2103
2104 /* Validate and add to receive buffer if there is space */
2105 while ((skb = __skb_dequeue(&inputq))) {
2106 hdr = buf_msg(skb);
2107 limit = rcvbuf_limit(sk, skb);
2108 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2109 (!sk_conn && msg_connected(hdr)) ||
2110 (!grp && msg_in_group(hdr)))
2111 err = TIPC_ERR_NO_PORT;
2112 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit)
2113 err = TIPC_ERR_OVERLOAD;
2114
2115 if (unlikely(err)) {
2116 tipc_skb_reject(net, err, skb, xmitq);
2117 err = TIPC_OK;
2118 continue;
2119 }
2120 __skb_queue_tail(&sk->sk_receive_queue, skb);
2121 skb_set_owner_r(skb, sk);
2122 sk->sk_data_ready(sk);
2123 }
2124 }
2125
2126 /**
2127 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2128 * @sk: socket
2129 * @skb: message
2130 *
2131 * Caller must hold socket lock
2132 */
2133 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2134 {
2135 unsigned int before = sk_rmem_alloc_get(sk);
2136 struct sk_buff_head xmitq;
2137 unsigned int added;
2138
2139 __skb_queue_head_init(&xmitq);
2140
2141 tipc_sk_filter_rcv(sk, skb, &xmitq);
2142 added = sk_rmem_alloc_get(sk) - before;
2143 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2144
2145 /* Send pending response/rejected messages, if any */
2146 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2147 return 0;
2148 }
2149
2150 /**
2151 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2152 * inputq and try adding them to socket or backlog queue
2153 * @inputq: list of incoming buffers with potentially different destinations
2154 * @sk: socket where the buffers should be enqueued
2155 * @dport: port number for the socket
 * @xmitq: output queue for rejected or response messages
2156 *
2157 * Caller must hold socket lock
2158 */
2159 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2160 u32 dport, struct sk_buff_head *xmitq)
2161 {
2162 unsigned long time_limit = jiffies + 2;
2163 struct sk_buff *skb;
2164 unsigned int lim;
2165 atomic_t *dcnt;
2166 u32 onode;
2167
2168 while (skb_queue_len(inputq)) {
2169 if (unlikely(time_after_eq(jiffies, time_limit)))
2170 return;
2171
2172 skb = tipc_skb_dequeue(inputq, dport);
2173 if (unlikely(!skb))
2174 return;
2175
2176 /* Add message directly to receive queue if possible */
2177 if (!sock_owned_by_user(sk)) {
2178 tipc_sk_filter_rcv(sk, skb, xmitq);
2179 continue;
2180 }
2181
2182 /* Try backlog, compensating for double-counted bytes */
2183 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2184 if (!sk->sk_backlog.len)
2185 atomic_set(dcnt, 0);
2186 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2187 if (likely(!sk_add_backlog(sk, skb, lim)))
2188 continue;
2189
2190 /* Overload => reject message back to sender */
2191 onode = tipc_own_addr(sock_net(sk));
2192 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
2193 __skb_queue_tail(xmitq, skb);
2194 break;
2195 }
2196 }
2197
2198 /**
2199 * tipc_sk_rcv - handle a chain of incoming buffers
 * @net: the associated network namespace
2200 * @inputq: buffer list containing the buffers
2201 * Consumes all buffers in list until inputq is empty
2202 * Note: may be called in multiple threads referring to the same queue
2203 */
2204 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2205 {
2206 struct sk_buff_head xmitq;
2207 u32 dnode, dport = 0;
2208 int err;
2209 struct tipc_sock *tsk;
2210 struct sock *sk;
2211 struct sk_buff *skb;
2212
2213 __skb_queue_head_init(&xmitq);
2214 while (skb_queue_len(inputq)) {
2215 dport = tipc_skb_peek_port(inputq, dport);
2216 tsk = tipc_sk_lookup(net, dport);
2217
2218 if (likely(tsk)) {
2219 sk = &tsk->sk;
2220 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2221 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2222 spin_unlock_bh(&sk->sk_lock.slock);
2223 }
2224 /* Send pending response/rejected messages, if any */
2225 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2226 sock_put(sk);
2227 continue;
2228 }
2229 /* No destination socket => dequeue skb if still there */
2230 skb = tipc_skb_dequeue(inputq, dport);
2231 if (!skb)
2232 return;
2233
2234 /* Try secondary lookup if unresolved named message */
2235 err = TIPC_ERR_NO_PORT;
2236 if (tipc_msg_lookup_dest(net, skb, &err))
2237 goto xmit;
2238
2239 /* Prepare for message rejection */
2240 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2241 continue;
2242 xmit:
2243 dnode = msg_destnode(buf_msg(skb));
2244 tipc_node_xmit_skb(net, skb, dnode, dport);
2245 }
2246 }
2247
2248 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2249 {
2250 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2251 struct sock *sk = sock->sk;
2252 int done;
2253
2254 do {
2255 int err = sock_error(sk);
2256 if (err)
2257 return err;
2258 if (!*timeo_p)
2259 return -ETIMEDOUT;
2260 if (signal_pending(current))
2261 return sock_intr_errno(*timeo_p);
2262
2263 add_wait_queue(sk_sleep(sk), &wait);
2264 done = sk_wait_event(sk, timeo_p,
2265 sk->sk_state != TIPC_CONNECTING, &wait);
2266 remove_wait_queue(sk_sleep(sk), &wait);
2267 } while (!done);
2268 return 0;
2269 }
2270
2271 /**
2272 * tipc_connect - establish a connection to another TIPC port
2273 * @sock: socket structure
2274 * @dest: socket address for destination port
2275 * @destlen: size of socket address data structure
2276 * @flags: file-related flags associated with socket
2277 *
2278 * Returns 0 on success, errno otherwise
2279 */
2280 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2281 int destlen, int flags)
2282 {
2283 struct sock *sk = sock->sk;
2284 struct tipc_sock *tsk = tipc_sk(sk);
2285 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2286 struct msghdr m = {NULL,};
2287 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2288 int previous;
2289 int res = 0;
2290
2291 if (destlen != sizeof(struct sockaddr_tipc))
2292 return -EINVAL;
2293
2294 lock_sock(sk);
2295
2296 if (tsk->group) {
2297 res = -EINVAL;
2298 goto exit;
2299 }
2300
2301 if (dst->family == AF_UNSPEC) {
2302 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2303 if (!tipc_sk_type_connectionless(sk))
2304 res = -EINVAL;
2305 goto exit;
2306 } else if (dst->family != AF_TIPC) {
2307 res = -EINVAL;
2308 }
2309 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
2310 res = -EINVAL;
2311 if (res)
2312 goto exit;
2313
2314 /* DGRAM/RDM connect(), just save the destaddr */
2315 if (tipc_sk_type_connectionless(sk)) {
2316 memcpy(&tsk->peer, dest, destlen);
2317 goto exit;
2318 }
2319
2320 previous = sk->sk_state;
2321
2322 switch (sk->sk_state) {
2323 case TIPC_OPEN:
2324 /* Send a 'SYN-' to destination */
2325 m.msg_name = dest;
2326 m.msg_namelen = destlen;
2327
2328 /* For a non-blocking connect, set MSG_DONTWAIT so that
2329 * __tipc_sendmsg() never blocks.
2330 */
2331 if (!timeout)
2332 m.msg_flags = MSG_DONTWAIT;
2333
2334 res = __tipc_sendmsg(sock, &m, 0);
2335 if ((res < 0) && (res != -EWOULDBLOCK))
2336 goto exit;
2337
2338 /* Just entered TIPC_CONNECTING state; the only
2339 * difference is that return value in non-blocking
2340 * case is EINPROGRESS, rather than EALREADY.
2341 */
2342 res = -EINPROGRESS;
2343 /* fall through */
2344 case TIPC_CONNECTING:
2345 if (!timeout) {
2346 if (previous == TIPC_CONNECTING)
2347 res = -EALREADY;
2348 goto exit;
2349 }
2350 timeout = msecs_to_jiffies(timeout);
2351 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2352 res = tipc_wait_for_connect(sock, &timeout);
2353 break;
2354 case TIPC_ESTABLISHED:
2355 res = -EISCONN;
2356 break;
2357 default:
2358 res = -EINVAL;
2359 }
2360
2361 exit:
2362 release_sock(sk);
2363 return res;
2364 }
2365
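/* Seen from user space this is an ordinary connect() against a service
 * name; a minimal sketch (service type 18888 and instance 17 are
 * arbitrary examples, error handling trimmed):
 *
 *	#include <linux/tipc.h>
 *	#include <sys/socket.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_tipc srv = {
 *			.family = AF_TIPC,
 *			.addrtype = TIPC_ADDR_NAME,
 *			.addr.name.name = { .type = 18888, .instance = 17 },
 *			.addr.name.domain = 0,	// zone-wide lookup
 *		};
 *		int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *
 *		// blocks until ACK/RST arrives or the connect timeout expires
 *		return connect(sd, (struct sockaddr *)&srv, sizeof(srv)) < 0;
 *	}
 */
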
2366 /**
2367 * tipc_listen - allow socket to listen for incoming connections
2368 * @sock: socket structure
2369 * @len: (unused)
2370 *
2371 * Returns 0 on success, errno otherwise
2372 */
2373 static int tipc_listen(struct socket *sock, int len)
2374 {
2375 struct sock *sk = sock->sk;
2376 int res;
2377
2378 lock_sock(sk);
2379 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2380 release_sock(sk);
2381
2382 return res;
2383 }
2384
2385 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2386 {
2387 struct sock *sk = sock->sk;
2388 DEFINE_WAIT(wait);
2389 int err;
2390
2391 /* True wake-one mechanism for incoming connections: only
2392 * one process gets woken up, not the 'whole herd'.
2393 * Since we do not 'race & poll' for established sockets
2394 * anymore, the common case will execute the loop only once.
2395 */
2396 for (;;) {
2397 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2398 TASK_INTERRUPTIBLE);
2399 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2400 release_sock(sk);
2401 timeo = schedule_timeout(timeo);
2402 lock_sock(sk);
2403 }
2404 err = 0;
2405 if (!skb_queue_empty(&sk->sk_receive_queue))
2406 break;
2407 err = -EAGAIN;
2408 if (!timeo)
2409 break;
2410 err = sock_intr_errno(timeo);
2411 if (signal_pending(current))
2412 break;
2413 }
2414 finish_wait(sk_sleep(sk), &wait);
2415 return err;
2416 }
2417
2418 /**
2419 * tipc_accept - wait for connection request
2420 * @sock: listening socket
2421 * @new_sock: new socket that is to be connected
2422 * @flags: file-related flags associated with socket
 * @kern: caused by kernel or by userspace?
2423 *
2424 * Returns 0 on success, errno otherwise
2425 */
2426 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2427 bool kern)
2428 {
2429 struct sock *new_sk, *sk = sock->sk;
2430 struct sk_buff *buf;
2431 struct tipc_sock *new_tsock;
2432 struct tipc_msg *msg;
2433 long timeo;
2434 int res;
2435
2436 lock_sock(sk);
2437
2438 if (sk->sk_state != TIPC_LISTEN) {
2439 res = -EINVAL;
2440 goto exit;
2441 }
2442 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2443 res = tipc_wait_for_accept(sock, timeo);
2444 if (res)
2445 goto exit;
2446
2447 buf = skb_peek(&sk->sk_receive_queue);
2448
2449 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2450 if (res)
2451 goto exit;
2452 security_sk_clone(sock->sk, new_sock->sk);
2453
2454 new_sk = new_sock->sk;
2455 new_tsock = tipc_sk(new_sk);
2456 msg = buf_msg(buf);
2457
2458 /* we lock on new_sk; but lockdep sees the lock on sk */
2459 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2460
2461 /*
2462 * Reject any stray messages received by new socket
2463 * before the socket lock was taken (very, very unlikely)
2464 */
2465 tsk_rej_rx_queue(new_sk);
2466
2467 /* Connect new socket to its peer */
2468 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2469
2470 tsk_set_importance(new_tsock, msg_importance(msg));
2471 if (msg_named(msg)) {
2472 new_tsock->conn_type = msg_nametype(msg);
2473 new_tsock->conn_instance = msg_nameinst(msg);
2474 }
2475
2476 /*
2477 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2478 * Respond to 'SYN+' by queuing it on new socket.
2479 */
2480 if (!msg_data_sz(msg)) {
2481 struct msghdr m = {NULL,};
2482
2483 tsk_advance_rx_queue(sk);
2484 __tipc_sendstream(new_sock, &m, 0);
2485 } else {
2486 __skb_dequeue(&sk->sk_receive_queue);
2487 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2488 skb_set_owner_r(buf, new_sk);
2489 }
2490 release_sock(new_sk);
2491 exit:
2492 release_sock(sk);
2493 return res;
2494 }
2495
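/* A minimal user-space server matching the listen/accept pair above
 * (arbitrary service type 18888, error handling trimmed):
 *
 *	#include <linux/tipc.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_tipc name = {
 *			.family = AF_TIPC,
 *			.addrtype = TIPC_ADDR_NAMESEQ,
 *			.scope = TIPC_CLUSTER_SCOPE,
 *			.addr.nameseq = { .type = 18888, .lower = 17, .upper = 17 },
 *		};
 *		int peer, sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *
 *		bind(sd, (struct sockaddr *)&name, sizeof(name));
 *		listen(sd, 5);
 *		peer = accept(sd, NULL, NULL);	// sleeps in tipc_wait_for_accept()
 *		if (peer >= 0)
 *			close(peer);
 *		return close(sd);
 *	}
 */
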
2496 /**
2497 * tipc_shutdown - shutdown socket connection
2498 * @sock: socket structure
2499 * @how: direction to close (must be SHUT_RDWR)
2500 *
2501 * Terminates connection (if necessary), then purges socket's receive queue.
2502 *
2503 * Returns 0 on success, errno otherwise
2504 */
2505 static int tipc_shutdown(struct socket *sock, int how)
2506 {
2507 struct sock *sk = sock->sk;
2508 int res;
2509
2510 if (how != SHUT_RDWR)
2511 return -EINVAL;
2512
2513 lock_sock(sk);
2514
2515 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2516 sk->sk_shutdown = SEND_SHUTDOWN;
2517
2518 if (sk->sk_state == TIPC_DISCONNECTING) {
2519 /* Discard any unreceived messages */
2520 __skb_queue_purge(&sk->sk_receive_queue);
2521
2522 /* Wake up anyone sleeping in poll */
2523 sk->sk_state_change(sk);
2524 res = 0;
2525 } else {
2526 res = -ENOTCONN;
2527 }
2528
2529 release_sock(sk);
2530 return res;
2531 }
2532
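/* A user-space note on the restriction above: TIPC implements only a
 * full-duplex close, so SHUT_RD or SHUT_WR fail with EINVAL:
 *
 *	shutdown(sd, SHUT_RDWR);	// peer sees TIPC_CONN_SHUTDOWN
 */
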
2533 static void tipc_sk_timeout(struct timer_list *t)
2534 {
2535 struct sock *sk = from_timer(sk, t, sk_timer);
2536 struct tipc_sock *tsk = tipc_sk(sk);
2537 u32 peer_port = tsk_peer_port(tsk);
2538 u32 peer_node = tsk_peer_node(tsk);
2539 u32 own_node = tsk_own_node(tsk);
2540 u32 own_port = tsk->portid;
2541 struct net *net = sock_net(sk);
2542 struct sk_buff *skb = NULL;
2543
2544 bh_lock_sock(sk);
2545 if (!tipc_sk_connected(sk))
2546 goto exit;
2547
2548 /* Try again later if socket is busy */
2549 if (sock_owned_by_user(sk)) {
2550 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2551 goto exit;
2552 }
2553
2554 if (tsk->probe_unacked) {
2555 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2556 tipc_node_remove_conn(net, peer_node, peer_port);
2557 sk->sk_state_change(sk);
2558 goto exit;
2559 }
2560 /* Send new probe */
2561 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2562 peer_node, own_node, peer_port, own_port,
2563 TIPC_OK);
2564 tsk->probe_unacked = true;
2565 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2566 exit:
2567 bh_unlock_sock(sk);
2568 if (skb)
2569 tipc_node_xmit_skb(net, skb, peer_node, own_port);
2570 sock_put(sk);
2571 }
2572
2573 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2574 struct tipc_name_seq const *seq)
2575 {
2576 struct sock *sk = &tsk->sk;
2577 struct net *net = sock_net(sk);
2578 struct publication *publ;
2579 u32 key;
2580
2581 if (tipc_sk_connected(sk))
2582 return -EINVAL;
2583 key = tsk->portid + tsk->pub_count + 1;
2584 if (key == tsk->portid)
2585 return -EADDRINUSE;
2586
2587 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2588 scope, tsk->portid, key);
2589 if (unlikely(!publ))
2590 return -EINVAL;
2591
2592 list_add(&publ->pport_list, &tsk->publications);
2593 tsk->pub_count++;
2594 tsk->published = 1;
2595 return 0;
2596 }
2597
2598 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2599 struct tipc_name_seq const *seq)
2600 {
2601 struct net *net = sock_net(&tsk->sk);
2602 struct publication *publ;
2603 struct publication *safe;
2604 int rc = -EINVAL;
2605
2606 list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
2607 if (seq) {
2608 if (publ->scope != scope)
2609 continue;
2610 if (publ->type != seq->type)
2611 continue;
2612 if (publ->lower != seq->lower)
2613 continue;
2614 if (publ->upper != seq->upper)
2615 break;
2616 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2617 publ->ref, publ->key);
2618 rc = 0;
2619 break;
2620 }
2621 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2622 publ->ref, publ->key);
2623 rc = 0;
2624 }
2625 if (list_empty(&tsk->publications))
2626 tsk->published = 0;
2627 return rc;
2628 }
2629
2630 /* tipc_sk_reinit: set non-zero address in all existing sockets
2631 * when we go from standalone to network mode.
2632 */
2633 void tipc_sk_reinit(struct net *net)
2634 {
2635 struct tipc_net *tn = net_generic(net, tipc_net_id);
2636 struct rhashtable_iter iter;
2637 struct tipc_sock *tsk;
2638 struct tipc_msg *msg;
2639
2640 rhashtable_walk_enter(&tn->sk_rht, &iter);
2641
2642 do {
2643 tsk = ERR_PTR(rhashtable_walk_start(&iter));
2644 if (IS_ERR(tsk))
2645 goto walk_stop;
2646
2647 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2648 spin_lock_bh(&tsk->sk.sk_lock.slock);
2649 msg = &tsk->phdr;
2650 msg_set_prevnode(msg, tn->own_addr);
2651 msg_set_orignode(msg, tn->own_addr);
2652 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2653 }
2654 walk_stop:
2655 rhashtable_walk_stop(&iter);
2656 } while (tsk == ERR_PTR(-EAGAIN));
2657 }
2658
2659 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2660 {
2661 struct tipc_net *tn = net_generic(net, tipc_net_id);
2662 struct tipc_sock *tsk;
2663
2664 rcu_read_lock();
2665 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2666 if (tsk)
2667 sock_hold(&tsk->sk);
2668 rcu_read_unlock();
2669
2670 return tsk;
2671 }
2672
2673 static int tipc_sk_insert(struct tipc_sock *tsk)
2674 {
2675 struct sock *sk = &tsk->sk;
2676 struct net *net = sock_net(sk);
2677 struct tipc_net *tn = net_generic(net, tipc_net_id);
2678 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2679 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2680
2681 while (remaining--) {
2682 portid++;
2683 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2684 portid = TIPC_MIN_PORT;
2685 tsk->portid = portid;
2686 sock_hold(&tsk->sk);
2687 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2688 tsk_rht_params))
2689 return 0;
2690 sock_put(&tsk->sk);
2691 }
2692
2693 return -1;
2694 }
2695
2696 static void tipc_sk_remove(struct tipc_sock *tsk)
2697 {
2698 struct sock *sk = &tsk->sk;
2699 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2700
2701 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2702 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2703 __sock_put(sk);
2704 }
2705 }
2706
2707 static const struct rhashtable_params tsk_rht_params = {
2708 .nelem_hint = 192,
2709 .head_offset = offsetof(struct tipc_sock, node),
2710 .key_offset = offsetof(struct tipc_sock, portid),
2711 .key_len = sizeof(u32), /* portid */
2712 .max_size = 1048576,
2713 .min_size = 256,
2714 .automatic_shrinking = true,
2715 };
2716
2717 int tipc_sk_rht_init(struct net *net)
2718 {
2719 struct tipc_net *tn = net_generic(net, tipc_net_id);
2720
2721 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2722 }
2723
2724 void tipc_sk_rht_destroy(struct net *net)
2725 {
2726 struct tipc_net *tn = net_generic(net, tipc_net_id);
2727
2728 /* Wait for socket readers to complete */
2729 synchronize_net();
2730
2731 rhashtable_destroy(&tn->sk_rht);
2732 }
2733
2734 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2735 {
2736 struct net *net = sock_net(&tsk->sk);
2737 u32 domain = addr_domain(net, mreq->scope);
2738 struct tipc_group *grp = tsk->group;
2739 struct tipc_msg *hdr = &tsk->phdr;
2740 struct tipc_name_seq seq;
2741 int rc;
2742
2743 if (mreq->type < TIPC_RESERVED_TYPES)
2744 return -EACCES;
2745 if (grp)
2746 return -EACCES;
2747 grp = tipc_group_create(net, tsk->portid, mreq);
2748 if (!grp)
2749 return -ENOMEM;
2750 tsk->group = grp;
2751 msg_set_lookup_scope(hdr, mreq->scope);
2752 msg_set_nametype(hdr, mreq->type);
2753 msg_set_dest_droppable(hdr, true);
2754 seq.type = mreq->type;
2755 seq.lower = mreq->instance;
2756 seq.upper = seq.lower;
2757 tipc_nametbl_build_group(net, grp, mreq->type, domain);
2758 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2759 if (rc) {
2760 tipc_group_delete(net, grp);
2761 tsk->group = NULL;
2762 }
2763
2764 /* Eliminate any risk that a broadcast overtakes the sent JOIN */
2765 tsk->mc_method.rcast = true;
2766 tsk->mc_method.mandatory = true;
2767 return rc;
2768 }
2769
2770 static int tipc_sk_leave(struct tipc_sock *tsk)
2771 {
2772 struct net *net = sock_net(&tsk->sk);
2773 struct tipc_group *grp = tsk->group;
2774 struct tipc_name_seq seq;
2775 int scope;
2776
2777 if (!grp)
2778 return -EINVAL;
2779 tipc_group_self(grp, &seq, &scope);
2780 tipc_group_delete(net, grp);
2781 tsk->group = NULL;
2782 tipc_sk_withdraw(tsk, scope, &seq);
2783 return 0;
2784 }
2785
2786 /**
2787 * tipc_setsockopt - set socket option
2788 * @sock: socket structure
2789 * @lvl: option level
2790 * @opt: option identifier
2791 * @ov: pointer to new option value
2792 * @ol: length of option value
2793 *
2794 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2795 * (to ease compatibility).
2796 *
2797 * Returns 0 on success, errno otherwise
2798 */
2799 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2800 char __user *ov, unsigned int ol)
2801 {
2802 struct sock *sk = sock->sk;
2803 struct tipc_sock *tsk = tipc_sk(sk);
2804 struct tipc_group_req mreq;
2805 u32 value = 0;
2806 int res = 0;
2807
2808 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2809 return 0;
2810 if (lvl != SOL_TIPC)
2811 return -ENOPROTOOPT;
2812
2813 switch (opt) {
2814 case TIPC_IMPORTANCE:
2815 case TIPC_SRC_DROPPABLE:
2816 case TIPC_DEST_DROPPABLE:
2817 case TIPC_CONN_TIMEOUT:
2818 if (ol < sizeof(value))
2819 return -EINVAL;
2820 if (get_user(value, (u32 __user *)ov))
2821 return -EFAULT;
2822 break;
2823 case TIPC_GROUP_JOIN:
2824 if (ol < sizeof(mreq))
2825 return -EINVAL;
2826 if (copy_from_user(&mreq, ov, sizeof(mreq)))
2827 return -EFAULT;
2828 break;
2829 default:
2830 if (ov || ol)
2831 return -EINVAL;
2832 }
2833
2834 lock_sock(sk);
2835
2836 switch (opt) {
2837 case TIPC_IMPORTANCE:
2838 res = tsk_set_importance(tsk, value);
2839 break;
2840 case TIPC_SRC_DROPPABLE:
2841 if (sock->type != SOCK_STREAM)
2842 tsk_set_unreliable(tsk, value);
2843 else
2844 res = -ENOPROTOOPT;
2845 break;
2846 case TIPC_DEST_DROPPABLE:
2847 tsk_set_unreturnable(tsk, value);
2848 break;
2849 case TIPC_CONN_TIMEOUT:
2850 tipc_sk(sk)->conn_timeout = value;
2851 break;
2852 case TIPC_MCAST_BROADCAST:
2853 tsk->mc_method.rcast = false;
2854 tsk->mc_method.mandatory = true;
2855 break;
2856 case TIPC_MCAST_REPLICAST:
2857 tsk->mc_method.rcast = true;
2858 tsk->mc_method.mandatory = true;
2859 break;
2860 case TIPC_GROUP_JOIN:
2861 res = tipc_sk_join(tsk, &mreq);
2862 break;
2863 case TIPC_GROUP_LEAVE:
2864 res = tipc_sk_leave(tsk);
2865 break;
2866 default:
2867 res = -EINVAL;
2868 }
2869
2870 release_sock(sk);
2871
2872 return res;
2873 }
2874
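/* Group membership is driven entirely through setsockopt(); a sketch of
 * join and leave (group type 18888 and member instance 42 are arbitrary
 * example values):
 *
 *	#include <linux/tipc.h>
 *	#include <sys/socket.h>
 *
 *	int main(void)
 *	{
 *		struct tipc_group_req mreq = {
 *			.type = 18888,
 *			.instance = 42,
 *			.scope = TIPC_CLUSTER_SCOPE,
 *		};
 *		int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 *		// tipc_sk_join() above publishes {type, instance} for us
 *		if (setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN,
 *			       &mreq, sizeof(mreq)) < 0)
 *			return 1;
 *		// ... exchange group messages ...
 *		return setsockopt(sd, SOL_TIPC, TIPC_GROUP_LEAVE, NULL, 0);
 *	}
 */
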
2875 /**
2876 * tipc_getsockopt - get socket option
2877 * @sock: socket structure
2878 * @lvl: option level
2879 * @opt: option identifier
2880 * @ov: receptacle for option value
2881 * @ol: receptacle for length of option value
2882 *
2883 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2884 * (to ease compatibility).
2885 *
2886 * Returns 0 on success, errno otherwise
2887 */
2888 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2889 char __user *ov, int __user *ol)
2890 {
2891 struct sock *sk = sock->sk;
2892 struct tipc_sock *tsk = tipc_sk(sk);
2893 struct tipc_name_seq seq;
2894 int len, scope;
2895 u32 value;
2896 int res;
2897
2898 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2899 return put_user(0, ol);
2900 if (lvl != SOL_TIPC)
2901 return -ENOPROTOOPT;
2902 res = get_user(len, ol);
2903 if (res)
2904 return res;
2905
2906 lock_sock(sk);
2907
2908 switch (opt) {
2909 case TIPC_IMPORTANCE:
2910 value = tsk_importance(tsk);
2911 break;
2912 case TIPC_SRC_DROPPABLE:
2913 value = tsk_unreliable(tsk);
2914 break;
2915 case TIPC_DEST_DROPPABLE:
2916 value = tsk_unreturnable(tsk);
2917 break;
2918 case TIPC_CONN_TIMEOUT:
2919 value = tsk->conn_timeout;
2920 /* no need to set "res", since already 0 at this point */
2921 break;
2922 case TIPC_NODE_RECVQ_DEPTH:
2923 value = 0; /* was tipc_queue_size, now obsolete */
2924 break;
2925 case TIPC_SOCK_RECVQ_DEPTH:
2926 value = skb_queue_len(&sk->sk_receive_queue);
2927 break;
2928 case TIPC_GROUP_JOIN:
2929 seq.type = 0;
2930 if (tsk->group)
2931 tipc_group_self(tsk->group, &seq, &scope);
2932 value = seq.type;
2933 break;
2934 default:
2935 res = -EINVAL;
2936 }
2937
2938 release_sock(sk);
2939
2940 if (res)
2941 return res; /* "get" failed */
2942
2943 if (len < sizeof(value))
2944 return -EINVAL;
2945
2946 if (copy_to_user(ov, &value, sizeof(value)))
2947 return -EFAULT;
2948
2949 return put_user(sizeof(value), ol);
2950 }
2951
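/* Reading an option back follows the usual getsockopt() pattern, e.g. the
 * connect timeout in milliseconds (fragment; assumes an already created
 * TIPC socket 'sd'):
 *
 *	__u32 value;
 *	socklen_t len = sizeof(value);
 *
 *	if (!getsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &value, &len))
 *		printf("connect timeout: %u ms\n", value);
 */
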
2952 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2953 {
2954 struct sock *sk = sock->sk;
2955 struct tipc_sioc_ln_req lnr;
2956 void __user *argp = (void __user *)arg;
2957
2958 switch (cmd) {
2959 case SIOCGETLINKNAME:
2960 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2961 return -EFAULT;
2962 if (!tipc_node_get_linkname(sock_net(sk),
2963 lnr.bearer_id & 0xffff, lnr.peer,
2964 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2965 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2966 return -EFAULT;
2967 return 0;
2968 }
2969 return -EADDRNOTAVAIL;
2970 default:
2971 return -ENOIOCTLCMD;
2972 }
2973 }
2974
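/* The only private ioctl resolves the name of the link towards a peer
 * node; a sketch, assuming the usual <zone.cluster.node> packing so that
 * peer <1.1.2> encodes as 0x1001002, with bearer 0 chosen arbitrarily:
 *
 *	#include <linux/tipc.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct tipc_sioc_ln_req lnr = { .peer = 0x1001002,
 *						.bearer_id = 0 };
 *		int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *
 *		if (!ioctl(sd, SIOCGETLINKNAME, &lnr))
 *			printf("link: %s\n", lnr.linkname);
 *		return 0;
 *	}
 */
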
2975 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
2976 {
2977 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
2978 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
2979 u32 onode = tipc_own_addr(sock_net(sock1->sk));
2980
2981 tsk1->peer.family = AF_TIPC;
2982 tsk1->peer.addrtype = TIPC_ADDR_ID;
2983 tsk1->peer.scope = TIPC_NODE_SCOPE;
2984 tsk1->peer.addr.id.ref = tsk2->portid;
2985 tsk1->peer.addr.id.node = onode;
2986 tsk2->peer.family = AF_TIPC;
2987 tsk2->peer.addrtype = TIPC_ADDR_ID;
2988 tsk2->peer.scope = TIPC_NODE_SCOPE;
2989 tsk2->peer.addr.id.ref = tsk1->portid;
2990 tsk2->peer.addr.id.node = onode;
2991
2992 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
2993 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
2994 return 0;
2995 }
2996
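/* tipc_socketpair() lets the standard socketpair() call work for AF_TIPC;
 * the two descriptors come back mutually connected:
 *
 *	int sv[2];
 *
 *	if (!socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv))
 *		send(sv[0], "ping", 4, 0);	// readable on sv[1]
 */
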
2997 /* Protocol switches for the various types of TIPC sockets */
2998
2999 static const struct proto_ops msg_ops = {
3000 .owner = THIS_MODULE,
3001 .family = AF_TIPC,
3002 .release = tipc_release,
3003 .bind = tipc_bind,
3004 .connect = tipc_connect,
3005 .socketpair = tipc_socketpair,
3006 .accept = sock_no_accept,
3007 .getname = tipc_getname,
3008 .poll = tipc_poll,
3009 .ioctl = tipc_ioctl,
3010 .listen = sock_no_listen,
3011 .shutdown = tipc_shutdown,
3012 .setsockopt = tipc_setsockopt,
3013 .getsockopt = tipc_getsockopt,
3014 .sendmsg = tipc_sendmsg,
3015 .recvmsg = tipc_recvmsg,
3016 .mmap = sock_no_mmap,
3017 .sendpage = sock_no_sendpage
3018 };
3019
3020 static const struct proto_ops packet_ops = {
3021 .owner = THIS_MODULE,
3022 .family = AF_TIPC,
3023 .release = tipc_release,
3024 .bind = tipc_bind,
3025 .connect = tipc_connect,
3026 .socketpair = tipc_socketpair,
3027 .accept = tipc_accept,
3028 .getname = tipc_getname,
3029 .poll = tipc_poll,
3030 .ioctl = tipc_ioctl,
3031 .listen = tipc_listen,
3032 .shutdown = tipc_shutdown,
3033 .setsockopt = tipc_setsockopt,
3034 .getsockopt = tipc_getsockopt,
3035 .sendmsg = tipc_send_packet,
3036 .recvmsg = tipc_recvmsg,
3037 .mmap = sock_no_mmap,
3038 .sendpage = sock_no_sendpage
3039 };
3040
3041 static const struct proto_ops stream_ops = {
3042 .owner = THIS_MODULE,
3043 .family = AF_TIPC,
3044 .release = tipc_release,
3045 .bind = tipc_bind,
3046 .connect = tipc_connect,
3047 .socketpair = tipc_socketpair,
3048 .accept = tipc_accept,
3049 .getname = tipc_getname,
3050 .poll = tipc_poll,
3051 .ioctl = tipc_ioctl,
3052 .listen = tipc_listen,
3053 .shutdown = tipc_shutdown,
3054 .setsockopt = tipc_setsockopt,
3055 .getsockopt = tipc_getsockopt,
3056 .sendmsg = tipc_sendstream,
3057 .recvmsg = tipc_recvstream,
3058 .mmap = sock_no_mmap,
3059 .sendpage = sock_no_sendpage
3060 };
3061
3062 static const struct net_proto_family tipc_family_ops = {
3063 .owner = THIS_MODULE,
3064 .family = AF_TIPC,
3065 .create = tipc_sk_create
3066 };
3067
3068 static struct proto tipc_proto = {
3069 .name = "TIPC",
3070 .owner = THIS_MODULE,
3071 .obj_size = sizeof(struct tipc_sock),
3072 .sysctl_rmem = sysctl_tipc_rmem
3073 };
3074
3075 /**
3076 * tipc_socket_init - initialize TIPC socket interface
3077 *
3078 * Returns 0 on success, errno otherwise
3079 */
3080 int tipc_socket_init(void)
3081 {
3082 int res;
3083
3084 res = proto_register(&tipc_proto, 1);
3085 if (res) {
3086 pr_err("Failed to register TIPC protocol type\n");
3087 goto out;
3088 }
3089
3090 res = sock_register(&tipc_family_ops);
3091 if (res) {
3092 pr_err("Failed to register TIPC socket type\n");
3093 proto_unregister(&tipc_proto);
3094 goto out;
3095 }
3096 out:
3097 return res;
3098 }
3099
3100 /**
3101 * tipc_socket_stop - stop TIPC socket interface
3102 */
3103 void tipc_socket_stop(void)
3104 {
3105 sock_unregister(tipc_family_ops.family);
3106 proto_unregister(&tipc_proto);
3107 }
3108
3109 /* Caller should hold socket lock for the passed tipc socket. */
3110 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3111 {
3112 u32 peer_node;
3113 u32 peer_port;
3114 struct nlattr *nest;
3115
3116 peer_node = tsk_peer_node(tsk);
3117 peer_port = tsk_peer_port(tsk);
3118
3119 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
3120
3121 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3122 goto msg_full;
3123 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3124 goto msg_full;
3125
3126 if (tsk->conn_type != 0) {
3127 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3128 goto msg_full;
3129 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3130 goto msg_full;
3131 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3132 goto msg_full;
3133 }
3134 nla_nest_end(skb, nest);
3135
3136 return 0;
3137
3138 msg_full:
3139 nla_nest_cancel(skb, nest);
3140
3141 return -EMSGSIZE;
3142 }
3143
3144 /* Caller should hold socket lock for the passed tipc socket. */
3145 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3146 struct tipc_sock *tsk)
3147 {
3148 int err;
3149 void *hdr;
3150 struct nlattr *attrs;
3151 struct net *net = sock_net(skb->sk);
3152 struct tipc_net *tn = net_generic(net, tipc_net_id);
3153 struct sock *sk = &tsk->sk;
3154
3155 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3156 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3157 if (!hdr)
3158 goto msg_cancel;
3159
3160 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
3161 if (!attrs)
3162 goto genlmsg_cancel;
3163 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
3164 goto attr_msg_cancel;
3165 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
3166 goto attr_msg_cancel;
3167
3168 if (tipc_sk_connected(sk)) {
3169 err = __tipc_nl_add_sk_con(skb, tsk);
3170 if (err)
3171 goto attr_msg_cancel;
3172 } else if (!list_empty(&tsk->publications)) {
3173 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3174 goto attr_msg_cancel;
3175 }
3176 nla_nest_end(skb, attrs);
3177 genlmsg_end(skb, hdr);
3178
3179 return 0;
3180
3181 attr_msg_cancel:
3182 nla_nest_cancel(skb, attrs);
3183 genlmsg_cancel:
3184 genlmsg_cancel(skb, hdr);
3185 msg_cancel:
3186 return -EMSGSIZE;
3187 }
3188
3189 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3190 {
3191 int err;
3192 struct tipc_sock *tsk;
3193 const struct bucket_table *tbl;
3194 struct rhash_head *pos;
3195 struct net *net = sock_net(skb->sk);
3196 struct tipc_net *tn = net_generic(net, tipc_net_id);
3197 u32 tbl_id = cb->args[0];
3198 u32 prev_portid = cb->args[1];
3199
3200 rcu_read_lock();
3201 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
3202 for (; tbl_id < tbl->size; tbl_id++) {
3203 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
3204 spin_lock_bh(&tsk->sk.sk_lock.slock);
3205 if (prev_portid && prev_portid != tsk->portid) {
3206 spin_unlock_bh(&tsk->sk.sk_lock.slock);
3207 continue;
3208 }
3209
3210 err = __tipc_nl_add_sk(skb, cb, tsk);
3211 if (err) {
3212 prev_portid = tsk->portid;
3213 spin_unlock_bh(&tsk->sk.sk_lock.slock);
3214 goto out;
3215 }
3216 prev_portid = 0;
3217 spin_unlock_bh(&tsk->sk.sk_lock.slock);
3218 }
3219 }
3220 out:
3221 rcu_read_unlock();
3222 cb->args[0] = tbl_id;
3223 cb->args[1] = prev_portid;
3224
3225 return skb->len;
3226 }
3227
3228 /* Caller should hold socket lock for the passed tipc socket. */
3229 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3230 struct netlink_callback *cb,
3231 struct publication *publ)
3232 {
3233 void *hdr;
3234 struct nlattr *attrs;
3235
3236 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3237 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3238 if (!hdr)
3239 goto msg_cancel;
3240
3241 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
3242 if (!attrs)
3243 goto genlmsg_cancel;
3244
3245 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3246 goto attr_msg_cancel;
3247 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3248 goto attr_msg_cancel;
3249 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3250 goto attr_msg_cancel;
3251 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3252 goto attr_msg_cancel;
3253
3254 nla_nest_end(skb, attrs);
3255 genlmsg_end(skb, hdr);
3256
3257 return 0;
3258
3259 attr_msg_cancel:
3260 nla_nest_cancel(skb, attrs);
3261 genlmsg_cancel:
3262 genlmsg_cancel(skb, hdr);
3263 msg_cancel:
3264 return -EMSGSIZE;
3265 }
3266
3267 /* Caller should hold socket lock for the passed tipc socket. */
3268 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3269 struct netlink_callback *cb,
3270 struct tipc_sock *tsk, u32 *last_publ)
3271 {
3272 int err;
3273 struct publication *p;
3274
3275 if (*last_publ) {
3276 list_for_each_entry(p, &tsk->publications, pport_list) {
3277 if (p->key == *last_publ)
3278 break;
3279 }
3280 if (p->key != *last_publ) {
3281 /* We never set seq or call nl_dump_check_consistent(),
3282 * which means that setting prev_seq here will cause the
3283 * consistency check to fail in the netlink callback
3284 * handler, resulting in the last NLMSG_DONE message
3285 * having the NLM_F_DUMP_INTR flag set.
3286 */
3287 cb->prev_seq = 1;
3288 *last_publ = 0;
3289 return -EPIPE;
3290 }
3291 } else {
3292 p = list_first_entry(&tsk->publications, struct publication,
3293 pport_list);
3294 }
3295
3296 list_for_each_entry_from(p, &tsk->publications, pport_list) {
3297 err = __tipc_nl_add_sk_publ(skb, cb, p);
3298 if (err) {
3299 *last_publ = p->key;
3300 return err;
3301 }
3302 }
3303 *last_publ = 0;
3304
3305 return 0;
3306 }
3307
3308 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3309 {
3310 int err;
3311 u32 tsk_portid = cb->args[0];
3312 u32 last_publ = cb->args[1];
3313 u32 done = cb->args[2];
3314 struct net *net = sock_net(skb->sk);
3315 struct tipc_sock *tsk;
3316
3317 if (!tsk_portid) {
3318 struct nlattr **attrs;
3319 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3320
3321 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3322 if (err)
3323 return err;
3324
3325 if (!attrs[TIPC_NLA_SOCK])
3326 return -EINVAL;
3327
3328 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
3329 attrs[TIPC_NLA_SOCK],
3330 tipc_nl_sock_policy, NULL);
3331 if (err)
3332 return err;
3333
3334 if (!sock[TIPC_NLA_SOCK_REF])
3335 return -EINVAL;
3336
3337 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3338 }
3339
3340 if (done)
3341 return 0;
3342
3343 tsk = tipc_sk_lookup(net, tsk_portid);
3344 if (!tsk)
3345 return -EINVAL;
3346
3347 lock_sock(&tsk->sk);
3348 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3349 if (!err)
3350 done = 1;
3351 release_sock(&tsk->sk);
3352 sock_put(&tsk->sk);
3353
3354 cb->args[0] = tsk_portid;
3355 cb->args[1] = last_publ;
3356 cb->args[2] = done;
3357
3358 return skb->len;
3359 }