1 /*
2 * net/tipc/node.c: TIPC node management routines
3 *
4 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "link.h"
39 #include "node.h"
40 #include "name_distr.h"
41 #include "socket.h"
42 #include "bcast.h"
43 #include "monitor.h"
44 #include "discover.h"
45 #include "netlink.h"
46 #include "trace.h"
47 #include "crypto.h"
48
49 #define INVALID_NODE_SIG 0x10000
50 #define NODE_CLEANUP_AFTER 300000
51
52 /* Flags used to take different actions according to flag type
53 * TIPC_NOTIFY_NODE_DOWN: notify node is down
54 * TIPC_NOTIFY_NODE_UP: notify node is up
55 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
56 */
57 enum {
58 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
59 TIPC_NOTIFY_NODE_UP = (1 << 4),
60 TIPC_NOTIFY_LINK_UP = (1 << 6),
61 TIPC_NOTIFY_LINK_DOWN = (1 << 7)
62 };
63
64 struct tipc_link_entry {
65 struct tipc_link *link;
66 spinlock_t lock; /* per link */
67 u32 mtu;
68 struct sk_buff_head inputq;
69 struct tipc_media_addr maddr;
70 };
71
72 struct tipc_bclink_entry {
73 struct tipc_link *link;
74 struct sk_buff_head inputq1;
75 struct sk_buff_head arrvq;
76 struct sk_buff_head inputq2;
77 struct sk_buff_head namedq;
78 };
79
80 /**
81 * struct tipc_node - TIPC node structure
82 * @addr: network address of node
83 * @kref: reference counter to node object
84 * @lock: rwlock governing access to structure
85 * @net: the applicable net namespace
86 * @hash: links to adjacent nodes in unsorted hash chain
87 * @inputq: pointer to input queue containing messages for msg event
88 * @namedq: pointer to name table input queue with name table messages
89 * @active_links: bearer ids of active links, used as index into links[] array
90 * @links: array containing references to all links to node
91 * @action_flags: bit mask of different types of node actions
92 * @state: connectivity state vs peer node
93 * @preliminary: a preliminary node or not
94 * @sync_point: sequence number where synch/failover is finished
95 * @list: links to adjacent nodes in sorted list of cluster's nodes
96 * @working_links: number of working links to node (both active and standby)
97 * @link_cnt: number of links to node
98 * @capabilities: bitmap, indicating peer node's functional capabilities
99 * @signature: node instance identifier
100 * @link_id: local and remote bearer ids of changing link, if any
101 * @publ_list: list of publications
102 * @rcu: rcu struct for tipc_node
103 * @delete_at: indicates the time for deleting a down node
104 * @crypto_rx: RX crypto handler
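 * @bc_entry: broadcast link entry
 * @failover_sent: failover sent or not
 * @peer_id: 128-bit ID of peer
 * @peer_id_string: ID string of peer
 * @conn_sks: list of socket connections towards this node
 * @keepalive_intv: keepalive interval in milliseconds
 * @timer: node's keepalive timer
 * @peer_net: peer's net namespace, if it is local to this kernel
 * @peer_hash_mix: namespace identity hash of the peer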
105 */
106 struct tipc_node {
107 u32 addr;
108 struct kref kref;
109 rwlock_t lock;
110 struct net *net;
111 struct hlist_node hash;
112 int active_links[2];
113 struct tipc_link_entry links[MAX_BEARERS];
114 struct tipc_bclink_entry bc_entry;
115 int action_flags;
116 struct list_head list;
117 int state;
118 bool preliminary;
119 bool failover_sent;
120 u16 sync_point;
121 int link_cnt;
122 u16 working_links;
123 u16 capabilities;
124 u32 signature;
125 u32 link_id;
126 u8 peer_id[16];
127 char peer_id_string[NODE_ID_STR_LEN];
128 struct list_head publ_list;
129 struct list_head conn_sks;
130 unsigned long keepalive_intv;
131 struct timer_list timer;
132 struct rcu_head rcu;
133 unsigned long delete_at;
134 struct net *peer_net;
135 u32 peer_hash_mix;
136 #ifdef CONFIG_TIPC_CRYPTO
137 struct tipc_crypto *crypto_rx;
138 #endif
139 };
140
141 /* Node FSM states and events:
142 */
143 enum {
144 SELF_DOWN_PEER_DOWN = 0xdd,
145 SELF_UP_PEER_UP = 0xaa,
146 SELF_DOWN_PEER_LEAVING = 0xd1,
147 SELF_UP_PEER_COMING = 0xac,
148 SELF_COMING_PEER_UP = 0xca,
149 SELF_LEAVING_PEER_DOWN = 0x1d,
150 NODE_FAILINGOVER = 0xf0,
151 NODE_SYNCHING = 0xcc
152 };
153
154 enum {
155 SELF_ESTABL_CONTACT_EVT = 0xece,
156 SELF_LOST_CONTACT_EVT = 0x1ce,
157 PEER_ESTABL_CONTACT_EVT = 0x9ece,
158 PEER_LOST_CONTACT_EVT = 0x91ce,
159 NODE_FAILOVER_BEGIN_EVT = 0xfbe,
160 NODE_FAILOVER_END_EVT = 0xfee,
161 NODE_SYNCH_BEGIN_EVT = 0xcbe,
162 NODE_SYNCH_END_EVT = 0xcee
163 };
164
165 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
166 struct sk_buff_head *xmitq,
167 struct tipc_media_addr **maddr);
168 static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
169 bool delete);
170 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
171 static void tipc_node_delete(struct tipc_node *node);
172 static void tipc_node_timeout(struct timer_list *t);
173 static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
174 static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
175 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
176 static bool node_is_up(struct tipc_node *n);
177 static void tipc_node_delete_from_list(struct tipc_node *node);
178
179 struct tipc_sock_conn {
180 u32 port;
181 u32 peer_port;
182 u32 peer_node;
183 struct list_head list;
184 };
185
186 static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
187 {
188 int bearer_id = n->active_links[sel & 1];
189
190 if (unlikely(bearer_id == INVALID_BEARER_ID))
191 return NULL;
192
193 return n->links[bearer_id].link;
194 }
195
196 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
197 {
198 struct tipc_node *n;
199 int bearer_id;
200 unsigned int mtu = MAX_MSG_SIZE;
201
202 n = tipc_node_find(net, addr);
203 if (unlikely(!n))
204 return mtu;
205
206 /* Allow MAX_MSG_SIZE when building connection oriented messages
207 * if the nodes are in the same core network
208 */
209 if (n->peer_net && connected) {
210 tipc_node_put(n);
211 return mtu;
212 }
213
214 bearer_id = n->active_links[sel & 1];
215 if (likely(bearer_id != INVALID_BEARER_ID))
216 mtu = n->links[bearer_id].mtu;
217 tipc_node_put(n);
218 return mtu;
219 }
220
221 bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
222 {
223 u8 *own_id = tipc_own_id(net);
224 struct tipc_node *n;
225
226 if (!own_id)
227 return true;
228
229 if (addr == tipc_own_addr(net)) {
230 memcpy(id, own_id, TIPC_NODEID_LEN);
231 return true;
232 }
233 n = tipc_node_find(net, addr);
234 if (!n)
235 return false;
236
237 memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
238 tipc_node_put(n);
239 return true;
240 }
241
242 u16 tipc_node_get_capabilities(struct net *net, u32 addr)
243 {
244 struct tipc_node *n;
245 u16 caps;
246
247 n = tipc_node_find(net, addr);
248 if (unlikely(!n))
249 return TIPC_NODE_CAPABILITIES;
250 caps = n->capabilities;
251 tipc_node_put(n);
252 return caps;
253 }
254
255 u32 tipc_node_get_addr(struct tipc_node *node)
256 {
257 return (node) ? node->addr : 0;
258 }
259
260 char *tipc_node_get_id_str(struct tipc_node *node)
261 {
262 return node->peer_id_string;
263 }
264
265 #ifdef CONFIG_TIPC_CRYPTO
266 /**
267 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
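 * @__n: the node to retrieve the crypto RX handle from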
268 * Note: node ref counter must be held first!
269 */
270 struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
271 {
272 return (__n) ? __n->crypto_rx : NULL;
273 }
274
275 struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
276 {
277 return container_of(pos, struct tipc_node, list)->crypto_rx;
278 }
279 #endif
280
281 void tipc_node_free(struct rcu_head *rp)
282 {
283 struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
284
285 #ifdef CONFIG_TIPC_CRYPTO
286 tipc_crypto_stop(&n->crypto_rx);
287 #endif
288 kfree(n);
289 }
290
291 static void tipc_node_kref_release(struct kref *kref)
292 {
293 struct tipc_node *n = container_of(kref, struct tipc_node, kref);
294
295 kfree(n->bc_entry.link);
296 call_rcu(&n->rcu, tipc_node_free);
297 }
298
299 void tipc_node_put(struct tipc_node *node)
300 {
301 kref_put(&node->kref, tipc_node_kref_release);
302 }
303
304 static void tipc_node_get(struct tipc_node *node)
305 {
306 kref_get(&node->kref);
307 }
308
309 /*
310 * tipc_node_find - locate specified node object, if it exists
311 */
312 static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
313 {
314 struct tipc_net *tn = tipc_net(net);
315 struct tipc_node *node;
316 unsigned int thash = tipc_hashfn(addr);
317
318 rcu_read_lock();
319 hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
320 if (node->addr != addr || node->preliminary)
321 continue;
322 if (!kref_get_unless_zero(&node->kref))
323 node = NULL;
324 break;
325 }
326 rcu_read_unlock();
327 return node;
328 }
329
330 /* tipc_node_find_by_id - locate specified node object by its 128-bit id
331 * Note: this function is called only when a discovery request failed
332 * to find the node by its 32-bit id, and is not time critical
333 */
334 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
335 {
336 struct tipc_net *tn = tipc_net(net);
337 struct tipc_node *n;
338 bool found = false;
339
340 rcu_read_lock();
341 list_for_each_entry_rcu(n, &tn->node_list, list) {
342 read_lock_bh(&n->lock);
343 if (!memcmp(id, n->peer_id, 16) &&
344 kref_get_unless_zero(&n->kref))
345 found = true;
346 read_unlock_bh(&n->lock);
347 if (found)
348 break;
349 }
350 rcu_read_unlock();
351 return found ? n : NULL;
352 }
353
354 static void tipc_node_read_lock(struct tipc_node *n)
355 {
356 read_lock_bh(&n->lock);
357 }
358
359 static void tipc_node_read_unlock(struct tipc_node *n)
360 {
361 read_unlock_bh(&n->lock);
362 }
363
364 static void tipc_node_write_lock(struct tipc_node *n)
365 {
366 write_lock_bh(&n->lock);
367 }
368
369 static void tipc_node_write_unlock_fast(struct tipc_node *n)
370 {
371 write_unlock_bh(&n->lock);
372 }
373
374 static void tipc_node_write_unlock(struct tipc_node *n)
375 {
376 struct net *net = n->net;
377 u32 addr = 0;
378 u32 flags = n->action_flags;
379 u32 link_id = 0;
380 u32 bearer_id;
381 struct list_head *publ_list;
382
383 if (likely(!flags)) {
384 write_unlock_bh(&n->lock);
385 return;
386 }
387
388 addr = n->addr;
389 link_id = n->link_id;
390 bearer_id = link_id & 0xffff;
391 publ_list = &n->publ_list;
392
393 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
394 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
395
396 write_unlock_bh(&n->lock);
397
398 if (flags & TIPC_NOTIFY_NODE_DOWN)
399 tipc_publ_notify(net, publ_list, addr);
400
401 if (flags & TIPC_NOTIFY_NODE_UP)
402 tipc_named_node_up(net, addr);
403
404 if (flags & TIPC_NOTIFY_LINK_UP) {
405 tipc_mon_peer_up(net, addr, bearer_id);
406 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
407 TIPC_NODE_SCOPE, link_id, link_id);
408 }
409 if (flags & TIPC_NOTIFY_LINK_DOWN) {
410 tipc_mon_peer_down(net, addr, bearer_id);
411 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
412 addr, link_id);
413 }
414 }
415
416 static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
417 {
418 int net_id = tipc_netid(n->net);
419 struct tipc_net *tn_peer;
420 struct net *tmp;
421 u32 hash_chk;
422
423 if (n->peer_net)
424 return;
425
426 for_each_net_rcu(tmp) {
427 tn_peer = tipc_net(tmp);
428 if (!tn_peer)
429 continue;
430 /* Check whether the node really exists in this namespace */
431 if (tn_peer->net_id != net_id)
432 continue;
433 if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
434 continue;
435 hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
436 if (hash_mixes ^ hash_chk)
437 continue;
438 n->peer_net = tmp;
439 n->peer_hash_mix = hash_mixes;
440 break;
441 }
442 }
443
444 struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
445 u16 capabilities, u32 hash_mixes,
446 bool preliminary)
447 {
448 struct tipc_net *tn = net_generic(net, tipc_net_id);
449 struct tipc_node *n, *temp_node;
450 struct tipc_link *l;
451 unsigned long intv;
452 int bearer_id;
453 int i;
454
455 spin_lock_bh(&tn->node_list_lock);
456 n = tipc_node_find(net, addr) ?:
457 tipc_node_find_by_id(net, peer_id);
458 if (n) {
459 if (!n->preliminary)
460 goto update;
461 if (preliminary)
462 goto exit;
463 /* A preliminary node becomes "real" now, refresh its data */
464 tipc_node_write_lock(n);
465 n->preliminary = false;
466 n->addr = addr;
467 hlist_del_rcu(&n->hash);
468 hlist_add_head_rcu(&n->hash,
469 &tn->node_htable[tipc_hashfn(addr)]);
470 list_del_rcu(&n->list);
471 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
472 if (n->addr < temp_node->addr)
473 break;
474 }
475 list_add_tail_rcu(&n->list, &temp_node->list);
476 tipc_node_write_unlock_fast(n);
477
478 update:
479 if (n->peer_hash_mix ^ hash_mixes)
480 tipc_node_assign_peer_net(n, hash_mixes);
481 if (n->capabilities == capabilities)
482 goto exit;
483 /* Same node may come back with new capabilities */
484 tipc_node_write_lock(n);
485 n->capabilities = capabilities;
486 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
487 l = n->links[bearer_id].link;
488 if (l)
489 tipc_link_update_caps(l, capabilities);
490 }
491 tipc_node_write_unlock_fast(n);
492
493 /* Calculate cluster capabilities */
494 tn->capabilities = TIPC_NODE_CAPABILITIES;
495 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
496 tn->capabilities &= temp_node->capabilities;
497 }
498
499 tipc_bcast_toggle_rcast(net,
500 (tn->capabilities & TIPC_BCAST_RCAST));
501
502 goto exit;
503 }
504 n = kzalloc(sizeof(*n), GFP_ATOMIC);
505 if (!n) {
506 pr_warn("Node creation failed, no memory\n");
507 goto exit;
508 }
509 tipc_nodeid2string(n->peer_id_string, peer_id);
510 #ifdef CONFIG_TIPC_CRYPTO
511 if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
512 pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
513 kfree(n);
514 n = NULL;
515 goto exit;
516 }
517 #endif
518 n->addr = addr;
519 n->preliminary = preliminary;
520 memcpy(&n->peer_id, peer_id, 16);
521 n->net = net;
522 n->peer_net = NULL;
523 n->peer_hash_mix = 0;
524 /* Assign the peer's net namespace, if it is local to this kernel */
525 tipc_node_assign_peer_net(n, hash_mixes);
526 n->capabilities = capabilities;
527 kref_init(&n->kref);
528 rwlock_init(&n->lock);
529 INIT_HLIST_NODE(&n->hash);
530 INIT_LIST_HEAD(&n->list);
531 INIT_LIST_HEAD(&n->publ_list);
532 INIT_LIST_HEAD(&n->conn_sks);
533 skb_queue_head_init(&n->bc_entry.namedq);
534 skb_queue_head_init(&n->bc_entry.inputq1);
535 __skb_queue_head_init(&n->bc_entry.arrvq);
536 skb_queue_head_init(&n->bc_entry.inputq2);
537 for (i = 0; i < MAX_BEARERS; i++)
538 spin_lock_init(&n->links[i].lock);
539 n->state = SELF_DOWN_PEER_LEAVING;
540 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
541 n->signature = INVALID_NODE_SIG;
542 n->active_links[0] = INVALID_BEARER_ID;
543 n->active_links[1] = INVALID_BEARER_ID;
544 n->bc_entry.link = NULL;
545 tipc_node_get(n);
546 timer_setup(&n->timer, tipc_node_timeout, 0);
547 /* Start a slow timer anyway, crypto needs it */
548 n->keepalive_intv = 10000;
549 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
550 if (!mod_timer(&n->timer, intv))
551 tipc_node_get(n);
552 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
553 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
554 if (n->addr < temp_node->addr)
555 break;
556 }
557 list_add_tail_rcu(&n->list, &temp_node->list);
558 /* Calculate cluster capabilities */
559 tn->capabilities = TIPC_NODE_CAPABILITIES;
560 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
561 tn->capabilities &= temp_node->capabilities;
562 }
563 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
564 trace_tipc_node_create(n, true, " ");
565 exit:
566 spin_unlock_bh(&tn->node_list_lock);
567 return n;
568 }
569
570 static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
571 {
572 unsigned long tol = tipc_link_tolerance(l);
573 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
574
575 /* Link with lowest tolerance determines timer interval */
576 if (intv < n->keepalive_intv)
577 n->keepalive_intv = intv;
578
579 /* Ensure link's abort limit corresponds to current tolerance */
580 tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
581 }
582
583 static void tipc_node_delete_from_list(struct tipc_node *node)
584 {
585 list_del_rcu(&node->list);
586 hlist_del_rcu(&node->hash);
587 tipc_node_put(node);
588 }
589
590 static void tipc_node_delete(struct tipc_node *node)
591 {
592 trace_tipc_node_delete(node, true, " ");
593 tipc_node_delete_from_list(node);
594
595 del_timer_sync(&node->timer);
596 tipc_node_put(node);
597 }
598
599 void tipc_node_stop(struct net *net)
600 {
601 struct tipc_net *tn = tipc_net(net);
602 struct tipc_node *node, *t_node;
603
604 spin_lock_bh(&tn->node_list_lock);
605 list_for_each_entry_safe(node, t_node, &tn->node_list, list)
606 tipc_node_delete(node);
607 spin_unlock_bh(&tn->node_list_lock);
608 }
609
610 void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
611 {
612 struct tipc_node *n;
613
614 if (in_own_node(net, addr))
615 return;
616
617 n = tipc_node_find(net, addr);
618 if (!n) {
619 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
620 return;
621 }
622 tipc_node_write_lock(n);
623 list_add_tail(subscr, &n->publ_list);
624 tipc_node_write_unlock_fast(n);
625 tipc_node_put(n);
626 }
627
628 void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
629 {
630 struct tipc_node *n;
631
632 if (in_own_node(net, addr))
633 return;
634
635 n = tipc_node_find(net, addr);
636 if (!n) {
637 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
638 return;
639 }
640 tipc_node_write_lock(n);
641 list_del_init(subscr);
642 tipc_node_write_unlock_fast(n);
643 tipc_node_put(n);
644 }
645
646 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
647 {
648 struct tipc_node *node;
649 struct tipc_sock_conn *conn;
650 int err = 0;
651
652 if (in_own_node(net, dnode))
653 return 0;
654
655 node = tipc_node_find(net, dnode);
656 if (!node) {
657 pr_warn("Connecting sock to node 0x%x failed\n", dnode);
658 return -EHOSTUNREACH;
659 }
660 conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
661 if (!conn) {
662 err = -EHOSTUNREACH;
663 goto exit;
664 }
665 conn->peer_node = dnode;
666 conn->port = port;
667 conn->peer_port = peer_port;
668
669 tipc_node_write_lock(node);
670 list_add_tail(&conn->list, &node->conn_sks);
671 tipc_node_write_unlock(node);
672 exit:
673 tipc_node_put(node);
674 return err;
675 }
676
677 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
678 {
679 struct tipc_node *node;
680 struct tipc_sock_conn *conn, *safe;
681
682 if (in_own_node(net, dnode))
683 return;
684
685 node = tipc_node_find(net, dnode);
686 if (!node)
687 return;
688
689 tipc_node_write_lock(node);
690 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
691 if (port != conn->port)
692 continue;
693 list_del(&conn->list);
694 kfree(conn);
695 }
696 tipc_node_write_unlock(node);
697 tipc_node_put(node);
698 }
699
700 static void tipc_node_clear_links(struct tipc_node *node)
701 {
702 int i;
703
704 for (i = 0; i < MAX_BEARERS; i++) {
705 struct tipc_link_entry *le = &node->links[i];
706
707 if (le->link) {
708 kfree(le->link);
709 le->link = NULL;
710 node->link_cnt--;
711 }
712 }
713 }
714
715 /* tipc_node_cleanup - delete nodes that do not
716 * have active links for NODE_CLEANUP_AFTER time
717 */
718 static bool tipc_node_cleanup(struct tipc_node *peer)
719 {
720 struct tipc_node *temp_node;
721 struct tipc_net *tn = tipc_net(peer->net);
722 bool deleted = false;
723
724 /* If lock held by tipc_node_stop() the node will be deleted anyway */
725 if (!spin_trylock_bh(&tn->node_list_lock))
726 return false;
727
728 tipc_node_write_lock(peer);
729
730 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
731 tipc_node_clear_links(peer);
732 tipc_node_delete_from_list(peer);
733 deleted = true;
734 }
735 tipc_node_write_unlock(peer);
736
737 if (!deleted) {
738 spin_unlock_bh(&tn->node_list_lock);
739 return deleted;
740 }
741
742 /* Calculate cluster capabilities */
743 tn->capabilities = TIPC_NODE_CAPABILITIES;
744 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
745 tn->capabilities &= temp_node->capabilities;
746 }
747 tipc_bcast_toggle_rcast(peer->net,
748 (tn->capabilities & TIPC_BCAST_RCAST));
749 spin_unlock_bh(&tn->node_list_lock);
750 return deleted;
751 }
752
753 /* tipc_node_timeout - handle expiration of node timer
754 */
755 static void tipc_node_timeout(struct timer_list *t)
756 {
757 struct tipc_node *n = from_timer(n, t, timer);
758 struct tipc_link_entry *le;
759 struct sk_buff_head xmitq;
760 int remains = n->link_cnt;
761 int bearer_id;
762 int rc = 0;
763
764 trace_tipc_node_timeout(n, false, " ");
765 if (!node_is_up(n) && tipc_node_cleanup(n)) {
766 /* Remove the timer's reference to the node */
767 tipc_node_put(n);
768 return;
769 }
770
771 #ifdef CONFIG_TIPC_CRYPTO
772 /* Take any crypto key related actions first */
773 tipc_crypto_timeout(n->crypto_rx);
774 #endif
775 __skb_queue_head_init(&xmitq);
776
777 /* Initialize the node interval to a large value (10 seconds); it will
778 * then be recalculated from the lowest link tolerance
779 */
780 tipc_node_read_lock(n);
781 n->keepalive_intv = 10000;
782 tipc_node_read_unlock(n);
783 for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
784 tipc_node_read_lock(n);
785 le = &n->links[bearer_id];
786 if (le->link) {
787 spin_lock_bh(&le->lock);
788 /* Link tolerance may change asynchronously: */
789 tipc_node_calculate_timer(n, le->link);
790 rc = tipc_link_timeout(le->link, &xmitq);
791 spin_unlock_bh(&le->lock);
792 remains--;
793 }
794 tipc_node_read_unlock(n);
795 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
796 if (rc & TIPC_LINK_DOWN_EVT)
797 tipc_node_link_down(n, bearer_id, false);
798 }
799 mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
800 }
801
802 /**
803 * __tipc_node_link_up - handle addition of link
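 * @n: the node the link belongs to
 * @bearer_id: id of the bearer used by the link
 * @xmitq: queue for messages to be transmitted on the link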
804 * Node lock must be held by caller
805 * Link becomes active (alone or shared) or standby, depending on its priority.
806 */
807 static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
808 struct sk_buff_head *xmitq)
809 {
810 int *slot0 = &n->active_links[0];
811 int *slot1 = &n->active_links[1];
812 struct tipc_link *ol = node_active_link(n, 0);
813 struct tipc_link *nl = n->links[bearer_id].link;
814
815 if (!nl || tipc_link_is_up(nl))
816 return;
817
818 tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
819 if (!tipc_link_is_up(nl))
820 return;
821
822 n->working_links++;
823 n->action_flags |= TIPC_NOTIFY_LINK_UP;
824 n->link_id = tipc_link_id(nl);
825
826 /* Leave room for tunnel header when returning 'mtu' to users: */
827 n->links[bearer_id].mtu = tipc_link_mss(nl);
828
829 tipc_bearer_add_dest(n->net, bearer_id, n->addr);
830 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
831
832 pr_debug("Established link <%s> on network plane %c\n",
833 tipc_link_name(nl), tipc_link_plane(nl));
834 trace_tipc_node_link_up(n, true, " ");
835
836 /* Ensure that a STATE message goes first */
837 tipc_link_build_state_msg(nl, xmitq);
838
839 /* First link? => give it both slots */
840 if (!ol) {
841 *slot0 = bearer_id;
842 *slot1 = bearer_id;
843 tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
844 n->action_flags |= TIPC_NOTIFY_NODE_UP;
845 tipc_link_set_active(nl, true);
846 tipc_bcast_add_peer(n->net, nl, xmitq);
847 return;
848 }
849
850 /* Second link => redistribute slots */
851 if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
852 pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
853 *slot0 = bearer_id;
854 *slot1 = bearer_id;
855 tipc_link_set_active(nl, true);
856 tipc_link_set_active(ol, false);
857 } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
858 tipc_link_set_active(nl, true);
859 *slot1 = bearer_id;
860 } else {
861 pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
862 }
863
864 /* Prepare synchronization with first link */
865 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
866 }
867
868 /**
869 * tipc_node_link_up - handle addition of link
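 * @n: the node the link belongs to
 * @bearer_id: id of the bearer used by the link
 * @xmitq: queue for messages to be transmitted on the link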
870 *
871 * Link becomes active (alone or shared) or standby, depending on its priority.
872 */
873 static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
874 struct sk_buff_head *xmitq)
875 {
876 struct tipc_media_addr *maddr;
877
878 tipc_node_write_lock(n);
879 __tipc_node_link_up(n, bearer_id, xmitq);
880 maddr = &n->links[bearer_id].maddr;
881 tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
882 tipc_node_write_unlock(n);
883 }
884
885 /**
886 * tipc_node_link_failover() - start failover in case of "half-failover"
887 *
888 * This function is only called in a very special situation where link
889 * failover can be already started on peer node but not on this node.
890 * This can happen when e.g.
891 * 1. Both links <1A-2A>, <1B-2B> down
892 * 2. Link endpoint 2A up, but 1A still down (e.g. due to network
893 * disturbance, wrong session, etc.)
894 * 3. Link <1B-2B> up
895 * 4. Link endpoint 2A down (e.g. due to link tolerance timeout)
896 * 5. Node 2 starts failover onto link <1B-2B>
897 *
898 * ==> Node 1 never starts link/node failover!
899 *
900 * @n: tipc node structure
901 * @l: peer link endpoint failing over (can be NULL)
902 * @tnl: tunnel link
903 * @xmitq: queue for messages to be xmited on tnl link later
904 */
905 static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
906 struct tipc_link *tnl,
907 struct sk_buff_head *xmitq)
908 {
909 /* Avoid a "self-failover" that can never end */
910 if (!tipc_link_is_up(tnl))
911 return;
912
913 /* Don't rush; the failed link may be in the process of resetting */
914 if (l && !tipc_link_is_reset(l))
915 return;
916
917 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
918 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
919
920 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
921 tipc_link_failover_prepare(l, tnl, xmitq);
922
923 if (l)
924 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
925 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
926 }
927
928 /**
929 * __tipc_node_link_down - handle loss of link
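 * @n: the node the link belongs to
 * @bearer_id: id of the lost link's bearer; updated to the failover
 *             bearer id if failover is initiated
 * @xmitq: queue for messages to be transmitted
 * @maddr: output media address of the selected bearer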
930 */
931 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
932 struct sk_buff_head *xmitq,
933 struct tipc_media_addr **maddr)
934 {
935 struct tipc_link_entry *le = &n->links[*bearer_id];
936 int *slot0 = &n->active_links[0];
937 int *slot1 = &n->active_links[1];
938 int i, highest = 0, prio;
939 struct tipc_link *l, *_l, *tnl;
940
941 l = n->links[*bearer_id].link;
942 if (!l || tipc_link_is_reset(l))
943 return;
944
945 n->working_links--;
946 n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
947 n->link_id = tipc_link_id(l);
948
949 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
950
951 pr_debug("Lost link <%s> on network plane %c\n",
952 tipc_link_name(l), tipc_link_plane(l));
953
954 /* Select new active link if any available */
955 *slot0 = INVALID_BEARER_ID;
956 *slot1 = INVALID_BEARER_ID;
957 for (i = 0; i < MAX_BEARERS; i++) {
958 _l = n->links[i].link;
959 if (!_l || !tipc_link_is_up(_l))
960 continue;
961 if (_l == l)
962 continue;
963 prio = tipc_link_prio(_l);
964 if (prio < highest)
965 continue;
966 if (prio > highest) {
967 highest = prio;
968 *slot0 = i;
969 *slot1 = i;
970 continue;
971 }
972 *slot1 = i;
973 }
974
975 if (!node_is_up(n)) {
976 if (tipc_link_peer_is_down(l))
977 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
978 tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
979 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
980 tipc_link_fsm_evt(l, LINK_RESET_EVT);
981 tipc_link_reset(l);
982 tipc_link_build_reset_msg(l, xmitq);
983 *maddr = &n->links[*bearer_id].maddr;
984 node_lost_contact(n, &le->inputq);
985 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
986 return;
987 }
988 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
989
990 /* There is still a working link => initiate failover */
991 *bearer_id = n->active_links[0];
992 tnl = n->links[*bearer_id].link;
993 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
994 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
995 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
996 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
997 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
998 tipc_link_reset(l);
999 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1000 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
1001 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
1002 *maddr = &n->links[*bearer_id].maddr;
1003 }
1004
1005 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
1006 {
1007 struct tipc_link_entry *le = &n->links[bearer_id];
1008 struct tipc_media_addr *maddr = NULL;
1009 struct tipc_link *l = le->link;
1010 int old_bearer_id = bearer_id;
1011 struct sk_buff_head xmitq;
1012
1013 if (!l)
1014 return;
1015
1016 __skb_queue_head_init(&xmitq);
1017
1018 tipc_node_write_lock(n);
1019 if (!tipc_link_is_establishing(l)) {
1020 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
1021 } else {
1022 /* Defuse pending tipc_node_link_up() */
1023 tipc_link_reset(l);
1024 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1025 }
1026 if (delete) {
1027 kfree(l);
1028 le->link = NULL;
1029 n->link_cnt--;
1030 }
1031 trace_tipc_node_link_down(n, true, "node link down or deleted!");
1032 tipc_node_write_unlock(n);
1033 if (delete)
1034 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
1035 if (!skb_queue_empty(&xmitq))
1036 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
1037 tipc_sk_rcv(n->net, &le->inputq);
1038 }
1039
1040 static bool node_is_up(struct tipc_node *n)
1041 {
1042 return n->active_links[0] != INVALID_BEARER_ID;
1043 }
1044
1045 bool tipc_node_is_up(struct net *net, u32 addr)
1046 {
1047 struct tipc_node *n;
1048 bool retval = false;
1049
1050 if (in_own_node(net, addr))
1051 return true;
1052
1053 n = tipc_node_find(net, addr);
1054 if (!n)
1055 return false;
1056 retval = node_is_up(n);
1057 tipc_node_put(n);
1058 return retval;
1059 }
1060
1061 static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
1062 {
1063 struct tipc_node *n;
1064
1065 addr ^= tipc_net(net)->random;
1066 while ((n = tipc_node_find(net, addr))) {
1067 tipc_node_put(n);
1068 addr++;
1069 }
1070 return addr;
1071 }
1072
1073 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
1074 * Returns suggested address if any, otherwise 0
1075 */
1076 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
1077 {
1078 struct tipc_net *tn = tipc_net(net);
1079 struct tipc_node *n;
1080 bool preliminary;
1081 u32 sugg_addr;
1082
1083 /* Suggest new address if some other peer is using this one */
1084 n = tipc_node_find(net, addr);
1085 if (n) {
1086 if (!memcmp(n->peer_id, id, NODE_ID_LEN))
1087 addr = 0;
1088 tipc_node_put(n);
1089 if (!addr)
1090 return 0;
1091 return tipc_node_suggest_addr(net, addr);
1092 }
1093
1094 /* Suggest previously used address if peer is known */
1095 n = tipc_node_find_by_id(net, id);
1096 if (n) {
1097 sugg_addr = n->addr;
1098 preliminary = n->preliminary;
1099 tipc_node_put(n);
1100 if (!preliminary)
1101 return sugg_addr;
1102 }
1103
1104 /* Even this node may be in conflict */
1105 if (tn->trial_addr == addr)
1106 return tipc_node_suggest_addr(net, addr);
1107
1108 return 0;
1109 }
1110
1111 void tipc_node_check_dest(struct net *net, u32 addr,
1112 u8 *peer_id, struct tipc_bearer *b,
1113 u16 capabilities, u32 signature, u32 hash_mixes,
1114 struct tipc_media_addr *maddr,
1115 bool *respond, bool *dupl_addr)
1116 {
1117 struct tipc_node *n;
1118 struct tipc_link *l, *snd_l;
1119 struct tipc_link_entry *le;
1120 bool addr_match = false;
1121 bool sign_match = false;
1122 bool link_up = false;
1123 bool accept_addr = false;
1124 bool reset = true;
1125 char *if_name;
1126 unsigned long intv;
1127 u16 session;
1128
1129 *dupl_addr = false;
1130 *respond = false;
1131
1132 n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
1133 false);
1134 if (!n)
1135 return;
1136
1137 tipc_node_write_lock(n);
1138 if (unlikely(!n->bc_entry.link)) {
1139 snd_l = tipc_bc_sndlink(net);
1140 if (!tipc_link_bc_create(net, tipc_own_addr(net),
1141 addr, U16_MAX,
1142 tipc_link_window(snd_l),
1143 n->capabilities,
1144 &n->bc_entry.inputq1,
1145 &n->bc_entry.namedq, snd_l,
1146 &n->bc_entry.link)) {
1147 pr_warn("Broadcast rcv link creation failed, no mem\n");
1148 tipc_node_write_unlock_fast(n);
1149 tipc_node_put(n);
1150 return;
1151 }
1152 }
1153
1154 le = &n->links[b->identity];
1155
1156 /* Prepare to validate requesting node's signature and media address */
1157 l = le->link;
1158 link_up = l && tipc_link_is_up(l);
1159 addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
1160 sign_match = (signature == n->signature);
1161
1162 /* These three flags give us eight permutations: */
1163
1164 if (sign_match && addr_match && link_up) {
1165 /* All is fine. Do nothing. */
1166 reset = false;
1167 /* Peer node is not a container/local namespace */
1168 if (!n->peer_hash_mix)
1169 n->peer_hash_mix = hash_mixes;
1170 } else if (sign_match && addr_match && !link_up) {
1171 /* Respond. The link will come up in due time */
1172 *respond = true;
1173 } else if (sign_match && !addr_match && link_up) {
1174 /* Peer has changed i/f address without rebooting.
1175 * If so, the link will reset soon, and the next
1176 * discovery will be accepted. So we can ignore it.
1177 * It may also be a cloned or malicious peer having
1178 * chosen the same node address and signature as an
1179 * existing one.
1180 * Ignore requests until the link goes down, if ever.
1181 */
1182 *dupl_addr = true;
1183 } else if (sign_match && !addr_match && !link_up) {
1184 /* Peer link has changed i/f address without rebooting.
1185 * It may also be a cloned or malicious peer; we can't
1186 * distinguish between the two.
1187 * The signature is correct, so we must accept.
1188 */
1189 accept_addr = true;
1190 *respond = true;
1191 } else if (!sign_match && addr_match && link_up) {
1192 /* Peer node rebooted. Two possibilities:
1193 * - Delayed re-discovery; this link endpoint has already
1194 * reset and re-established contact with the peer, before
1195 * receiving a discovery message from that node.
1196 * (The peer happened to receive one from this node first).
1197 * - The peer came back so fast that our side has not
1198 * discovered it yet. Probing from this side will soon
1199 * reset the link, since there can be no working link
1200 * endpoint at the peer end, and the link will re-establish.
1201 * Accept the signature, since it comes from a known peer.
1202 */
1203 n->signature = signature;
1204 } else if (!sign_match && addr_match && !link_up) {
1205 /* The peer node has rebooted.
1206 * Accept signature, since it is a known peer.
1207 */
1208 n->signature = signature;
1209 *respond = true;
1210 } else if (!sign_match && !addr_match && link_up) {
1211 /* Peer rebooted with new address, or a new/duplicate peer.
1212 * Ignore until the link goes down, if ever.
1213 */
1214 *dupl_addr = true;
1215 } else if (!sign_match && !addr_match && !link_up) {
1216 /* Peer rebooted with new address, or it is a new peer.
1217 * Accept signature and address.
1218 */
1219 n->signature = signature;
1220 accept_addr = true;
1221 *respond = true;
1222 }
1223
1224 if (!accept_addr)
1225 goto exit;
1226
1227 /* Now create new link if not already existing */
1228 if (!l) {
1229 if (n->link_cnt == 2)
1230 goto exit;
1231
1232 if_name = strchr(b->name, ':') + 1;
1233 get_random_bytes(&session, sizeof(u16));
1234 if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
1235 b->net_plane, b->mtu, b->priority,
1236 b->window, session,
1237 tipc_own_addr(net), addr, peer_id,
1238 n->capabilities,
1239 tipc_bc_sndlink(n->net), n->bc_entry.link,
1240 &le->inputq,
1241 &n->bc_entry.namedq, &l)) {
1242 *respond = false;
1243 goto exit;
1244 }
1245 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
1246 tipc_link_reset(l);
1247 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1248 if (n->state == NODE_FAILINGOVER)
1249 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
1250 le->link = l;
1251 n->link_cnt++;
1252 tipc_node_calculate_timer(n, l);
1253 if (n->link_cnt == 1) {
1254 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
1255 if (!mod_timer(&n->timer, intv))
1256 tipc_node_get(n);
1257 }
1258 }
1259 memcpy(&le->maddr, maddr, sizeof(*maddr));
1260 exit:
1261 tipc_node_write_unlock(n);
1262 if (reset && l && !tipc_link_is_reset(l))
1263 tipc_node_link_down(n, b->identity, false);
1264 tipc_node_put(n);
1265 }
1266
1267 void tipc_node_delete_links(struct net *net, int bearer_id)
1268 {
1269 struct tipc_net *tn = net_generic(net, tipc_net_id);
1270 struct tipc_node *n;
1271
1272 rcu_read_lock();
1273 list_for_each_entry_rcu(n, &tn->node_list, list) {
1274 tipc_node_link_down(n, bearer_id, true);
1275 }
1276 rcu_read_unlock();
1277 }
1278
1279 static void tipc_node_reset_links(struct tipc_node *n)
1280 {
1281 int i;
1282
1283 pr_warn("Resetting all links to %x\n", n->addr);
1284
1285 trace_tipc_node_reset_links(n, true, " ");
1286 for (i = 0; i < MAX_BEARERS; i++) {
1287 tipc_node_link_down(n, i, false);
1288 }
1289 }
1290
1291 /* tipc_node_fsm_evt - node finite state machine
1292 * Determines when contact is allowed with peer node
1293 */
1294 static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
1295 {
1296 int state = n->state;
1297
1298 switch (state) {
1299 case SELF_DOWN_PEER_DOWN:
1300 switch (evt) {
1301 case SELF_ESTABL_CONTACT_EVT:
1302 state = SELF_UP_PEER_COMING;
1303 break;
1304 case PEER_ESTABL_CONTACT_EVT:
1305 state = SELF_COMING_PEER_UP;
1306 break;
1307 case SELF_LOST_CONTACT_EVT:
1308 case PEER_LOST_CONTACT_EVT:
1309 break;
1310 case NODE_SYNCH_END_EVT:
1311 case NODE_SYNCH_BEGIN_EVT:
1312 case NODE_FAILOVER_BEGIN_EVT:
1313 case NODE_FAILOVER_END_EVT:
1314 default:
1315 goto illegal_evt;
1316 }
1317 break;
1318 case SELF_UP_PEER_UP:
1319 switch (evt) {
1320 case SELF_LOST_CONTACT_EVT:
1321 state = SELF_DOWN_PEER_LEAVING;
1322 break;
1323 case PEER_LOST_CONTACT_EVT:
1324 state = SELF_LEAVING_PEER_DOWN;
1325 break;
1326 case NODE_SYNCH_BEGIN_EVT:
1327 state = NODE_SYNCHING;
1328 break;
1329 case NODE_FAILOVER_BEGIN_EVT:
1330 state = NODE_FAILINGOVER;
1331 break;
1332 case SELF_ESTABL_CONTACT_EVT:
1333 case PEER_ESTABL_CONTACT_EVT:
1334 case NODE_SYNCH_END_EVT:
1335 case NODE_FAILOVER_END_EVT:
1336 break;
1337 default:
1338 goto illegal_evt;
1339 }
1340 break;
1341 case SELF_DOWN_PEER_LEAVING:
1342 switch (evt) {
1343 case PEER_LOST_CONTACT_EVT:
1344 state = SELF_DOWN_PEER_DOWN;
1345 break;
1346 case SELF_ESTABL_CONTACT_EVT:
1347 case PEER_ESTABL_CONTACT_EVT:
1348 case SELF_LOST_CONTACT_EVT:
1349 break;
1350 case NODE_SYNCH_END_EVT:
1351 case NODE_SYNCH_BEGIN_EVT:
1352 case NODE_FAILOVER_BEGIN_EVT:
1353 case NODE_FAILOVER_END_EVT:
1354 default:
1355 goto illegal_evt;
1356 }
1357 break;
1358 case SELF_UP_PEER_COMING:
1359 switch (evt) {
1360 case PEER_ESTABL_CONTACT_EVT:
1361 state = SELF_UP_PEER_UP;
1362 break;
1363 case SELF_LOST_CONTACT_EVT:
1364 state = SELF_DOWN_PEER_DOWN;
1365 break;
1366 case SELF_ESTABL_CONTACT_EVT:
1367 case PEER_LOST_CONTACT_EVT:
1368 case NODE_SYNCH_END_EVT:
1369 case NODE_FAILOVER_BEGIN_EVT:
1370 break;
1371 case NODE_SYNCH_BEGIN_EVT:
1372 case NODE_FAILOVER_END_EVT:
1373 default:
1374 goto illegal_evt;
1375 }
1376 break;
1377 case SELF_COMING_PEER_UP:
1378 switch (evt) {
1379 case SELF_ESTABL_CONTACT_EVT:
1380 state = SELF_UP_PEER_UP;
1381 break;
1382 case PEER_LOST_CONTACT_EVT:
1383 state = SELF_DOWN_PEER_DOWN;
1384 break;
1385 case SELF_LOST_CONTACT_EVT:
1386 case PEER_ESTABL_CONTACT_EVT:
1387 break;
1388 case NODE_SYNCH_END_EVT:
1389 case NODE_SYNCH_BEGIN_EVT:
1390 case NODE_FAILOVER_BEGIN_EVT:
1391 case NODE_FAILOVER_END_EVT:
1392 default:
1393 goto illegal_evt;
1394 }
1395 break;
1396 case SELF_LEAVING_PEER_DOWN:
1397 switch (evt) {
1398 case SELF_LOST_CONTACT_EVT:
1399 state = SELF_DOWN_PEER_DOWN;
1400 break;
1401 case SELF_ESTABL_CONTACT_EVT:
1402 case PEER_ESTABL_CONTACT_EVT:
1403 case PEER_LOST_CONTACT_EVT:
1404 break;
1405 case NODE_SYNCH_END_EVT:
1406 case NODE_SYNCH_BEGIN_EVT:
1407 case NODE_FAILOVER_BEGIN_EVT:
1408 case NODE_FAILOVER_END_EVT:
1409 default:
1410 goto illegal_evt;
1411 }
1412 break;
1413 case NODE_FAILINGOVER:
1414 switch (evt) {
1415 case SELF_LOST_CONTACT_EVT:
1416 state = SELF_DOWN_PEER_LEAVING;
1417 break;
1418 case PEER_LOST_CONTACT_EVT:
1419 state = SELF_LEAVING_PEER_DOWN;
1420 break;
1421 case NODE_FAILOVER_END_EVT:
1422 state = SELF_UP_PEER_UP;
1423 break;
1424 case NODE_FAILOVER_BEGIN_EVT:
1425 case SELF_ESTABL_CONTACT_EVT:
1426 case PEER_ESTABL_CONTACT_EVT:
1427 break;
1428 case NODE_SYNCH_BEGIN_EVT:
1429 case NODE_SYNCH_END_EVT:
1430 default:
1431 goto illegal_evt;
1432 }
1433 break;
1434 case NODE_SYNCHING:
1435 switch (evt) {
1436 case SELF_LOST_CONTACT_EVT:
1437 state = SELF_DOWN_PEER_LEAVING;
1438 break;
1439 case PEER_LOST_CONTACT_EVT:
1440 state = SELF_LEAVING_PEER_DOWN;
1441 break;
1442 case NODE_SYNCH_END_EVT:
1443 state = SELF_UP_PEER_UP;
1444 break;
1445 case NODE_FAILOVER_BEGIN_EVT:
1446 state = NODE_FAILINGOVER;
1447 break;
1448 case NODE_SYNCH_BEGIN_EVT:
1449 case SELF_ESTABL_CONTACT_EVT:
1450 case PEER_ESTABL_CONTACT_EVT:
1451 break;
1452 case NODE_FAILOVER_END_EVT:
1453 default:
1454 goto illegal_evt;
1455 }
1456 break;
1457 default:
1458 pr_err("Unknown node fsm state %x\n", state);
1459 break;
1460 }
1461 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
1462 n->state = state;
1463 return;
1464
1465 illegal_evt:
1466 pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
1467 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
1468 }
1469
1470 static void node_lost_contact(struct tipc_node *n,
1471 struct sk_buff_head *inputq)
1472 {
1473 struct tipc_sock_conn *conn, *safe;
1474 struct tipc_link *l;
1475 struct list_head *conns = &n->conn_sks;
1476 struct sk_buff *skb;
1477 uint i;
1478
1479 pr_debug("Lost contact with %x\n", n->addr);
1480 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
1481 trace_tipc_node_lost_contact(n, true, " ");
1482
1483 /* Clean up broadcast state */
1484 tipc_bcast_remove_peer(n->net, n->bc_entry.link);
1485
1486 /* Abort any ongoing link failover */
1487 for (i = 0; i < MAX_BEARERS; i++) {
1488 l = n->links[i].link;
1489 if (l)
1490 tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
1491 }
1492
1493 /* Notify publications from this node */
1494 n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
1495 n->peer_net = NULL;
1496 n->peer_hash_mix = 0;
1497 /* Notify sockets connected to node */
1498 list_for_each_entry_safe(conn, safe, conns, list) {
1499 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
1500 SHORT_H_SIZE, 0, tipc_own_addr(n->net),
1501 conn->peer_node, conn->port,
1502 conn->peer_port, TIPC_ERR_NO_NODE);
1503 if (likely(skb))
1504 skb_queue_tail(inputq, skb);
1505 list_del(&conn->list);
1506 kfree(conn);
1507 }
1508 }
1509
1510 /**
1511 * tipc_node_get_linkname - get the name of a link
1512 *
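 * @net: the applicable net namespace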
1513 * @bearer_id: id of the bearer
1514 * @addr: peer node address
1515 * @linkname: link name output buffer
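 * @len: size of the linkname output buffer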
1516 *
1517 * Returns 0 on success
1518 */
1519 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
1520 char *linkname, size_t len)
1521 {
1522 struct tipc_link *link;
1523 int err = -EINVAL;
1524 struct tipc_node *node = tipc_node_find(net, addr);
1525
1526 if (!node)
1527 return err;
1528
1529 if (bearer_id >= MAX_BEARERS)
1530 goto exit;
1531
1532 tipc_node_read_lock(node);
1533 link = node->links[bearer_id].link;
1534 if (link) {
1535 strncpy(linkname, tipc_link_name(link), len);
1536 err = 0;
1537 }
1538 tipc_node_read_unlock(node);
1539 exit:
1540 tipc_node_put(node);
1541 return err;
1542 }
1543
1544 /* Caller should hold node lock for the passed node */
1545 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
1546 {
1547 void *hdr;
1548 struct nlattr *attrs;
1549
1550 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1551 NLM_F_MULTI, TIPC_NL_NODE_GET);
1552 if (!hdr)
1553 return -EMSGSIZE;
1554
1555 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
1556 if (!attrs)
1557 goto msg_full;
1558
1559 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
1560 goto attr_msg_full;
1561 if (node_is_up(node))
1562 if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
1563 goto attr_msg_full;
1564
1565 nla_nest_end(msg->skb, attrs);
1566 genlmsg_end(msg->skb, hdr);
1567
1568 return 0;
1569
1570 attr_msg_full:
1571 nla_nest_cancel(msg->skb, attrs);
1572 msg_full:
1573 genlmsg_cancel(msg->skb, hdr);
1574
1575 return -EMSGSIZE;
1576 }
1577
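/* tipc_lxc_xmit(): deliver buffers directly to a peer node that resides in
 * another net namespace on the same host (a local "container" peer),
 * bypassing the bearer layer. Messages that cannot be delivered this way
 * are left in the list for ordinary link transmission.
 */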
1578 static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
1579 {
1580 struct tipc_msg *hdr = buf_msg(skb_peek(list));
1581 struct sk_buff_head inputq;
1582
1583 switch (msg_user(hdr)) {
1584 case TIPC_LOW_IMPORTANCE:
1585 case TIPC_MEDIUM_IMPORTANCE:
1586 case TIPC_HIGH_IMPORTANCE:
1587 case TIPC_CRITICAL_IMPORTANCE:
1588 if (msg_connected(hdr) || msg_named(hdr)) {
1589 tipc_loopback_trace(peer_net, list);
1590 spin_lock_init(&list->lock);
1591 tipc_sk_rcv(peer_net, list);
1592 return;
1593 }
1594 if (msg_mcast(hdr)) {
1595 tipc_loopback_trace(peer_net, list);
1596 skb_queue_head_init(&inputq);
1597 tipc_sk_mcast_rcv(peer_net, list, &inputq);
1598 __skb_queue_purge(list);
1599 skb_queue_purge(&inputq);
1600 return;
1601 }
1602 return;
1603 case MSG_FRAGMENTER:
1604 if (tipc_msg_assemble(list)) {
1605 tipc_loopback_trace(peer_net, list);
1606 skb_queue_head_init(&inputq);
1607 tipc_sk_mcast_rcv(peer_net, list, &inputq);
1608 __skb_queue_purge(list);
1609 skb_queue_purge(&inputq);
1610 }
1611 return;
1612 case GROUP_PROTOCOL:
1613 case CONN_MANAGER:
1614 tipc_loopback_trace(peer_net, list);
1615 spin_lock_init(&list->lock);
1616 tipc_sk_rcv(peer_net, list);
1617 return;
1618 case LINK_PROTOCOL:
1619 case NAME_DISTRIBUTOR:
1620 case TUNNEL_PROTOCOL:
1621 case BCAST_PROTOCOL:
1622 return;
1623 default:
1624 return;
1625 }
1626 }
1627
1628 /**
1629 * tipc_node_xmit() is the general link level function for message sending
1630 * @net: the applicable net namespace
1631 * @list: chain of buffers containing message
1632 * @dnode: address of destination node
1633 * @selector: a number used for deterministic link selection
1634 * Consumes the buffer chain.
1635 * Returns 0 if success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE, -ENOBUFS
1636 */
1637 int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1638 u32 dnode, int selector)
1639 {
1640 struct tipc_link_entry *le = NULL;
1641 struct tipc_node *n;
1642 struct sk_buff_head xmitq;
1643 bool node_up = false;
1644 int bearer_id;
1645 int rc;
1646
1647 if (in_own_node(net, dnode)) {
1648 tipc_loopback_trace(net, list);
1649 spin_lock_init(&list->lock);
1650 tipc_sk_rcv(net, list);
1651 return 0;
1652 }
1653
1654 n = tipc_node_find(net, dnode);
1655 if (unlikely(!n)) {
1656 __skb_queue_purge(list);
1657 return -EHOSTUNREACH;
1658 }
1659
1660 tipc_node_read_lock(n);
1661 node_up = node_is_up(n);
1662 if (node_up && n->peer_net && check_net(n->peer_net)) {
1663 /* xmit inner linux container */
1664 tipc_lxc_xmit(n->peer_net, list);
1665 if (likely(skb_queue_empty(list))) {
1666 tipc_node_read_unlock(n);
1667 tipc_node_put(n);
1668 return 0;
1669 }
1670 }
1671
1672 bearer_id = n->active_links[selector & 1];
1673 if (unlikely(bearer_id == INVALID_BEARER_ID)) {
1674 tipc_node_read_unlock(n);
1675 tipc_node_put(n);
1676 __skb_queue_purge(list);
1677 return -EHOSTUNREACH;
1678 }
1679
1680 __skb_queue_head_init(&xmitq);
1681 le = &n->links[bearer_id];
1682 spin_lock_bh(&le->lock);
1683 rc = tipc_link_xmit(le->link, list, &xmitq);
1684 spin_unlock_bh(&le->lock);
1685 tipc_node_read_unlock(n);
1686
1687 if (unlikely(rc == -ENOBUFS))
1688 tipc_node_link_down(n, bearer_id, false);
1689 else
1690 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1691
1692 tipc_node_put(n);
1693
1694 return rc;
1695 }
1696
1697 /* tipc_node_xmit_skb(): send single buffer to destination
1698 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
1699 * messages, which will not be rejected
1700 * The only exception is datagram messages rerouted after secondary
1701 * lookup, which are rare and safe to dispose of anyway.
1702 */
1703 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
1704 u32 selector)
1705 {
1706 struct sk_buff_head head;
1707
1708 __skb_queue_head_init(&head);
1709 __skb_queue_tail(&head, skb);
1710 tipc_node_xmit(net, &head, dnode, selector);
1711 return 0;
1712 }
1713
1714 /* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
1715 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
1716 */
1717 int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1718 {
1719 struct sk_buff *skb;
1720 u32 selector, dnode;
1721
1722 while ((skb = __skb_dequeue(xmitq))) {
1723 selector = msg_origport(buf_msg(skb));
1724 dnode = msg_destnode(buf_msg(skb));
1725 tipc_node_xmit_skb(net, skb, dnode, selector);
1726 }
1727 return 0;
1728 }
1729
1730 void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
1731 {
1732 struct sk_buff *txskb;
1733 struct tipc_node *n;
1734 u32 dst;
1735
1736 rcu_read_lock();
1737 list_for_each_entry_rcu(n, tipc_nodes(net), list) {
1738 dst = n->addr;
1739 if (in_own_node(net, dst))
1740 continue;
1741 if (!node_is_up(n))
1742 continue;
1743 txskb = pskb_copy(skb, GFP_ATOMIC);
1744 if (!txskb)
1745 break;
1746 msg_set_destnode(buf_msg(txskb), dst);
1747 tipc_node_xmit_skb(net, txskb, dst, 0);
1748 }
1749 rcu_read_unlock();
1750
1751 kfree_skb(skb);
1752 }
1753
1754 static void tipc_node_mcast_rcv(struct tipc_node *n)
1755 {
1756 struct tipc_bclink_entry *be = &n->bc_entry;
1757
1758 /* 'arrvq' is under inputq2's lock protection */
1759 spin_lock_bh(&be->inputq2.lock);
1760 spin_lock_bh(&be->inputq1.lock);
1761 skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
1762 spin_unlock_bh(&be->inputq1.lock);
1763 spin_unlock_bh(&be->inputq2.lock);
1764 tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
1765 }
1766
1767 static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
1768 int bearer_id, struct sk_buff_head *xmitq)
1769 {
1770 struct tipc_link *ucl;
1771 int rc;
1772
1773 rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);
1774
1775 if (rc & TIPC_LINK_DOWN_EVT) {
1776 tipc_node_reset_links(n);
1777 return;
1778 }
1779
1780 if (!(rc & TIPC_LINK_SND_STATE))
1781 return;
1782
1783 /* If probe message, a STATE response will be sent anyway */
1784 if (msg_probe(hdr))
1785 return;
1786
1787 /* Produce a STATE message carrying broadcast NACK */
1788 tipc_node_read_lock(n);
1789 ucl = n->links[bearer_id].link;
1790 if (ucl)
1791 tipc_link_build_state_msg(ucl, xmitq);
1792 tipc_node_read_unlock(n);
1793 }
1794
1795 /**
1796 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1797 * @net: the applicable net namespace
1798 * @skb: TIPC packet
1799 * @bearer_id: id of bearer message arrived on
1800 *
1801 * Invoked with no locks held.
1802 */
1803 static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
1804 {
1805 int rc;
1806 struct sk_buff_head xmitq;
1807 struct tipc_bclink_entry *be;
1808 struct tipc_link_entry *le;
1809 struct tipc_msg *hdr = buf_msg(skb);
1810 int usr = msg_user(hdr);
1811 u32 dnode = msg_destnode(hdr);
1812 struct tipc_node *n;
1813
1814 __skb_queue_head_init(&xmitq);
1815
1816 /* If NACK for other node, let rcv link for that node peek into it */
1817 if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
1818 n = tipc_node_find(net, dnode);
1819 else
1820 n = tipc_node_find(net, msg_prevnode(hdr));
1821 if (!n) {
1822 kfree_skb(skb);
1823 return;
1824 }
1825 be = &n->bc_entry;
1826 le = &n->links[bearer_id];
1827
1828 rc = tipc_bcast_rcv(net, be->link, skb);
1829
1830 /* Broadcast ACKs are sent on a unicast link */
1831 if (rc & TIPC_LINK_SND_STATE) {
1832 tipc_node_read_lock(n);
1833 tipc_link_build_state_msg(le->link, &xmitq);
1834 tipc_node_read_unlock(n);
1835 }
1836
1837 if (!skb_queue_empty(&xmitq))
1838 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1839
1840 if (!skb_queue_empty(&be->inputq1))
1841 tipc_node_mcast_rcv(n);
1842
1843 /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
1844 if (!skb_queue_empty(&n->bc_entry.namedq))
1845 tipc_named_rcv(net, &n->bc_entry.namedq);
1846
1847 /* If reassembly or retransmission failure => reset all links to peer */
1848 if (rc & TIPC_LINK_DOWN_EVT)
1849 tipc_node_reset_links(n);
1850
1851 tipc_node_put(n);
1852 }
1853
1854 /**
1855 * tipc_node_check_state - check and if necessary update node state
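 * @n: the node the packet was received from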
1856 * @skb: TIPC packet
1857 * @bearer_id: identity of bearer delivering the packet
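 * @xmitq: queue for messages to be transmitted in response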
1858 * Returns true if state and msg are ok, otherwise false
1859 */
1860 static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1861 int bearer_id, struct sk_buff_head *xmitq)
1862 {
1863 struct tipc_msg *hdr = buf_msg(skb);
1864 int usr = msg_user(hdr);
1865 int mtyp = msg_type(hdr);
1866 u16 oseqno = msg_seqno(hdr);
1867 u16 exp_pkts = msg_msgcnt(hdr);
1868 u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
1869 int state = n->state;
1870 struct tipc_link *l, *tnl, *pl = NULL;
1871 struct tipc_media_addr *maddr;
1872 int pb_id;
1873
1874 if (trace_tipc_node_check_state_enabled()) {
1875 trace_tipc_skb_dump(skb, false, "skb for node state check");
1876 trace_tipc_node_check_state(n, true, " ");
1877 }
1878 l = n->links[bearer_id].link;
1879 if (!l)
1880 return false;
1881 rcv_nxt = tipc_link_rcv_nxt(l);
1882
1883
1884 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1885 return true;
1886
1887 /* Find parallel link, if any */
1888 for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1889 if ((pb_id != bearer_id) && n->links[pb_id].link) {
1890 pl = n->links[pb_id].link;
1891 break;
1892 }
1893 }
1894
1895 if (!tipc_link_validate_msg(l, hdr)) {
1896 trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
1897 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
1898 return false;
1899 }
1900
1901 /* Check and update node accessibility if applicable */
1902 if (state == SELF_UP_PEER_COMING) {
1903 if (!tipc_link_is_up(l))
1904 return true;
1905 if (!msg_peer_link_is_up(hdr))
1906 return true;
1907 tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
1908 }
1909
1910 if (state == SELF_DOWN_PEER_LEAVING) {
1911 if (msg_peer_node_is_up(hdr))
1912 return false;
1913 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1914 return true;
1915 }
1916
1917 if (state == SELF_LEAVING_PEER_DOWN)
1918 return false;
1919
1920 /* Ignore duplicate packets */
1921 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1922 return true;
1923
1924 /* Initiate or update failover mode if applicable */
1925 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1926 syncpt = oseqno + exp_pkts - 1;
1927 if (pl && !tipc_link_is_reset(pl)) {
1928 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1929 trace_tipc_node_link_down(n, true,
1930 "node link down <- failover!");
1931 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1932 tipc_link_inputq(l));
1933 }
1934
1935 /* If the parallel link was already down, and this happened before
1936 * the tunnel link came up, node failover was never started.
1937 * Ensure that a FAILOVER_MSG is sent to get the peer out of
1938 * NODE_FAILINGOVER state; this node must also accept
1939 * TUNNEL_MSGs from the peer.
1940 */
1941 if (n->state != NODE_FAILINGOVER)
1942 tipc_node_link_failover(n, pl, l, xmitq);
1943
1944 /* If pkts arrive out of order, use lowest calculated syncpt */
1945 if (less(syncpt, n->sync_point))
1946 n->sync_point = syncpt;
1947 }
1948
1949 /* Open parallel link when tunnel link reaches synch point */
1950 if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
1951 if (!more(rcv_nxt, n->sync_point))
1952 return true;
1953 tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
1954 if (pl)
1955 tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
1956 return true;
1957 }
1958
1959 /* No synching needed if only one link */
1960 if (!pl || !tipc_link_is_up(pl))
1961 return true;
1962
1963 /* Initiate synch mode if applicable */
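/* Peers with the TIPC_TUNNEL_ENHANCED capability carry the sync point
 * in a dedicated header field; for older peers it is derived from the
 * bundled (inner) message's seqno plus the announced packet count.
 */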
1964 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
1965 if (n->capabilities & TIPC_TUNNEL_ENHANCED)
1966 syncpt = msg_syncpt(hdr);
1967 else
1968 syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
1969 if (!tipc_link_is_up(l))
1970 __tipc_node_link_up(n, bearer_id, xmitq);
1971 if (n->state == SELF_UP_PEER_UP) {
1972 n->sync_point = syncpt;
1973 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
1974 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
1975 }
1976 }
1977
1978 /* Open tunnel link when parallel link reaches synch point */
1979 if (n->state == NODE_SYNCHING) {
1980 if (tipc_link_is_synching(l)) {
1981 tnl = l;
1982 } else {
1983 tnl = pl;
1984 pl = l;
1985 }
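/* dlv_nxt is the seqno of the next packet actually delivered to the
 * upper layer on the parallel link; packets still sitting in its input
 * queue have not been delivered yet.
 */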
1986 inputq_len = skb_queue_len(tipc_link_inputq(pl));
1987 dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
1988 if (more(dlv_nxt, n->sync_point)) {
1989 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
1990 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
1991 return true;
1992 }
1993 if (l == pl)
1994 return true;
1995 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
1996 return true;
1997 if (usr == LINK_PROTOCOL)
1998 return true;
1999 return false;
2000 }
2001 return true;
2002 }
2003
2004 /**
2005 * tipc_rcv - process TIPC packets/messages arriving from off-node
2006 * @net: the applicable net namespace
2007 * @skb: TIPC packet
2008 * @b: pointer to bearer the message arrived on
2009 *
2010 * Invoked with no locks held. Bearer pointer must point to a valid bearer
2011 * structure (i.e. cannot be NULL), but bearer can be inactive.
2012 */
2013 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
2014 {
2015 struct sk_buff_head xmitq;
2016 struct tipc_link_entry *le;
2017 struct tipc_msg *hdr;
2018 struct tipc_node *n;
2019 int bearer_id = b->identity;
2020 u32 self = tipc_own_addr(net);
2021 int usr, rc = 0;
2022 u16 bc_ack;
2023 #ifdef CONFIG_TIPC_CRYPTO
2024 struct tipc_ehdr *ehdr;
2025
2026 /* Check if message must be decrypted first */
2027 if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
2028 goto rcv;
2029
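/* Encrypted LINK_CONFIG (discovery) messages may come from a node we
 * do not know yet, so they are matched on the 128-bit node id rather
 * than the 32-bit node address.
 */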
2030 ehdr = (struct tipc_ehdr *)skb->data;
2031 if (likely(ehdr->user != LINK_CONFIG)) {
2032 n = tipc_node_find(net, ntohl(ehdr->addr));
2033 if (unlikely(!n))
2034 goto discard;
2035 } else {
2036 n = tipc_node_find_by_id(net, ehdr->id);
2037 }
2038 tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
2039 if (!skb)
2040 return;
2041
2042 rcv:
2043 #endif
2044 /* Ensure message is well-formed before touching the header */
2045 if (unlikely(!tipc_msg_validate(&skb)))
2046 goto discard;
2047 __skb_queue_head_init(&xmitq);
2048 hdr = buf_msg(skb);
2049 usr = msg_user(hdr);
2050 bc_ack = msg_bcast_ack(hdr);
2051
2052 /* Handle arrival of discovery or broadcast packet */
2053 if (unlikely(msg_non_seq(hdr))) {
2054 if (unlikely(usr == LINK_CONFIG))
2055 return tipc_disc_rcv(net, skb, b);
2056 else
2057 return tipc_node_bc_rcv(net, skb, bearer_id);
2058 }
2059
2060 /* Discard unicast link messages destined for another node */
2061 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
2062 goto discard;
2063
2064 /* Locate neighboring node that sent packet */
2065 n = tipc_node_find(net, msg_prevnode(hdr));
2066 if (unlikely(!n))
2067 goto discard;
2068 le = &n->links[bearer_id];
2069
2070 /* Ensure broadcast reception is in synch with peer's send state */
2071 if (unlikely(usr == LINK_PROTOCOL))
2072 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
2073 else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
2074 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
2075
2076 /* Receive packet directly if conditions permit */
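/* Fast path: in SELF_UP_PEER_UP with no tunnel traffic the per-link
 * spinlock under the node read lock is sufficient; the slow path below
 * takes the node write lock so the node state can be updated first.
 */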
2077 tipc_node_read_lock(n);
2078 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
2079 spin_lock_bh(&le->lock);
2080 if (le->link) {
2081 rc = tipc_link_rcv(le->link, skb, &xmitq);
2082 skb = NULL;
2083 }
2084 spin_unlock_bh(&le->lock);
2085 }
2086 tipc_node_read_unlock(n);
2087
2088 /* Check/update node state before receiving */
2089 if (unlikely(skb)) {
2090 if (unlikely(skb_linearize(skb)))
2091 goto discard;
2092 tipc_node_write_lock(n);
2093 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
2094 if (le->link) {
2095 rc = tipc_link_rcv(le->link, skb, &xmitq);
2096 skb = NULL;
2097 }
2098 }
2099 tipc_node_write_unlock(n);
2100 }
2101
2102 if (unlikely(rc & TIPC_LINK_UP_EVT))
2103 tipc_node_link_up(n, bearer_id, &xmitq);
2104
2105 if (unlikely(rc & TIPC_LINK_DOWN_EVT))
2106 tipc_node_link_down(n, bearer_id, false);
2107
2108 if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
2109 tipc_named_rcv(net, &n->bc_entry.namedq);
2110
2111 if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
2112 tipc_node_mcast_rcv(n);
2113
2114 if (!skb_queue_empty(&le->inputq))
2115 tipc_sk_rcv(net, &le->inputq);
2116
2117 if (!skb_queue_empty(&xmitq))
2118 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
2119
2120 tipc_node_put(n);
2121 discard:
2122 kfree_skb(skb);
2123 }
2124
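/* tipc_node_apply_property - propagate a changed bearer property to the
 * corresponding link on every known peer node. Only link tolerance
 * (TIPC_NLA_PROP_TOL) and MTU (TIPC_NLA_PROP_MTU) are handled here.
 */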
2125 void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
2126 int prop)
2127 {
2128 struct tipc_net *tn = tipc_net(net);
2129 int bearer_id = b->identity;
2130 struct sk_buff_head xmitq;
2131 struct tipc_link_entry *e;
2132 struct tipc_node *n;
2133
2134 __skb_queue_head_init(&xmitq);
2135
2136 rcu_read_lock();
2137
2138 list_for_each_entry_rcu(n, &tn->node_list, list) {
2139 tipc_node_write_lock(n);
2140 e = &n->links[bearer_id];
2141 if (e->link) {
2142 if (prop == TIPC_NLA_PROP_TOL)
2143 tipc_link_set_tolerance(e->link, b->tolerance,
2144 &xmitq);
2145 else if (prop == TIPC_NLA_PROP_MTU)
2146 tipc_link_set_mtu(e->link, b->mtu);
2147 }
2148 tipc_node_write_unlock(n);
2149 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
2150 }
2151
2152 rcu_read_unlock();
2153 }
2154
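/* tipc_nl_peer_rm - netlink handler that removes a peer node which is
 * already down. Typically reached from the iproute2 tipc tool, e.g.
 * something like "tipc peer remove address <z.c.n>" (exact CLI syntax
 * may vary).
 */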
2155 int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
2156 {
2157 struct net *net = sock_net(skb->sk);
2158 struct tipc_net *tn = net_generic(net, tipc_net_id);
2159 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
2160 struct tipc_node *peer, *temp_node;
2161 u32 addr;
2162 int err;
2163
2164 /* We identify the peer by its network address */
2165 if (!info->attrs[TIPC_NLA_NET])
2166 return -EINVAL;
2167
2168 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
2169 info->attrs[TIPC_NLA_NET],
2170 tipc_nl_net_policy, info->extack);
2171 if (err)
2172 return err;
2173
2174 if (!attrs[TIPC_NLA_NET_ADDR])
2175 return -EINVAL;
2176
2177 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
2178
2179 if (in_own_node(net, addr))
2180 return -ENOTSUPP;
2181
2182 spin_lock_bh(&tn->node_list_lock);
2183 peer = tipc_node_find(net, addr);
2184 if (!peer) {
2185 spin_unlock_bh(&tn->node_list_lock);
2186 return -ENXIO;
2187 }
2188
2189 tipc_node_write_lock(peer);
2190 if (peer->state != SELF_DOWN_PEER_DOWN &&
2191 peer->state != SELF_DOWN_PEER_LEAVING) {
2192 tipc_node_write_unlock(peer);
2193 err = -EBUSY;
2194 goto err_out;
2195 }
2196
2197 tipc_node_clear_links(peer);
2198 tipc_node_write_unlock(peer);
2199 tipc_node_delete(peer);
2200
2201 /* Calculate cluster capabilities */
2202 tn->capabilities = TIPC_NODE_CAPABILITIES;
2203 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
2204 tn->capabilities &= temp_node->capabilities;
2205 }
2206 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
2207 err = 0;
2208 err_out:
2209 tipc_node_put(peer);
2210 spin_unlock_bh(&tn->node_list_lock);
2211
2212 return err;
2213 }
2214
2215 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
2216 {
2217 int err;
2218 struct net *net = sock_net(skb->sk);
2219 struct tipc_net *tn = net_generic(net, tipc_net_id);
2220 int done = cb->args[0];
2221 int last_addr = cb->args[1];
2222 struct tipc_node *node;
2223 struct tipc_nl_msg msg;
2224
2225 if (done)
2226 return 0;
2227
2228 msg.skb = skb;
2229 msg.portid = NETLINK_CB(cb->skb).portid;
2230 msg.seq = cb->nlh->nlmsg_seq;
2231
2232 rcu_read_lock();
2233 if (last_addr) {
2234 node = tipc_node_find(net, last_addr);
2235 if (!node) {
2236 rcu_read_unlock();
2237 /* We never set seq or call nl_dump_check_consistent(),
2238 * so setting prev_seq here will cause the
2239 * consistency check to fail in the netlink callback
2240 * handler, resulting in the NLMSG_DONE message having
2241 * the NLM_F_DUMP_INTR flag set if the node state
2242 * changed while we released the lock.
2243 */
2244 cb->prev_seq = 1;
2245 return -EPIPE;
2246 }
2247 tipc_node_put(node);
2248 }
2249
2250 list_for_each_entry_rcu(node, &tn->node_list, list) {
2251 if (node->preliminary)
2252 continue;
2253 if (last_addr) {
2254 if (node->addr == last_addr)
2255 last_addr = 0;
2256 else
2257 continue;
2258 }
2259
2260 tipc_node_read_lock(node);
2261 err = __tipc_nl_add_node(&msg, node);
2262 if (err) {
2263 last_addr = node->addr;
2264 tipc_node_read_unlock(node);
2265 goto out;
2266 }
2267
2268 tipc_node_read_unlock(node);
2269 }
2270 done = 1;
2271 out:
2272 cb->args[0] = done;
2273 cb->args[1] = last_addr;
2274 rcu_read_unlock();
2275
2276 return skb->len;
2277 }
2278
2279 /* tipc_node_find_by_name - locate owner node of link by link's name
2280 * @net: the applicable net namespace
2281 * @link_name: pointer to link name string
2282 * @bearer_id: pointer to index in 'node->links' array where the link was found.
2283 *
2284 * Returns pointer to node owning the link, or NULL if no matching link is found.
2285 */
2286 static struct tipc_node *tipc_node_find_by_name(struct net *net,
2287 const char *link_name,
2288 unsigned int *bearer_id)
2289 {
2290 struct tipc_net *tn = net_generic(net, tipc_net_id);
2291 struct tipc_link *l;
2292 struct tipc_node *n;
2293 struct tipc_node *found_node = NULL;
2294 int i;
2295
2296 *bearer_id = 0;
2297 rcu_read_lock();
2298 list_for_each_entry_rcu(n, &tn->node_list, list) {
2299 tipc_node_read_lock(n);
2300 for (i = 0; i < MAX_BEARERS; i++) {
2301 l = n->links[i].link;
2302 if (l && !strcmp(tipc_link_name(l), link_name)) {
2303 *bearer_id = i;
2304 found_node = n;
2305 break;
2306 }
2307 }
2308 tipc_node_read_unlock(n);
2309 if (found_node)
2310 break;
2311 }
2312 rcu_read_unlock();
2313
2314 return found_node;
2315 }
2316
2317 int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
2318 {
2319 int err;
2320 int res = 0;
2321 int bearer_id;
2322 char *name;
2323 struct tipc_link *link;
2324 struct tipc_node *node;
2325 struct sk_buff_head xmitq;
2326 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2327 struct net *net = sock_net(skb->sk);
2328
2329 __skb_queue_head_init(&xmitq);
2330
2331 if (!info->attrs[TIPC_NLA_LINK])
2332 return -EINVAL;
2333
2334 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2335 info->attrs[TIPC_NLA_LINK],
2336 tipc_nl_link_policy, info->extack);
2337 if (err)
2338 return err;
2339
2340 if (!attrs[TIPC_NLA_LINK_NAME])
2341 return -EINVAL;
2342
2343 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2344
2345 if (strcmp(name, tipc_bclink_name) == 0)
2346 return tipc_nl_bc_link_set(net, attrs);
2347
2348 node = tipc_node_find_by_name(net, name, &bearer_id);
2349 if (!node)
2350 return -EINVAL;
2351
2352 tipc_node_read_lock(node);
2353
2354 link = node->links[bearer_id].link;
2355 if (!link) {
2356 res = -EINVAL;
2357 goto out;
2358 }
2359
2360 if (attrs[TIPC_NLA_LINK_PROP]) {
2361 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
2362
2363 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
2364 props);
2365 if (err) {
2366 res = err;
2367 goto out;
2368 }
2369
2370 if (props[TIPC_NLA_PROP_TOL]) {
2371 u32 tol;
2372
2373 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2374 tipc_link_set_tolerance(link, tol, &xmitq);
2375 }
2376 if (props[TIPC_NLA_PROP_PRIO]) {
2377 u32 prio;
2378
2379 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2380 tipc_link_set_prio(link, prio, &xmitq);
2381 }
2382 if (props[TIPC_NLA_PROP_WIN]) {
2383 u32 win;
2384
2385 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2386 tipc_link_set_queue_limits(link, win);
2387 }
2388 }
2389
2390 out:
2391 tipc_node_read_unlock(node);
2392 tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
2393 NULL);
2394 return res;
2395 }
2396
2397 int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
2398 {
2399 struct net *net = genl_info_net(info);
2400 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2401 struct tipc_nl_msg msg;
2402 char *name;
2403 int err;
2404
2405 msg.portid = info->snd_portid;
2406 msg.seq = info->snd_seq;
2407
2408 if (!info->attrs[TIPC_NLA_LINK])
2409 return -EINVAL;
2410
2411 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2412 info->attrs[TIPC_NLA_LINK],
2413 tipc_nl_link_policy, info->extack);
2414 if (err)
2415 return err;
2416
2417 if (!attrs[TIPC_NLA_LINK_NAME])
2418 return -EINVAL;
2419
2420 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2421
2422 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2423 if (!msg.skb)
2424 return -ENOMEM;
2425
2426 if (strcmp(name, tipc_bclink_name) == 0) {
2427 err = tipc_nl_add_bc_link(net, &msg);
2428 if (err)
2429 goto err_free;
2430 } else {
2431 int bearer_id;
2432 struct tipc_node *node;
2433 struct tipc_link *link;
2434
2435 node = tipc_node_find_by_name(net, name, &bearer_id);
2436 if (!node) {
2437 err = -EINVAL;
2438 goto err_free;
2439 }
2440
2441 tipc_node_read_lock(node);
2442 link = node->links[bearer_id].link;
2443 if (!link) {
2444 tipc_node_read_unlock(node);
2445 err = -EINVAL;
2446 goto err_free;
2447 }
2448
2449 err = __tipc_nl_add_link(net, &msg, link, 0);
2450 tipc_node_read_unlock(node);
2451 if (err)
2452 goto err_free;
2453 }
2454
2455 return genlmsg_reply(msg.skb, info);
2456
2457 err_free:
2458 nlmsg_free(msg.skb);
2459 return err;
2460 }
2461
2462 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
2463 {
2464 int err;
2465 char *link_name;
2466 unsigned int bearer_id;
2467 struct tipc_link *link;
2468 struct tipc_node *node;
2469 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2470 struct net *net = sock_net(skb->sk);
2471 struct tipc_link_entry *le;
2472
2473 if (!info->attrs[TIPC_NLA_LINK])
2474 return -EINVAL;
2475
2476 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2477 info->attrs[TIPC_NLA_LINK],
2478 tipc_nl_link_policy, info->extack);
2479 if (err)
2480 return err;
2481
2482 if (!attrs[TIPC_NLA_LINK_NAME])
2483 return -EINVAL;
2484
2485 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2486
2487 if (strcmp(link_name, tipc_bclink_name) == 0) {
2488 err = tipc_bclink_reset_stats(net);
2489 if (err)
2490 return err;
2491 return 0;
2492 }
2493
2494 node = tipc_node_find_by_name(net, link_name, &bearer_id);
2495 if (!node)
2496 return -EINVAL;
2497
2498 le = &node->links[bearer_id];
2499 tipc_node_read_lock(node);
2500 spin_lock_bh(&le->lock);
2501 link = node->links[bearer_id].link;
2502 if (!link) {
2503 spin_unlock_bh(&le->lock);
2504 tipc_node_read_unlock(node);
2505 return -EINVAL;
2506 }
2507 tipc_link_reset_stats(link);
2508 spin_unlock_bh(&le->lock);
2509 tipc_node_read_unlock(node);
2510 return 0;
2511 }
2512
2513 /* Caller should hold node lock */
2514 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2515 struct tipc_node *node, u32 *prev_link)
2516 {
2517 u32 i;
2518 int err;
2519
2520 for (i = *prev_link; i < MAX_BEARERS; i++) {
2521 *prev_link = i;
2522
2523 if (!node->links[i].link)
2524 continue;
2525
2526 err = __tipc_nl_add_link(net, msg,
2527 node->links[i].link, NLM_F_MULTI);
2528 if (err)
2529 return err;
2530 }
2531 *prev_link = 0;
2532
2533 return 0;
2534 }
2535
2536 int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
2537 {
2538 struct net *net = sock_net(skb->sk);
2539 struct tipc_net *tn = net_generic(net, tipc_net_id);
2540 struct tipc_node *node;
2541 struct tipc_nl_msg msg;
2542 u32 prev_node = cb->args[0];
2543 u32 prev_link = cb->args[1];
2544 int done = cb->args[2];
2545 int err;
2546
2547 if (done)
2548 return 0;
2549
2550 msg.skb = skb;
2551 msg.portid = NETLINK_CB(cb->skb).portid;
2552 msg.seq = cb->nlh->nlmsg_seq;
2553
2554 rcu_read_lock();
2555 if (prev_node) {
2556 node = tipc_node_find(net, prev_node);
2557 if (!node) {
2558 /* We never set seq or call nl_dump_check_consistent(),
2559 * so setting prev_seq here will cause the
2560 * consistency check to fail in the netlink callback
2561 * handler, resulting in the last NLMSG_DONE message
2562 * having the NLM_F_DUMP_INTR flag set.
2563 */
2564 cb->prev_seq = 1;
2565 goto out;
2566 }
2567 tipc_node_put(node);
2568
2569 list_for_each_entry_continue_rcu(node, &tn->node_list,
2570 list) {
2571 tipc_node_read_lock(node);
2572 err = __tipc_nl_add_node_links(net, &msg, node,
2573 &prev_link);
2574 tipc_node_read_unlock(node);
2575 if (err)
2576 goto out;
2577
2578 prev_node = node->addr;
2579 }
2580 } else {
2581 err = tipc_nl_add_bc_link(net, &msg);
2582 if (err)
2583 goto out;
2584
2585 list_for_each_entry_rcu(node, &tn->node_list, list) {
2586 tipc_node_read_lock(node);
2587 err = __tipc_nl_add_node_links(net, &msg, node,
2588 &prev_link);
2589 tipc_node_read_unlock(node);
2590 if (err)
2591 goto out;
2592
2593 prev_node = node->addr;
2594 }
2595 }
2596 done = 1;
2597 out:
2598 rcu_read_unlock();
2599
2600 cb->args[0] = prev_node;
2601 cb->args[1] = prev_link;
2602 cb->args[2] = done;
2603
2604 return skb->len;
2605 }
2606
2607 int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
2608 {
2609 struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
2610 struct net *net = sock_net(skb->sk);
2611 int err;
2612
2613 if (!info->attrs[TIPC_NLA_MON])
2614 return -EINVAL;
2615
2616 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
2617 info->attrs[TIPC_NLA_MON],
2618 tipc_nl_monitor_policy,
2619 info->extack);
2620 if (err)
2621 return err;
2622
2623 if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
2624 u32 val;
2625
2626 val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
2627 err = tipc_nl_monitor_set_threshold(net, val);
2628 if (err)
2629 return err;
2630 }
2631
2632 return 0;
2633 }
2634
2635 static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
2636 {
2637 struct nlattr *attrs;
2638 void *hdr;
2639 u32 val;
2640
2641 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2642 0, TIPC_NL_MON_GET);
2643 if (!hdr)
2644 return -EMSGSIZE;
2645
2646 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
2647 if (!attrs)
2648 goto msg_full;
2649
2650 val = tipc_nl_monitor_get_threshold(net);
2651
2652 if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
2653 goto attr_msg_full;
2654
2655 nla_nest_end(msg->skb, attrs);
2656 genlmsg_end(msg->skb, hdr);
2657
2658 return 0;
2659
2660 attr_msg_full:
2661 nla_nest_cancel(msg->skb, attrs);
2662 msg_full:
2663 genlmsg_cancel(msg->skb, hdr);
2664
2665 return -EMSGSIZE;
2666 }
2667
2668 int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
2669 {
2670 struct net *net = sock_net(skb->sk);
2671 struct tipc_nl_msg msg;
2672 int err;
2673
2674 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2675 if (!msg.skb)
2676 return -ENOMEM;
2677 msg.portid = info->snd_portid;
2678 msg.seq = info->snd_seq;
2679
2680 err = __tipc_nl_add_monitor_prop(net, &msg);
2681 if (err) {
2682 nlmsg_free(msg.skb);
2683 return err;
2684 }
2685
2686 return genlmsg_reply(msg.skb, info);
2687 }
2688
2689 int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2690 {
2691 struct net *net = sock_net(skb->sk);
2692 u32 prev_bearer = cb->args[0];
2693 struct tipc_nl_msg msg;
2694 int bearer_id;
2695 int err;
2696
2697 if (prev_bearer == MAX_BEARERS)
2698 return 0;
2699
2700 msg.skb = skb;
2701 msg.portid = NETLINK_CB(cb->skb).portid;
2702 msg.seq = cb->nlh->nlmsg_seq;
2703
2704 rtnl_lock();
2705 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
2706 err = __tipc_nl_add_monitor(net, &msg, bearer_id);
2707 if (err)
2708 break;
2709 }
2710 rtnl_unlock();
2711 cb->args[0] = bearer_id;
2712
2713 return skb->len;
2714 }
2715
2716 int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
2717 struct netlink_callback *cb)
2718 {
2719 struct net *net = sock_net(skb->sk);
2720 u32 prev_node = cb->args[1];
2721 u32 bearer_id = cb->args[2];
2722 int done = cb->args[0];
2723 struct tipc_nl_msg msg;
2724 int err;
2725
2726 if (!prev_node) {
2727 struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
2728 struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
2729
2730 if (!attrs[TIPC_NLA_MON])
2731 return -EINVAL;
2732
2733 err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
2734 attrs[TIPC_NLA_MON],
2735 tipc_nl_monitor_policy,
2736 NULL);
2737 if (err)
2738 return err;
2739
2740 if (!mon[TIPC_NLA_MON_REF])
2741 return -EINVAL;
2742
2743 bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
2744
2745 if (bearer_id >= MAX_BEARERS)
2746 return -EINVAL;
2747 }
2748
2749 if (done)
2750 return 0;
2751
2752 msg.skb = skb;
2753 msg.portid = NETLINK_CB(cb->skb).portid;
2754 msg.seq = cb->nlh->nlmsg_seq;
2755
2756 rtnl_lock();
2757 err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
2758 if (!err)
2759 done = 1;
2760
2761 rtnl_unlock();
2762 cb->args[0] = done;
2763 cb->args[1] = prev_node;
2764 cb->args[2] = bearer_id;
2765
2766 return skb->len;
2767 }
2768
2769 #ifdef CONFIG_TIPC_CRYPTO
2770 static int tipc_nl_retrieve_key(struct nlattr **attrs,
2771 struct tipc_aead_key **key)
2772 {
2773 struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
2774
2775 if (!attr)
2776 return -ENODATA;
2777
2778 *key = (struct tipc_aead_key *)nla_data(attr);
2779 if (nla_len(attr) < tipc_aead_key_size(*key))
2780 return -EINVAL;
2781
2782 return 0;
2783 }
2784
2785 static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
2786 {
2787 struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
2788
2789 if (!attr)
2790 return -ENODATA;
2791
2792 if (nla_len(attr) < TIPC_NODEID_LEN)
2793 return -EINVAL;
2794
2795 *node_id = (u8 *)nla_data(attr);
2796 return 0;
2797 }
2798
2799 int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
2800 {
2801 struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
2802 struct net *net = sock_net(skb->sk);
2803 struct tipc_net *tn = tipc_net(net);
2804 struct tipc_node *n = NULL;
2805 struct tipc_aead_key *ukey;
2806 struct tipc_crypto *c;
2807 u8 *id, *own_id;
2808 int rc = 0;
2809
2810 if (!info->attrs[TIPC_NLA_NODE])
2811 return -EINVAL;
2812
2813 rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
2814 info->attrs[TIPC_NLA_NODE],
2815 tipc_nl_node_policy, info->extack);
2816 if (rc)
2817 goto exit;
2818
2819 own_id = tipc_own_id(net);
2820 if (!own_id) {
2821 rc = -EPERM;
2822 goto exit;
2823 }
2824
2825 rc = tipc_nl_retrieve_key(attrs, &ukey);
2826 if (rc)
2827 goto exit;
2828
2829 rc = tipc_aead_key_validate(ukey);
2830 if (rc)
2831 goto exit;
2832
2833 rc = tipc_nl_retrieve_nodeid(attrs, &id);
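/* Without a node id attribute the key becomes a cluster-wide TX key;
 * with one, it becomes our own TX key if the id matches this node,
 * otherwise an RX key bound to that specific peer.
 */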
2834 switch (rc) {
2835 case -ENODATA:
2836 /* Cluster key mode */
2837 rc = tipc_crypto_key_init(tn->crypto_tx, ukey, CLUSTER_KEY);
2838 break;
2839 case 0:
2840 /* Per-node key mode */
2841 if (!memcmp(id, own_id, NODE_ID_LEN)) {
2842 c = tn->crypto_tx;
2843 } else {
2844 n = tipc_node_find_by_id(net, id) ?:
2845 tipc_node_create(net, 0, id, 0xffffu, 0, true);
2846 if (unlikely(!n)) {
2847 rc = -ENOMEM;
2848 break;
2849 }
2850 c = n->crypto_rx;
2851 }
2852
2853 rc = tipc_crypto_key_init(c, ukey, PER_NODE_KEY);
2854 if (n)
2855 tipc_node_put(n);
2856 break;
2857 default:
2858 break;
2859 }
2860
2861 exit:
2862 return (rc < 0) ? rc : 0;
2863 }
2864
2865 int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
2866 {
2867 int err;
2868
2869 rtnl_lock();
2870 err = __tipc_nl_node_set_key(skb, info);
2871 rtnl_unlock();
2872
2873 return err;
2874 }
2875
2876 int __tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
2877 {
2878 struct net *net = sock_net(skb->sk);
2879 struct tipc_net *tn = tipc_net(net);
2880 struct tipc_node *n;
2881
2882 tipc_crypto_key_flush(tn->crypto_tx);
2883 rcu_read_lock();
2884 list_for_each_entry_rcu(n, &tn->node_list, list)
2885 tipc_crypto_key_flush(n->crypto_rx);
2886 rcu_read_unlock();
2887
2888 pr_info("All keys are flushed!\n");
2889 return 0;
2890 }
2891
2892 int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
2893 {
2894 int err;
2895
2896 rtnl_lock();
2897 err = __tipc_nl_node_flush_key(skb, info);
2898 rtnl_unlock();
2899
2900 return err;
2901 }
2902 #endif
2903
2904 /**
2905 * tipc_node_dump - dump TIPC node data
2906 * @n: tipc node to be dumped
2907 * @more: dump more?
2908 * - false: dump only tipc node data
2909 * - true: dump node link data as well
2910 * @buf: buffer receiving the formatted dump data
2911 */
2912 int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
2913 {
2914 int i = 0;
2915 size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
2916
2917 if (!n) {
2918 i += scnprintf(buf, sz, "node data: (null)\n");
2919 return i;
2920 }
2921
2922 i += scnprintf(buf, sz, "node data: %x", n->addr);
2923 i += scnprintf(buf + i, sz - i, " %x", n->state);
2924 i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
2925 i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
2926 i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
2927 i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
2928 i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
2929 i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
2930 i += scnprintf(buf + i, sz - i, " %u", n->working_links);
2931 i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
2932 i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
2933
2934 if (!more)
2935 return i;
2936
2937 i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
2938 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
2939 i += scnprintf(buf + i, sz - i, " media: ");
2940 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
2941 i += scnprintf(buf + i, sz - i, "\n");
2942 i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
2943 i += scnprintf(buf + i, sz - i, " inputq: ");
2944 i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
2945
2946 i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
2947 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
2948 i += scnprintf(buf + i, sz - i, " media: ");
2949 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
2950 i += scnprintf(buf + i, sz - i, "\n");
2951 i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
2952 i += scnprintf(buf + i, sz - i, " inputq: ");
2953 i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
2954
2955 i += scnprintf(buf + i, sz - i, "bclink:\n ");
2956 i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
2957
2958 return i;
2959 }
2960
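/* tipc_node_pre_cleanup_net - drop references to a net namespace that
 * is being torn down: any node in another namespace whose peer_net
 * points at exit_net gets that pointer cleared before the namespace
 * disappears.
 */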
2961 void tipc_node_pre_cleanup_net(struct net *exit_net)
2962 {
2963 struct tipc_node *n;
2964 struct tipc_net *tn;
2965 struct net *tmp;
2966
2967 rcu_read_lock();
2968 for_each_net_rcu(tmp) {
2969 if (tmp == exit_net)
2970 continue;
2971 tn = tipc_net(tmp);
2972 if (!tn)
2973 continue;
2974 spin_lock_bh(&tn->node_list_lock);
2975 list_for_each_entry_rcu(n, &tn->node_list, list) {
2976 if (!n->peer_net)
2977 continue;
2978 if (n->peer_net != exit_net)
2979 continue;
2980 tipc_node_write_lock(n);
2981 n->peer_net = NULL;
2982 n->peer_hash_mix = 0;
2983 tipc_node_write_unlock_fast(n);
2984 break;
2985 }
2986 spin_unlock_bh(&tn->node_list_lock);
2987 }
2988 rcu_read_unlock();
2989 }