1 /*
2 * net/tipc/node.c: TIPC node management routines
3 *
4 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "link.h"
39 #include "node.h"
40 #include "name_distr.h"
41 #include "socket.h"
42 #include "bcast.h"
43 #include "monitor.h"
44 #include "discover.h"
45 #include "netlink.h"
46 #include "trace.h"
47 #include "crypto.h"
48
49 #define INVALID_NODE_SIG 0x10000 /* outside the valid 16-bit signature range */
50 #define NODE_CLEANUP_AFTER 300000 /* ms: grace period before a down node is deleted */
51
52 /* Flags used to take different actions according to flag type
53 * TIPC_NOTIFY_NODE_DOWN: notify node is down
54 * TIPC_NOTIFY_NODE_UP: notify node is up
55  * TIPC_NOTIFY_LINK_UP/TIPC_NOTIFY_LINK_DOWN: notify link is up/down
56 */
57 enum {
58 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
59 TIPC_NOTIFY_NODE_UP = (1 << 4),
60 TIPC_NOTIFY_LINK_UP = (1 << 6),
61 TIPC_NOTIFY_LINK_DOWN = (1 << 7)
62 };
63
64 struct tipc_link_entry {
65 struct tipc_link *link;
66 spinlock_t lock; /* per link */
67 u32 mtu;
68 struct sk_buff_head inputq;
69 struct tipc_media_addr maddr;
70 };
71
72 struct tipc_bclink_entry {
73 struct tipc_link *link;
74 struct sk_buff_head inputq1;
75 struct sk_buff_head arrvq;
76 struct sk_buff_head inputq2;
77 struct sk_buff_head namedq;
78 u16 named_rcv_nxt;
79 bool named_open;
80 };
81
82 /**
83 * struct tipc_node - TIPC node structure
84 * @addr: network address of node
85 * @kref: reference counter to node object
86 * @lock: rwlock governing access to structure
87 * @net: the applicable net namespace
88 * @hash: links to adjacent nodes in unsorted hash chain
89 * @inputq: pointer to input queue containing messages for msg event
90 * @namedq: pointer to name table input queue with name table messages
91 * @active_links: bearer ids of active links, used as index into links[] array
92 * @links: array containing references to all links to node
93 * @bc_entry: broadcast link entry
94 * @action_flags: bit mask of different types of node actions
95 * @state: connectivity state vs peer node
96 * @preliminary: a preliminary node or not
97 * @failover_sent: failover sent or not
98 * @sync_point: sequence number where synch/failover is finished
99 * @list: links to adjacent nodes in sorted list of cluster's nodes
100 * @working_links: number of working links to node (both active and standby)
101 * @link_cnt: number of links to node
102 * @capabilities: bitmap, indicating peer node's functional capabilities
103 * @signature: node instance identifier
104 * @link_id: local and remote bearer ids of changing link, if any
105 * @peer_id: 128-bit ID of peer
106 * @peer_id_string: ID string of peer
107 * @publ_list: list of publications
108 * @conn_sks: list of connections (FIXME)
109 * @timer: node's keepalive timer
110 * @keepalive_intv: keepalive interval in milliseconds
111 * @rcu: rcu struct for tipc_node
112 * @delete_at: indicates the time for deleting a down node
113 * @peer_net: peer's net namespace
114 * @peer_hash_mix: hash for this peer (FIXME)
115 * @crypto_rx: RX crypto handler
116 */
117 struct tipc_node {
118 u32 addr;
119 struct kref kref;
120 rwlock_t lock;
121 struct net *net;
122 struct hlist_node hash;
123 int active_links[2];
124 struct tipc_link_entry links[MAX_BEARERS];
125 struct tipc_bclink_entry bc_entry;
126 int action_flags;
127 struct list_head list;
128 int state;
129 bool preliminary;
130 bool failover_sent;
131 u16 sync_point;
132 int link_cnt;
133 u16 working_links;
134 u16 capabilities;
135 u32 signature;
136 u32 link_id;
137 u8 peer_id[16];
138 char peer_id_string[NODE_ID_STR_LEN];
139 struct list_head publ_list;
140 struct list_head conn_sks;
141 unsigned long keepalive_intv;
142 struct timer_list timer;
143 struct rcu_head rcu;
144 unsigned long delete_at;
145 struct net *peer_net;
146 u32 peer_hash_mix;
147 #ifdef CONFIG_TIPC_CRYPTO
148 struct tipc_crypto *crypto_rx;
149 #endif
150 };
151
152 /* Node FSM states and events:
153 */
154 enum {
155 SELF_DOWN_PEER_DOWN = 0xdd,
156 SELF_UP_PEER_UP = 0xaa,
157 SELF_DOWN_PEER_LEAVING = 0xd1,
158 SELF_UP_PEER_COMING = 0xac,
159 SELF_COMING_PEER_UP = 0xca,
160 SELF_LEAVING_PEER_DOWN = 0x1d,
161 NODE_FAILINGOVER = 0xf0,
162 NODE_SYNCHING = 0xcc
163 };
164
165 enum {
166 SELF_ESTABL_CONTACT_EVT = 0xece,
167 SELF_LOST_CONTACT_EVT = 0x1ce,
168 PEER_ESTABL_CONTACT_EVT = 0x9ece,
169 PEER_LOST_CONTACT_EVT = 0x91ce,
170 NODE_FAILOVER_BEGIN_EVT = 0xfbe,
171 NODE_FAILOVER_END_EVT = 0xfee,
172 NODE_SYNCH_BEGIN_EVT = 0xcbe,
173 NODE_SYNCH_END_EVT = 0xcee
174 };
175
176 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
177 struct sk_buff_head *xmitq,
178 struct tipc_media_addr **maddr);
179 static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
180 bool delete);
181 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
182 static void tipc_node_delete(struct tipc_node *node);
183 static void tipc_node_timeout(struct timer_list *t);
184 static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
185 static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
186 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
187 static bool node_is_up(struct tipc_node *n);
188 static void tipc_node_delete_from_list(struct tipc_node *node);
189
190 struct tipc_sock_conn {
191 u32 port;
192 u32 peer_port;
193 u32 peer_node;
194 struct list_head list;
195 };
196
197 static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
198 {
199 int bearer_id = n->active_links[sel & 1];
200
201 if (unlikely(bearer_id == INVALID_BEARER_ID))
202 return NULL;
203
204 return n->links[bearer_id].link;
205 }
206
207 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
208 {
209 struct tipc_node *n;
210 int bearer_id;
211 unsigned int mtu = MAX_MSG_SIZE;
212
213 n = tipc_node_find(net, addr);
214 if (unlikely(!n))
215 return mtu;
216
217 	/* Allow MAX_MSG_SIZE when building a connection oriented message
218 	 * if both peers are in the same core network
219 */
220 if (n->peer_net && connected) {
221 tipc_node_put(n);
222 return mtu;
223 }
224
225 bearer_id = n->active_links[sel & 1];
226 if (likely(bearer_id != INVALID_BEARER_ID))
227 mtu = n->links[bearer_id].mtu;
228 tipc_node_put(n);
229 return mtu;
230 }
231
232 bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
233 {
234 u8 *own_id = tipc_own_id(net);
235 struct tipc_node *n;
236
237 if (!own_id)
238 return true;
239
240 if (addr == tipc_own_addr(net)) {
241 memcpy(id, own_id, TIPC_NODEID_LEN);
242 return true;
243 }
244 n = tipc_node_find(net, addr);
245 if (!n)
246 return false;
247
248 memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
249 tipc_node_put(n);
250 return true;
251 }
252
253 u16 tipc_node_get_capabilities(struct net *net, u32 addr)
254 {
255 struct tipc_node *n;
256 u16 caps;
257
258 n = tipc_node_find(net, addr);
259 if (unlikely(!n))
260 return TIPC_NODE_CAPABILITIES;
261 caps = n->capabilities;
262 tipc_node_put(n);
263 return caps;
264 }
265
266 u32 tipc_node_get_addr(struct tipc_node *node)
267 {
268 return (node) ? node->addr : 0;
269 }
270
271 char *tipc_node_get_id_str(struct tipc_node *node)
272 {
273 return node->peer_id_string;
274 }
275
276 #ifdef CONFIG_TIPC_CRYPTO
277 /**
278 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
279 * @__n: target tipc_node
280 * Note: node ref counter must be held first!
281 */
282 struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
283 {
284 return (__n) ? __n->crypto_rx : NULL;
285 }
286
287 struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
288 {
289 return container_of(pos, struct tipc_node, list)->crypto_rx;
290 }
291
292 struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
293 {
294 struct tipc_node *n;
295
296 n = tipc_node_find(net, addr);
297 return (n) ? n->crypto_rx : NULL;
298 }
299 #endif
300
301 static void tipc_node_free(struct rcu_head *rp)
302 {
303 struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
304
305 #ifdef CONFIG_TIPC_CRYPTO
306 tipc_crypto_stop(&n->crypto_rx);
307 #endif
308 kfree(n);
309 }
310
311 static void tipc_node_kref_release(struct kref *kref)
312 {
313 struct tipc_node *n = container_of(kref, struct tipc_node, kref);
314
315 kfree(n->bc_entry.link);
316 call_rcu(&n->rcu, tipc_node_free);
317 }
318
319 void tipc_node_put(struct tipc_node *node)
320 {
321 kref_put(&node->kref, tipc_node_kref_release);
322 }
323
324 void tipc_node_get(struct tipc_node *node)
325 {
326 kref_get(&node->kref);
327 }
328
329 /*
330 * tipc_node_find - locate specified node object, if it exists
331 */
332 static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
333 {
334 struct tipc_net *tn = tipc_net(net);
335 struct tipc_node *node;
336 unsigned int thash = tipc_hashfn(addr);
337
338 rcu_read_lock();
339 hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
340 if (node->addr != addr || node->preliminary)
341 continue;
342 if (!kref_get_unless_zero(&node->kref))
343 node = NULL;
344 break;
345 }
346 rcu_read_unlock();
347 return node;
348 }
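
/* Illustrative sketch, not part of the original source: the canonical
 * find/put pattern used throughout this file. tipc_node_find() only
 * returns a node if kref_get_unless_zero() wins against a concurrent
 * delete, so every successful lookup must be paired with a
 * tipc_node_put(). The helper name below is hypothetical.
 */
static inline bool tipc_node_example_is_known(struct net *net, u32 addr)
{
	struct tipc_node *n = tipc_node_find(net, addr);

	if (!n)
		return false;
	tipc_node_put(n); /* drop the reference taken by the lookup */
	return true;
}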
349
350 /* tipc_node_find_by_id - locate specified node object by its 128-bit id
351 * Note: this function is called only when a discovery request failed
352 * to find the node by its 32-bit id, and is not time critical
353 */
354 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
355 {
356 struct tipc_net *tn = tipc_net(net);
357 struct tipc_node *n;
358 bool found = false;
359
360 rcu_read_lock();
361 list_for_each_entry_rcu(n, &tn->node_list, list) {
362 read_lock_bh(&n->lock);
363 if (!memcmp(id, n->peer_id, 16) &&
364 kref_get_unless_zero(&n->kref))
365 found = true;
366 read_unlock_bh(&n->lock);
367 if (found)
368 break;
369 }
370 rcu_read_unlock();
371 return found ? n : NULL;
372 }
373
374 static void tipc_node_read_lock(struct tipc_node *n)
375 {
376 read_lock_bh(&n->lock);
377 }
378
379 static void tipc_node_read_unlock(struct tipc_node *n)
380 {
381 read_unlock_bh(&n->lock);
382 }
383
384 static void tipc_node_write_lock(struct tipc_node *n)
385 {
386 write_lock_bh(&n->lock);
387 }
388
389 static void tipc_node_write_unlock_fast(struct tipc_node *n)
390 {
391 write_unlock_bh(&n->lock);
392 }
393
394 static void tipc_node_write_unlock(struct tipc_node *n)
395 {
396 struct net *net = n->net;
397 u32 addr = 0;
398 u32 flags = n->action_flags;
399 u32 link_id = 0;
400 u32 bearer_id;
401 struct list_head *publ_list;
402
403 if (likely(!flags)) {
404 write_unlock_bh(&n->lock);
405 return;
406 }
407
408 addr = n->addr;
409 link_id = n->link_id;
410 bearer_id = link_id & 0xffff;
411 publ_list = &n->publ_list;
412
413 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
414 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
415
416 write_unlock_bh(&n->lock);
417
418 if (flags & TIPC_NOTIFY_NODE_DOWN)
419 tipc_publ_notify(net, publ_list, addr, n->capabilities);
420
421 if (flags & TIPC_NOTIFY_NODE_UP)
422 tipc_named_node_up(net, addr, n->capabilities);
423
424 if (flags & TIPC_NOTIFY_LINK_UP) {
425 tipc_mon_peer_up(net, addr, bearer_id);
426 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
427 TIPC_NODE_SCOPE, link_id, link_id);
428 }
429 if (flags & TIPC_NOTIFY_LINK_DOWN) {
430 tipc_mon_peer_down(net, addr, bearer_id);
431 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
432 addr, link_id);
433 }
434 }
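
/* Illustrative sketch, not part of the original source: how the deferred
 * notification flags are used. A caller sets action_flags and link_id
 * while holding the node write lock; tipc_node_write_unlock() then makes
 * the corresponding monitor and name table calls after dropping the lock,
 * so those subsystems are never called with the lock held. The helper
 * name and its link_id parameter are hypothetical; real callers derive
 * link_id via tipc_link_id() of the affected link.
 */
static inline void tipc_node_example_notify_link_up(struct tipc_node *n,
						    u32 link_id)
{
	tipc_node_write_lock(n);
	n->link_id = link_id;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	tipc_node_write_unlock(n); /* triggers tipc_mon_peer_up() etc. */
}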
435
436 static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
437 {
438 int net_id = tipc_netid(n->net);
439 struct tipc_net *tn_peer;
440 struct net *tmp;
441 u32 hash_chk;
442
443 if (n->peer_net)
444 return;
445
446 for_each_net_rcu(tmp) {
447 tn_peer = tipc_net(tmp);
448 if (!tn_peer)
449 continue;
450 		/* Integrity check: does this node exist in the namespace? */
451 if (tn_peer->net_id != net_id)
452 continue;
453 if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
454 continue;
455 hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
456 if (hash_mixes ^ hash_chk)
457 continue;
458 n->peer_net = tmp;
459 n->peer_hash_mix = hash_mixes;
460 break;
461 }
462 }
463
464 struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
465 u16 capabilities, u32 hash_mixes,
466 bool preliminary)
467 {
468 struct tipc_net *tn = net_generic(net, tipc_net_id);
469 struct tipc_node *n, *temp_node;
470 struct tipc_link *l;
471 unsigned long intv;
472 int bearer_id;
473 int i;
474
475 spin_lock_bh(&tn->node_list_lock);
476 n = tipc_node_find(net, addr) ?:
477 tipc_node_find_by_id(net, peer_id);
478 if (n) {
479 if (!n->preliminary)
480 goto update;
481 if (preliminary)
482 goto exit;
483 /* A preliminary node becomes "real" now, refresh its data */
484 tipc_node_write_lock(n);
485 n->preliminary = false;
486 n->addr = addr;
487 hlist_del_rcu(&n->hash);
488 hlist_add_head_rcu(&n->hash,
489 &tn->node_htable[tipc_hashfn(addr)]);
490 list_del_rcu(&n->list);
491 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
492 if (n->addr < temp_node->addr)
493 break;
494 }
495 list_add_tail_rcu(&n->list, &temp_node->list);
496 tipc_node_write_unlock_fast(n);
497
498 update:
499 if (n->peer_hash_mix ^ hash_mixes)
500 tipc_node_assign_peer_net(n, hash_mixes);
501 if (n->capabilities == capabilities)
502 goto exit;
503 /* Same node may come back with new capabilities */
504 tipc_node_write_lock(n);
505 n->capabilities = capabilities;
506 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
507 l = n->links[bearer_id].link;
508 if (l)
509 tipc_link_update_caps(l, capabilities);
510 }
511 tipc_node_write_unlock_fast(n);
512
513 /* Calculate cluster capabilities */
514 tn->capabilities = TIPC_NODE_CAPABILITIES;
515 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
516 tn->capabilities &= temp_node->capabilities;
517 }
518
519 tipc_bcast_toggle_rcast(net,
520 (tn->capabilities & TIPC_BCAST_RCAST));
521
522 goto exit;
523 }
524 n = kzalloc(sizeof(*n), GFP_ATOMIC);
525 if (!n) {
526 pr_warn("Node creation failed, no memory\n");
527 goto exit;
528 }
529 tipc_nodeid2string(n->peer_id_string, peer_id);
530 #ifdef CONFIG_TIPC_CRYPTO
531 if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
532 pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
533 kfree(n);
534 n = NULL;
535 goto exit;
536 }
537 #endif
538 n->addr = addr;
539 n->preliminary = preliminary;
540 memcpy(&n->peer_id, peer_id, 16);
541 n->net = net;
542 n->peer_net = NULL;
543 n->peer_hash_mix = 0;
544 	/* Assign the kernel-local peer namespace if one exists */
545 tipc_node_assign_peer_net(n, hash_mixes);
546 n->capabilities = capabilities;
547 kref_init(&n->kref);
548 rwlock_init(&n->lock);
549 INIT_HLIST_NODE(&n->hash);
550 INIT_LIST_HEAD(&n->list);
551 INIT_LIST_HEAD(&n->publ_list);
552 INIT_LIST_HEAD(&n->conn_sks);
553 skb_queue_head_init(&n->bc_entry.namedq);
554 skb_queue_head_init(&n->bc_entry.inputq1);
555 __skb_queue_head_init(&n->bc_entry.arrvq);
556 skb_queue_head_init(&n->bc_entry.inputq2);
557 for (i = 0; i < MAX_BEARERS; i++)
558 spin_lock_init(&n->links[i].lock);
559 n->state = SELF_DOWN_PEER_LEAVING;
560 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
561 n->signature = INVALID_NODE_SIG;
562 n->active_links[0] = INVALID_BEARER_ID;
563 n->active_links[1] = INVALID_BEARER_ID;
564 n->bc_entry.link = NULL;
565 tipc_node_get(n);
566 timer_setup(&n->timer, tipc_node_timeout, 0);
567 /* Start a slow timer anyway, crypto needs it */
568 n->keepalive_intv = 10000;
569 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
570 if (!mod_timer(&n->timer, intv))
571 tipc_node_get(n);
572 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
573 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
574 if (n->addr < temp_node->addr)
575 break;
576 }
577 list_add_tail_rcu(&n->list, &temp_node->list);
578 /* Calculate cluster capabilities */
579 tn->capabilities = TIPC_NODE_CAPABILITIES;
580 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
581 tn->capabilities &= temp_node->capabilities;
582 }
583 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
584 trace_tipc_node_create(n, true, " ");
585 exit:
586 spin_unlock_bh(&tn->node_list_lock);
587 return n;
588 }
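
/* Illustrative note, not part of the original source: mod_timer() returns
 * 0 when the timer was not already pending, so the conditional
 * tipc_node_get() above takes one extra reference on behalf of the newly
 * armed timer. That reference is dropped by tipc_node_timeout() when the
 * node is cleaned up, or by tipc_node_delete() after del_timer_sync().
 */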
589
590 static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
591 {
592 unsigned long tol = tipc_link_tolerance(l);
593 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
594
595 /* Link with lowest tolerance determines timer interval */
596 if (intv < n->keepalive_intv)
597 n->keepalive_intv = intv;
598
599 /* Ensure link's abort limit corresponds to current tolerance */
600 tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
601 }
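
/* Worked example, not part of the original source: with a link tolerance
 * of 1500 ms, tol / 4 = 375 ms, which is below the 500 ms cap, so the
 * node keepalive interval becomes 375 ms (if smaller than the current
 * value). The abort limit is then tol / keepalive_intv = 1500 / 375 = 4,
 * i.e. the link is declared failed after four silent intervals.
 */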
602
603 static void tipc_node_delete_from_list(struct tipc_node *node)
604 {
605 #ifdef CONFIG_TIPC_CRYPTO
606 tipc_crypto_key_flush(node->crypto_rx);
607 #endif
608 list_del_rcu(&node->list);
609 hlist_del_rcu(&node->hash);
610 tipc_node_put(node);
611 }
612
613 static void tipc_node_delete(struct tipc_node *node)
614 {
615 trace_tipc_node_delete(node, true, " ");
616 tipc_node_delete_from_list(node);
617
618 del_timer_sync(&node->timer);
619 tipc_node_put(node);
620 }
621
622 void tipc_node_stop(struct net *net)
623 {
624 struct tipc_net *tn = tipc_net(net);
625 struct tipc_node *node, *t_node;
626
627 spin_lock_bh(&tn->node_list_lock);
628 list_for_each_entry_safe(node, t_node, &tn->node_list, list)
629 tipc_node_delete(node);
630 spin_unlock_bh(&tn->node_list_lock);
631 }
632
633 void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
634 {
635 struct tipc_node *n;
636
637 if (in_own_node(net, addr))
638 return;
639
640 n = tipc_node_find(net, addr);
641 if (!n) {
642 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
643 return;
644 }
645 tipc_node_write_lock(n);
646 list_add_tail(subscr, &n->publ_list);
647 tipc_node_write_unlock_fast(n);
648 tipc_node_put(n);
649 }
650
651 void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
652 {
653 struct tipc_node *n;
654
655 if (in_own_node(net, addr))
656 return;
657
658 n = tipc_node_find(net, addr);
659 if (!n) {
660 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
661 return;
662 }
663 tipc_node_write_lock(n);
664 list_del_init(subscr);
665 tipc_node_write_unlock_fast(n);
666 tipc_node_put(n);
667 }
668
669 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
670 {
671 struct tipc_node *node;
672 struct tipc_sock_conn *conn;
673 int err = 0;
674
675 if (in_own_node(net, dnode))
676 return 0;
677
678 node = tipc_node_find(net, dnode);
679 if (!node) {
680 pr_warn("Connecting sock to node 0x%x failed\n", dnode);
681 return -EHOSTUNREACH;
682 }
683 conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
684 if (!conn) {
685 err = -EHOSTUNREACH;
686 goto exit;
687 }
688 conn->peer_node = dnode;
689 conn->port = port;
690 conn->peer_port = peer_port;
691
692 tipc_node_write_lock(node);
693 list_add_tail(&conn->list, &node->conn_sks);
694 tipc_node_write_unlock(node);
695 exit:
696 tipc_node_put(node);
697 return err;
698 }
699
700 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
701 {
702 struct tipc_node *node;
703 struct tipc_sock_conn *conn, *safe;
704
705 if (in_own_node(net, dnode))
706 return;
707
708 node = tipc_node_find(net, dnode);
709 if (!node)
710 return;
711
712 tipc_node_write_lock(node);
713 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
714 if (port != conn->port)
715 continue;
716 list_del(&conn->list);
717 kfree(conn);
718 }
719 tipc_node_write_unlock(node);
720 tipc_node_put(node);
721 }
722
723 static void tipc_node_clear_links(struct tipc_node *node)
724 {
725 int i;
726
727 for (i = 0; i < MAX_BEARERS; i++) {
728 struct tipc_link_entry *le = &node->links[i];
729
730 if (le->link) {
731 kfree(le->link);
732 le->link = NULL;
733 node->link_cnt--;
734 }
735 }
736 }
737
738 /* tipc_node_cleanup - delete nodes that do not
739  * have active links for NODE_CLEANUP_AFTER time
740 */
741 static bool tipc_node_cleanup(struct tipc_node *peer)
742 {
743 struct tipc_node *temp_node;
744 struct tipc_net *tn = tipc_net(peer->net);
745 bool deleted = false;
746
747 /* If lock held by tipc_node_stop() the node will be deleted anyway */
748 if (!spin_trylock_bh(&tn->node_list_lock))
749 return false;
750
751 tipc_node_write_lock(peer);
752
753 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
754 tipc_node_clear_links(peer);
755 tipc_node_delete_from_list(peer);
756 deleted = true;
757 }
758 tipc_node_write_unlock(peer);
759
760 if (!deleted) {
761 spin_unlock_bh(&tn->node_list_lock);
762 return deleted;
763 }
764
765 /* Calculate cluster capabilities */
766 tn->capabilities = TIPC_NODE_CAPABILITIES;
767 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
768 tn->capabilities &= temp_node->capabilities;
769 }
770 tipc_bcast_toggle_rcast(peer->net,
771 (tn->capabilities & TIPC_BCAST_RCAST));
772 spin_unlock_bh(&tn->node_list_lock);
773 return deleted;
774 }
775
776 /* tipc_node_timeout - handle expiration of node timer
777 */
778 static void tipc_node_timeout(struct timer_list *t)
779 {
780 struct tipc_node *n = from_timer(n, t, timer);
781 struct tipc_link_entry *le;
782 struct sk_buff_head xmitq;
783 int remains = n->link_cnt;
784 int bearer_id;
785 int rc = 0;
786
787 trace_tipc_node_timeout(n, false, " ");
788 if (!node_is_up(n) && tipc_node_cleanup(n)) {
789 		/* Remove the timer's reference to the node */
790 tipc_node_put(n);
791 return;
792 }
793
794 #ifdef CONFIG_TIPC_CRYPTO
795 /* Take any crypto key related actions first */
796 tipc_crypto_timeout(n->crypto_rx);
797 #endif
798 __skb_queue_head_init(&xmitq);
799
800 	/* Reset the node timer interval to a large value (10 seconds); it
801 	 * will be recalculated below from the link with the lowest tolerance
802 */
803 tipc_node_read_lock(n);
804 n->keepalive_intv = 10000;
805 tipc_node_read_unlock(n);
806 for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
807 tipc_node_read_lock(n);
808 le = &n->links[bearer_id];
809 if (le->link) {
810 spin_lock_bh(&le->lock);
811 /* Link tolerance may change asynchronously: */
812 tipc_node_calculate_timer(n, le->link);
813 rc = tipc_link_timeout(le->link, &xmitq);
814 spin_unlock_bh(&le->lock);
815 remains--;
816 }
817 tipc_node_read_unlock(n);
818 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
819 if (rc & TIPC_LINK_DOWN_EVT)
820 tipc_node_link_down(n, bearer_id, false);
821 }
822 mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
823 }
824
825 /**
826 * __tipc_node_link_up - handle addition of link
827 * @n: target tipc_node
828 * @bearer_id: id of the bearer
829 * @xmitq: queue for messages to be xmited on
830 * Node lock must be held by caller
831 * Link becomes active (alone or shared) or standby, depending on its priority.
832 */
833 static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
834 struct sk_buff_head *xmitq)
835 {
836 int *slot0 = &n->active_links[0];
837 int *slot1 = &n->active_links[1];
838 struct tipc_link *ol = node_active_link(n, 0);
839 struct tipc_link *nl = n->links[bearer_id].link;
840
841 if (!nl || tipc_link_is_up(nl))
842 return;
843
844 tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
845 if (!tipc_link_is_up(nl))
846 return;
847
848 n->working_links++;
849 n->action_flags |= TIPC_NOTIFY_LINK_UP;
850 n->link_id = tipc_link_id(nl);
851
852 /* Leave room for tunnel header when returning 'mtu' to users: */
853 n->links[bearer_id].mtu = tipc_link_mss(nl);
854
855 tipc_bearer_add_dest(n->net, bearer_id, n->addr);
856 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
857
858 pr_debug("Established link <%s> on network plane %c\n",
859 tipc_link_name(nl), tipc_link_plane(nl));
860 trace_tipc_node_link_up(n, true, " ");
861
862 /* Ensure that a STATE message goes first */
863 tipc_link_build_state_msg(nl, xmitq);
864
865 /* First link? => give it both slots */
866 if (!ol) {
867 *slot0 = bearer_id;
868 *slot1 = bearer_id;
869 tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
870 n->action_flags |= TIPC_NOTIFY_NODE_UP;
871 tipc_link_set_active(nl, true);
872 tipc_bcast_add_peer(n->net, nl, xmitq);
873 return;
874 }
875
876 /* Second link => redistribute slots */
877 if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
878 pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
879 *slot0 = bearer_id;
880 *slot1 = bearer_id;
881 tipc_link_set_active(nl, true);
882 tipc_link_set_active(ol, false);
883 } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
884 tipc_link_set_active(nl, true);
885 *slot1 = bearer_id;
886 } else {
887 pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
888 }
889
890 /* Prepare synchronization with first link */
891 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
892 }
893
894 /**
895 * tipc_node_link_up - handle addition of link
896 * @n: target tipc_node
897 * @bearer_id: id of the bearer
898 * @xmitq: queue for messages to be xmited on
899 *
900 * Link becomes active (alone or shared) or standby, depending on its priority.
901 */
902 static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
903 struct sk_buff_head *xmitq)
904 {
905 struct tipc_media_addr *maddr;
906
907 tipc_node_write_lock(n);
908 __tipc_node_link_up(n, bearer_id, xmitq);
909 maddr = &n->links[bearer_id].maddr;
910 tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
911 tipc_node_write_unlock(n);
912 }
913
914 /**
915  * tipc_node_link_failover() - start failover in a "half-failover" situation
916 *
917 * This function is only called in a very special situation where link
918 * failover can be already started on peer node but not on this node.
919 * This can happen when e.g.::
920 *
921 * 1. Both links <1A-2A>, <1B-2B> down
922 * 2. Link endpoint 2A up, but 1A still down (e.g. due to network
923 * disturbance, wrong session, etc.)
924 * 3. Link <1B-2B> up
925 * 4. Link endpoint 2A down (e.g. due to link tolerance timeout)
926 * 5. Node 2 starts failover onto link <1B-2B>
927 *
928  * ==> Node 1 never starts link/node failover!
929 *
930 * @n: tipc node structure
931  * @l: peer link endpoint failing over (can be NULL)
932 * @tnl: tunnel link
933 * @xmitq: queue for messages to be xmited on tnl link later
934 */
935 static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
936 struct tipc_link *tnl,
937 struct sk_buff_head *xmitq)
938 {
939 	/* Avoid a "self-failover" that can never end */
940 if (!tipc_link_is_up(tnl))
941 return;
942
943 	/* Don't rush; the failed link may be in the process of resetting */
944 if (l && !tipc_link_is_reset(l))
945 return;
946
947 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
948 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
949
950 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
951 tipc_link_failover_prepare(l, tnl, xmitq);
952
953 if (l)
954 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
955 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
956 }
957
958 /**
959 * __tipc_node_link_down - handle loss of link
960 * @n: target tipc_node
961 * @bearer_id: id of the bearer
962 * @xmitq: queue for messages to be xmited on
963 * @maddr: output media address of the bearer
964 */
965 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
966 struct sk_buff_head *xmitq,
967 struct tipc_media_addr **maddr)
968 {
969 struct tipc_link_entry *le = &n->links[*bearer_id];
970 int *slot0 = &n->active_links[0];
971 int *slot1 = &n->active_links[1];
972 int i, highest = 0, prio;
973 struct tipc_link *l, *_l, *tnl;
974
975 l = n->links[*bearer_id].link;
976 if (!l || tipc_link_is_reset(l))
977 return;
978
979 n->working_links--;
980 n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
981 n->link_id = tipc_link_id(l);
982
983 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
984
985 pr_debug("Lost link <%s> on network plane %c\n",
986 tipc_link_name(l), tipc_link_plane(l));
987
988 /* Select new active link if any available */
989 *slot0 = INVALID_BEARER_ID;
990 *slot1 = INVALID_BEARER_ID;
991 for (i = 0; i < MAX_BEARERS; i++) {
992 _l = n->links[i].link;
993 if (!_l || !tipc_link_is_up(_l))
994 continue;
995 if (_l == l)
996 continue;
997 prio = tipc_link_prio(_l);
998 if (prio < highest)
999 continue;
1000 if (prio > highest) {
1001 highest = prio;
1002 *slot0 = i;
1003 *slot1 = i;
1004 continue;
1005 }
1006 *slot1 = i;
1007 }
1008
1009 if (!node_is_up(n)) {
1010 if (tipc_link_peer_is_down(l))
1011 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1012 tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
1013 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
1014 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1015 tipc_link_reset(l);
1016 tipc_link_build_reset_msg(l, xmitq);
1017 *maddr = &n->links[*bearer_id].maddr;
1018 node_lost_contact(n, &le->inputq);
1019 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
1020 return;
1021 }
1022 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
1023
1024 /* There is still a working link => initiate failover */
1025 *bearer_id = n->active_links[0];
1026 tnl = n->links[*bearer_id].link;
1027 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
1028 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
1029 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
1030 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
1031 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
1032 tipc_link_reset(l);
1033 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1034 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
1035 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
1036 *maddr = &n->links[*bearer_id].maddr;
1037 }
1038
1039 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
1040 {
1041 struct tipc_link_entry *le = &n->links[bearer_id];
1042 struct tipc_media_addr *maddr = NULL;
1043 struct tipc_link *l = le->link;
1044 int old_bearer_id = bearer_id;
1045 struct sk_buff_head xmitq;
1046
1047 if (!l)
1048 return;
1049
1050 __skb_queue_head_init(&xmitq);
1051
1052 tipc_node_write_lock(n);
1053 if (!tipc_link_is_establishing(l)) {
1054 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
1055 } else {
1056 /* Defuse pending tipc_node_link_up() */
1057 tipc_link_reset(l);
1058 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1059 }
1060 if (delete) {
1061 kfree(l);
1062 le->link = NULL;
1063 n->link_cnt--;
1064 }
1065 trace_tipc_node_link_down(n, true, "node link down or deleted!");
1066 tipc_node_write_unlock(n);
1067 if (delete)
1068 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
1069 if (!skb_queue_empty(&xmitq))
1070 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
1071 tipc_sk_rcv(n->net, &le->inputq);
1072 }
1073
1074 static bool node_is_up(struct tipc_node *n)
1075 {
1076 return n->active_links[0] != INVALID_BEARER_ID;
1077 }
1078
1079 bool tipc_node_is_up(struct net *net, u32 addr)
1080 {
1081 struct tipc_node *n;
1082 bool retval = false;
1083
1084 if (in_own_node(net, addr))
1085 return true;
1086
1087 n = tipc_node_find(net, addr);
1088 if (!n)
1089 return false;
1090 retval = node_is_up(n);
1091 tipc_node_put(n);
1092 return retval;
1093 }
1094
1095 static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
1096 {
1097 struct tipc_node *n;
1098
1099 addr ^= tipc_net(net)->random;
1100 while ((n = tipc_node_find(net, addr))) {
1101 tipc_node_put(n);
1102 addr++;
1103 }
1104 return addr;
1105 }
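
/* Worked example, not part of the original source (values hypothetical):
 * for addr 0x1001 and a cluster random value of 0x0f0f, probing starts at
 * 0x1001 ^ 0x0f0f = 0x1f0e, and the address is incremented until one is
 * found that no existing node occupies.
 */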
1106
1107 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest another if not
1108 * Returns suggested address if any, otherwise 0
1109 */
1110 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
1111 {
1112 struct tipc_net *tn = tipc_net(net);
1113 struct tipc_node *n;
1114 bool preliminary;
1115 u32 sugg_addr;
1116
1117 /* Suggest new address if some other peer is using this one */
1118 n = tipc_node_find(net, addr);
1119 if (n) {
1120 if (!memcmp(n->peer_id, id, NODE_ID_LEN))
1121 addr = 0;
1122 tipc_node_put(n);
1123 if (!addr)
1124 return 0;
1125 return tipc_node_suggest_addr(net, addr);
1126 }
1127
1128 /* Suggest previously used address if peer is known */
1129 n = tipc_node_find_by_id(net, id);
1130 if (n) {
1131 sugg_addr = n->addr;
1132 preliminary = n->preliminary;
1133 tipc_node_put(n);
1134 if (!preliminary)
1135 return sugg_addr;
1136 }
1137
1138 /* Even this node may be in conflict */
1139 if (tn->trial_addr == addr)
1140 return tipc_node_suggest_addr(net, addr);
1141
1142 return 0;
1143 }
1144
1145 void tipc_node_check_dest(struct net *net, u32 addr,
1146 u8 *peer_id, struct tipc_bearer *b,
1147 u16 capabilities, u32 signature, u32 hash_mixes,
1148 struct tipc_media_addr *maddr,
1149 bool *respond, bool *dupl_addr)
1150 {
1151 struct tipc_node *n;
1152 struct tipc_link *l, *snd_l;
1153 struct tipc_link_entry *le;
1154 bool addr_match = false;
1155 bool sign_match = false;
1156 bool link_up = false;
1157 bool accept_addr = false;
1158 bool reset = true;
1159 char *if_name;
1160 unsigned long intv;
1161 u16 session;
1162
1163 *dupl_addr = false;
1164 *respond = false;
1165
1166 n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
1167 false);
1168 if (!n)
1169 return;
1170
1171 tipc_node_write_lock(n);
1172 if (unlikely(!n->bc_entry.link)) {
1173 snd_l = tipc_bc_sndlink(net);
1174 if (!tipc_link_bc_create(net, tipc_own_addr(net),
1175 addr, peer_id, U16_MAX,
1176 tipc_link_min_win(snd_l),
1177 tipc_link_max_win(snd_l),
1178 n->capabilities,
1179 &n->bc_entry.inputq1,
1180 &n->bc_entry.namedq, snd_l,
1181 &n->bc_entry.link)) {
1182 pr_warn("Broadcast rcv link creation failed, no mem\n");
1183 tipc_node_write_unlock_fast(n);
1184 tipc_node_put(n);
1185 return;
1186 }
1187 }
1188
1189 le = &n->links[b->identity];
1190
1191 /* Prepare to validate requesting node's signature and media address */
1192 l = le->link;
1193 link_up = l && tipc_link_is_up(l);
1194 addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
1195 sign_match = (signature == n->signature);
1196
1197 /* These three flags give us eight permutations: */
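
	/* Illustrative summary, not part of the original source, of the
	 * eight cases handled below:
	 *
	 *   sign   addr   link   action
	 *   match  match  up
	 *   yes    yes    yes -> all is fine, do nothing
	 *   yes    yes    no  -> respond; the link will come up in due time
	 *   yes    no     yes -> ignore (*dupl_addr) until the link goes down
	 *   yes    no     no  -> accept the new address and respond
	 *   no     yes    yes -> peer rebooted, accept the new signature
	 *   no     yes    no  -> peer rebooted, accept signature and respond
	 *   no     no     yes -> ignore (*dupl_addr) until the link goes down
	 *   no     no     no  -> new or rebooted peer, accept all and respond
	 */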
1198
1199 if (sign_match && addr_match && link_up) {
1200 /* All is fine. Do nothing. */
1201 reset = false;
1202 /* Peer node is not a container/local namespace */
1203 if (!n->peer_hash_mix)
1204 n->peer_hash_mix = hash_mixes;
1205 } else if (sign_match && addr_match && !link_up) {
1206 /* Respond. The link will come up in due time */
1207 *respond = true;
1208 } else if (sign_match && !addr_match && link_up) {
1209 /* Peer has changed i/f address without rebooting.
1210 * If so, the link will reset soon, and the next
1211 * discovery will be accepted. So we can ignore it.
1212 		 * It may also be a cloned or malicious peer having
1213 * chosen the same node address and signature as an
1214 * existing one.
1215 * Ignore requests until the link goes down, if ever.
1216 */
1217 *dupl_addr = true;
1218 } else if (sign_match && !addr_match && !link_up) {
1219 /* Peer link has changed i/f address without rebooting.
1220 * It may also be a cloned or malicious peer; we can't
1221 * distinguish between the two.
1222 * The signature is correct, so we must accept.
1223 */
1224 accept_addr = true;
1225 *respond = true;
1226 } else if (!sign_match && addr_match && link_up) {
1227 /* Peer node rebooted. Two possibilities:
1228 * - Delayed re-discovery; this link endpoint has already
1229 * reset and re-established contact with the peer, before
1230 * receiving a discovery message from that node.
1231 * (The peer happened to receive one from this node first).
1232 * - The peer came back so fast that our side has not
1233 * discovered it yet. Probing from this side will soon
1234 * reset the link, since there can be no working link
1235 * endpoint at the peer end, and the link will re-establish.
1236 * Accept the signature, since it comes from a known peer.
1237 */
1238 n->signature = signature;
1239 } else if (!sign_match && addr_match && !link_up) {
1240 /* The peer node has rebooted.
1241 * Accept signature, since it is a known peer.
1242 */
1243 n->signature = signature;
1244 *respond = true;
1245 } else if (!sign_match && !addr_match && link_up) {
1246 /* Peer rebooted with new address, or a new/duplicate peer.
1247 * Ignore until the link goes down, if ever.
1248 */
1249 *dupl_addr = true;
1250 } else if (!sign_match && !addr_match && !link_up) {
1251 /* Peer rebooted with new address, or it is a new peer.
1252 * Accept signature and address.
1253 */
1254 n->signature = signature;
1255 accept_addr = true;
1256 *respond = true;
1257 }
1258
1259 if (!accept_addr)
1260 goto exit;
1261
1262 /* Now create new link if not already existing */
1263 if (!l) {
1264 if (n->link_cnt == 2)
1265 goto exit;
1266
1267 if_name = strchr(b->name, ':') + 1;
1268 get_random_bytes(&session, sizeof(u16));
1269 if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
1270 b->net_plane, b->mtu, b->priority,
1271 b->min_win, b->max_win, session,
1272 tipc_own_addr(net), addr, peer_id,
1273 n->capabilities,
1274 tipc_bc_sndlink(n->net), n->bc_entry.link,
1275 &le->inputq,
1276 &n->bc_entry.namedq, &l)) {
1277 *respond = false;
1278 goto exit;
1279 }
1280 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
1281 tipc_link_reset(l);
1282 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1283 if (n->state == NODE_FAILINGOVER)
1284 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
1285 le->link = l;
1286 n->link_cnt++;
1287 tipc_node_calculate_timer(n, l);
1288 if (n->link_cnt == 1) {
1289 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
1290 if (!mod_timer(&n->timer, intv))
1291 tipc_node_get(n);
1292 }
1293 }
1294 memcpy(&le->maddr, maddr, sizeof(*maddr));
1295 exit:
1296 tipc_node_write_unlock(n);
1297 if (reset && l && !tipc_link_is_reset(l))
1298 tipc_node_link_down(n, b->identity, false);
1299 tipc_node_put(n);
1300 }
1301
1302 void tipc_node_delete_links(struct net *net, int bearer_id)
1303 {
1304 struct tipc_net *tn = net_generic(net, tipc_net_id);
1305 struct tipc_node *n;
1306
1307 rcu_read_lock();
1308 list_for_each_entry_rcu(n, &tn->node_list, list) {
1309 tipc_node_link_down(n, bearer_id, true);
1310 }
1311 rcu_read_unlock();
1312 }
1313
1314 static void tipc_node_reset_links(struct tipc_node *n)
1315 {
1316 int i;
1317
1318 pr_warn("Resetting all links to %x\n", n->addr);
1319
1320 trace_tipc_node_reset_links(n, true, " ");
1321 for (i = 0; i < MAX_BEARERS; i++) {
1322 tipc_node_link_down(n, i, false);
1323 }
1324 }
1325
1326 /* tipc_node_fsm_evt - node finite state machine
1327 * Determines when contact is allowed with peer node
1328 */
1329 static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
1330 {
1331 int state = n->state;
1332
1333 switch (state) {
1334 case SELF_DOWN_PEER_DOWN:
1335 switch (evt) {
1336 case SELF_ESTABL_CONTACT_EVT:
1337 state = SELF_UP_PEER_COMING;
1338 break;
1339 case PEER_ESTABL_CONTACT_EVT:
1340 state = SELF_COMING_PEER_UP;
1341 break;
1342 case SELF_LOST_CONTACT_EVT:
1343 case PEER_LOST_CONTACT_EVT:
1344 break;
1345 case NODE_SYNCH_END_EVT:
1346 case NODE_SYNCH_BEGIN_EVT:
1347 case NODE_FAILOVER_BEGIN_EVT:
1348 case NODE_FAILOVER_END_EVT:
1349 default:
1350 goto illegal_evt;
1351 }
1352 break;
1353 case SELF_UP_PEER_UP:
1354 switch (evt) {
1355 case SELF_LOST_CONTACT_EVT:
1356 state = SELF_DOWN_PEER_LEAVING;
1357 break;
1358 case PEER_LOST_CONTACT_EVT:
1359 state = SELF_LEAVING_PEER_DOWN;
1360 break;
1361 case NODE_SYNCH_BEGIN_EVT:
1362 state = NODE_SYNCHING;
1363 break;
1364 case NODE_FAILOVER_BEGIN_EVT:
1365 state = NODE_FAILINGOVER;
1366 break;
1367 case SELF_ESTABL_CONTACT_EVT:
1368 case PEER_ESTABL_CONTACT_EVT:
1369 case NODE_SYNCH_END_EVT:
1370 case NODE_FAILOVER_END_EVT:
1371 break;
1372 default:
1373 goto illegal_evt;
1374 }
1375 break;
1376 case SELF_DOWN_PEER_LEAVING:
1377 switch (evt) {
1378 case PEER_LOST_CONTACT_EVT:
1379 state = SELF_DOWN_PEER_DOWN;
1380 break;
1381 case SELF_ESTABL_CONTACT_EVT:
1382 case PEER_ESTABL_CONTACT_EVT:
1383 case SELF_LOST_CONTACT_EVT:
1384 break;
1385 case NODE_SYNCH_END_EVT:
1386 case NODE_SYNCH_BEGIN_EVT:
1387 case NODE_FAILOVER_BEGIN_EVT:
1388 case NODE_FAILOVER_END_EVT:
1389 default:
1390 goto illegal_evt;
1391 }
1392 break;
1393 case SELF_UP_PEER_COMING:
1394 switch (evt) {
1395 case PEER_ESTABL_CONTACT_EVT:
1396 state = SELF_UP_PEER_UP;
1397 break;
1398 case SELF_LOST_CONTACT_EVT:
1399 state = SELF_DOWN_PEER_DOWN;
1400 break;
1401 case SELF_ESTABL_CONTACT_EVT:
1402 case PEER_LOST_CONTACT_EVT:
1403 case NODE_SYNCH_END_EVT:
1404 case NODE_FAILOVER_BEGIN_EVT:
1405 break;
1406 case NODE_SYNCH_BEGIN_EVT:
1407 case NODE_FAILOVER_END_EVT:
1408 default:
1409 goto illegal_evt;
1410 }
1411 break;
1412 case SELF_COMING_PEER_UP:
1413 switch (evt) {
1414 case SELF_ESTABL_CONTACT_EVT:
1415 state = SELF_UP_PEER_UP;
1416 break;
1417 case PEER_LOST_CONTACT_EVT:
1418 state = SELF_DOWN_PEER_DOWN;
1419 break;
1420 case SELF_LOST_CONTACT_EVT:
1421 case PEER_ESTABL_CONTACT_EVT:
1422 break;
1423 case NODE_SYNCH_END_EVT:
1424 case NODE_SYNCH_BEGIN_EVT:
1425 case NODE_FAILOVER_BEGIN_EVT:
1426 case NODE_FAILOVER_END_EVT:
1427 default:
1428 goto illegal_evt;
1429 }
1430 break;
1431 case SELF_LEAVING_PEER_DOWN:
1432 switch (evt) {
1433 case SELF_LOST_CONTACT_EVT:
1434 state = SELF_DOWN_PEER_DOWN;
1435 break;
1436 case SELF_ESTABL_CONTACT_EVT:
1437 case PEER_ESTABL_CONTACT_EVT:
1438 case PEER_LOST_CONTACT_EVT:
1439 break;
1440 case NODE_SYNCH_END_EVT:
1441 case NODE_SYNCH_BEGIN_EVT:
1442 case NODE_FAILOVER_BEGIN_EVT:
1443 case NODE_FAILOVER_END_EVT:
1444 default:
1445 goto illegal_evt;
1446 }
1447 break;
1448 case NODE_FAILINGOVER:
1449 switch (evt) {
1450 case SELF_LOST_CONTACT_EVT:
1451 state = SELF_DOWN_PEER_LEAVING;
1452 break;
1453 case PEER_LOST_CONTACT_EVT:
1454 state = SELF_LEAVING_PEER_DOWN;
1455 break;
1456 case NODE_FAILOVER_END_EVT:
1457 state = SELF_UP_PEER_UP;
1458 break;
1459 case NODE_FAILOVER_BEGIN_EVT:
1460 case SELF_ESTABL_CONTACT_EVT:
1461 case PEER_ESTABL_CONTACT_EVT:
1462 break;
1463 case NODE_SYNCH_BEGIN_EVT:
1464 case NODE_SYNCH_END_EVT:
1465 default:
1466 goto illegal_evt;
1467 }
1468 break;
1469 case NODE_SYNCHING:
1470 switch (evt) {
1471 case SELF_LOST_CONTACT_EVT:
1472 state = SELF_DOWN_PEER_LEAVING;
1473 break;
1474 case PEER_LOST_CONTACT_EVT:
1475 state = SELF_LEAVING_PEER_DOWN;
1476 break;
1477 case NODE_SYNCH_END_EVT:
1478 state = SELF_UP_PEER_UP;
1479 break;
1480 case NODE_FAILOVER_BEGIN_EVT:
1481 state = NODE_FAILINGOVER;
1482 break;
1483 case NODE_SYNCH_BEGIN_EVT:
1484 case SELF_ESTABL_CONTACT_EVT:
1485 case PEER_ESTABL_CONTACT_EVT:
1486 break;
1487 case NODE_FAILOVER_END_EVT:
1488 default:
1489 goto illegal_evt;
1490 }
1491 break;
1492 default:
1493 pr_err("Unknown node fsm state %x\n", state);
1494 break;
1495 }
1496 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
1497 n->state = state;
1498 return;
1499
1500 illegal_evt:
1501 pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
1502 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
1503 }
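
/* Illustrative examples, not part of the original source, of common
 * transition sequences through the tables above:
 *
 * Normal contact establishment:
 *   SELF_DOWN_PEER_DOWN --SELF_ESTABL_CONTACT_EVT--> SELF_UP_PEER_COMING
 *   SELF_UP_PEER_COMING --PEER_ESTABL_CONTACT_EVT--> SELF_UP_PEER_UP
 *
 * Failover between two links:
 *   SELF_UP_PEER_UP  --NODE_FAILOVER_BEGIN_EVT--> NODE_FAILINGOVER
 *   NODE_FAILINGOVER --NODE_FAILOVER_END_EVT---> SELF_UP_PEER_UP
 */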
1504
1505 static void node_lost_contact(struct tipc_node *n,
1506 struct sk_buff_head *inputq)
1507 {
1508 struct tipc_sock_conn *conn, *safe;
1509 struct tipc_link *l;
1510 struct list_head *conns = &n->conn_sks;
1511 struct sk_buff *skb;
1512 uint i;
1513
1514 pr_debug("Lost contact with %x\n", n->addr);
1515 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
1516 trace_tipc_node_lost_contact(n, true, " ");
1517
1518 /* Clean up broadcast state */
1519 tipc_bcast_remove_peer(n->net, n->bc_entry.link);
1520 skb_queue_purge(&n->bc_entry.namedq);
1521
1522 /* Abort any ongoing link failover */
1523 for (i = 0; i < MAX_BEARERS; i++) {
1524 l = n->links[i].link;
1525 if (l)
1526 tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
1527 }
1528
1529 /* Notify publications from this node */
1530 n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
1531 n->peer_net = NULL;
1532 n->peer_hash_mix = 0;
1533 /* Notify sockets connected to node */
1534 list_for_each_entry_safe(conn, safe, conns, list) {
1535 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
1536 SHORT_H_SIZE, 0, tipc_own_addr(n->net),
1537 conn->peer_node, conn->port,
1538 conn->peer_port, TIPC_ERR_NO_NODE);
1539 if (likely(skb))
1540 skb_queue_tail(inputq, skb);
1541 list_del(&conn->list);
1542 kfree(conn);
1543 }
1544 }
1545
1546 /**
1547 * tipc_node_get_linkname - get the name of a link
1548 *
1549 * @net: the applicable net namespace
1550 * @bearer_id: id of the bearer
1551 * @addr: peer node address
1552 * @linkname: link name output buffer
1553 * @len: size of @linkname output buffer
1554 *
1555 * Return: 0 on success
1556 */
1557 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
1558 char *linkname, size_t len)
1559 {
1560 struct tipc_link *link;
1561 int err = -EINVAL;
1562 struct tipc_node *node = tipc_node_find(net, addr);
1563
1564 if (!node)
1565 return err;
1566
1567 if (bearer_id >= MAX_BEARERS)
1568 goto exit;
1569
1570 tipc_node_read_lock(node);
1571 link = node->links[bearer_id].link;
1572 if (link) {
1573 strncpy(linkname, tipc_link_name(link), len);
1574 err = 0;
1575 }
1576 tipc_node_read_unlock(node);
1577 exit:
1578 tipc_node_put(node);
1579 return err;
1580 }
1581
1582 /* Caller should hold node lock for the passed node */
1583 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
1584 {
1585 void *hdr;
1586 struct nlattr *attrs;
1587
1588 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1589 NLM_F_MULTI, TIPC_NL_NODE_GET);
1590 if (!hdr)
1591 return -EMSGSIZE;
1592
1593 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
1594 if (!attrs)
1595 goto msg_full;
1596
1597 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
1598 goto attr_msg_full;
1599 if (node_is_up(node))
1600 if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
1601 goto attr_msg_full;
1602
1603 nla_nest_end(msg->skb, attrs);
1604 genlmsg_end(msg->skb, hdr);
1605
1606 return 0;
1607
1608 attr_msg_full:
1609 nla_nest_cancel(msg->skb, attrs);
1610 msg_full:
1611 genlmsg_cancel(msg->skb, hdr);
1612
1613 return -EMSGSIZE;
1614 }
1615
1616 static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
1617 {
1618 struct tipc_msg *hdr = buf_msg(skb_peek(list));
1619 struct sk_buff_head inputq;
1620
1621 switch (msg_user(hdr)) {
1622 case TIPC_LOW_IMPORTANCE:
1623 case TIPC_MEDIUM_IMPORTANCE:
1624 case TIPC_HIGH_IMPORTANCE:
1625 case TIPC_CRITICAL_IMPORTANCE:
1626 if (msg_connected(hdr) || msg_named(hdr) ||
1627 msg_direct(hdr)) {
1628 tipc_loopback_trace(peer_net, list);
1629 spin_lock_init(&list->lock);
1630 tipc_sk_rcv(peer_net, list);
1631 return;
1632 }
1633 if (msg_mcast(hdr)) {
1634 tipc_loopback_trace(peer_net, list);
1635 skb_queue_head_init(&inputq);
1636 tipc_sk_mcast_rcv(peer_net, list, &inputq);
1637 __skb_queue_purge(list);
1638 skb_queue_purge(&inputq);
1639 return;
1640 }
1641 return;
1642 case MSG_FRAGMENTER:
1643 if (tipc_msg_assemble(list)) {
1644 tipc_loopback_trace(peer_net, list);
1645 skb_queue_head_init(&inputq);
1646 tipc_sk_mcast_rcv(peer_net, list, &inputq);
1647 __skb_queue_purge(list);
1648 skb_queue_purge(&inputq);
1649 }
1650 return;
1651 case GROUP_PROTOCOL:
1652 case CONN_MANAGER:
1653 tipc_loopback_trace(peer_net, list);
1654 spin_lock_init(&list->lock);
1655 tipc_sk_rcv(peer_net, list);
1656 return;
1657 case LINK_PROTOCOL:
1658 case NAME_DISTRIBUTOR:
1659 case TUNNEL_PROTOCOL:
1660 case BCAST_PROTOCOL:
1661 return;
1662 default:
1663 return;
1664 }
1665 }
1666
1667 /**
1668 * tipc_node_xmit() - general link level function for message sending
1669 * @net: the applicable net namespace
1670 * @list: chain of buffers containing message
1671 * @dnode: address of destination node
1672 * @selector: a number used for deterministic link selection
1673 * Consumes the buffer chain.
1674 * Return: 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
1675 */
1676 int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1677 u32 dnode, int selector)
1678 {
1679 struct tipc_link_entry *le = NULL;
1680 struct tipc_node *n;
1681 struct sk_buff_head xmitq;
1682 bool node_up = false;
1683 int bearer_id;
1684 int rc;
1685
1686 if (in_own_node(net, dnode)) {
1687 tipc_loopback_trace(net, list);
1688 spin_lock_init(&list->lock);
1689 tipc_sk_rcv(net, list);
1690 return 0;
1691 }
1692
1693 n = tipc_node_find(net, dnode);
1694 if (unlikely(!n)) {
1695 __skb_queue_purge(list);
1696 return -EHOSTUNREACH;
1697 }
1698
1699 tipc_node_read_lock(n);
1700 node_up = node_is_up(n);
1701 if (node_up && n->peer_net && check_net(n->peer_net)) {
1702 		/* Transmit directly to the peer's local container namespace */
1703 tipc_lxc_xmit(n->peer_net, list);
1704 if (likely(skb_queue_empty(list))) {
1705 tipc_node_read_unlock(n);
1706 tipc_node_put(n);
1707 return 0;
1708 }
1709 }
1710
1711 bearer_id = n->active_links[selector & 1];
1712 if (unlikely(bearer_id == INVALID_BEARER_ID)) {
1713 tipc_node_read_unlock(n);
1714 tipc_node_put(n);
1715 __skb_queue_purge(list);
1716 return -EHOSTUNREACH;
1717 }
1718
1719 __skb_queue_head_init(&xmitq);
1720 le = &n->links[bearer_id];
1721 spin_lock_bh(&le->lock);
1722 rc = tipc_link_xmit(le->link, list, &xmitq);
1723 spin_unlock_bh(&le->lock);
1724 tipc_node_read_unlock(n);
1725
1726 if (unlikely(rc == -ENOBUFS))
1727 tipc_node_link_down(n, bearer_id, false);
1728 else
1729 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1730
1731 tipc_node_put(n);
1732
1733 return rc;
1734 }
1735
1736 /* tipc_node_xmit_skb(): send single buffer to destination
1737 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
1738  * messages, which will not be rejected.
1739 * The only exception is datagram messages rerouted after secondary
1740 * lookup, which are rare and safe to dispose of anyway.
1741 */
1742 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
1743 u32 selector)
1744 {
1745 struct sk_buff_head head;
1746
1747 __skb_queue_head_init(&head);
1748 __skb_queue_tail(&head, skb);
1749 tipc_node_xmit(net, &head, dnode, selector);
1750 return 0;
1751 }
1752
1753 /* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
1754 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
1755 */
1756 int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1757 {
1758 struct sk_buff *skb;
1759 u32 selector, dnode;
1760
1761 while ((skb = __skb_dequeue(xmitq))) {
1762 selector = msg_origport(buf_msg(skb));
1763 dnode = msg_destnode(buf_msg(skb));
1764 tipc_node_xmit_skb(net, skb, dnode, selector);
1765 }
1766 return 0;
1767 }
1768
1769 void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
1770 {
1771 struct sk_buff_head xmitq;
1772 struct sk_buff *txskb;
1773 struct tipc_node *n;
1774 u16 dummy;
1775 u32 dst;
1776
1777 /* Use broadcast if all nodes support it */
1778 if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
1779 __skb_queue_head_init(&xmitq);
1780 __skb_queue_tail(&xmitq, skb);
1781 tipc_bcast_xmit(net, &xmitq, &dummy);
1782 return;
1783 }
1784
1785 /* Otherwise use legacy replicast method */
1786 rcu_read_lock();
1787 list_for_each_entry_rcu(n, tipc_nodes(net), list) {
1788 dst = n->addr;
1789 if (in_own_node(net, dst))
1790 continue;
1791 if (!node_is_up(n))
1792 continue;
1793 txskb = pskb_copy(skb, GFP_ATOMIC);
1794 if (!txskb)
1795 break;
1796 msg_set_destnode(buf_msg(txskb), dst);
1797 tipc_node_xmit_skb(net, txskb, dst, 0);
1798 }
1799 rcu_read_unlock();
1800 kfree_skb(skb);
1801 }
1802
1803 static void tipc_node_mcast_rcv(struct tipc_node *n)
1804 {
1805 struct tipc_bclink_entry *be = &n->bc_entry;
1806
1807 /* 'arrvq' is under inputq2's lock protection */
1808 spin_lock_bh(&be->inputq2.lock);
1809 spin_lock_bh(&be->inputq1.lock);
1810 skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
1811 spin_unlock_bh(&be->inputq1.lock);
1812 spin_unlock_bh(&be->inputq2.lock);
1813 tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
1814 }
1815
1816 static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
1817 int bearer_id, struct sk_buff_head *xmitq)
1818 {
1819 struct tipc_link *ucl;
1820 int rc;
1821
1822 rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
1823
1824 if (rc & TIPC_LINK_DOWN_EVT) {
1825 tipc_node_reset_links(n);
1826 return;
1827 }
1828
1829 if (!(rc & TIPC_LINK_SND_STATE))
1830 return;
1831
1832 /* If probe message, a STATE response will be sent anyway */
1833 if (msg_probe(hdr))
1834 return;
1835
1836 /* Produce a STATE message carrying broadcast NACK */
1837 tipc_node_read_lock(n);
1838 ucl = n->links[bearer_id].link;
1839 if (ucl)
1840 tipc_link_build_state_msg(ucl, xmitq);
1841 tipc_node_read_unlock(n);
1842 }
1843
1844 /**
1845 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1846 * @net: the applicable net namespace
1847 * @skb: TIPC packet
1848 * @bearer_id: id of bearer message arrived on
1849 *
1850 * Invoked with no locks held.
1851 */
1852 static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
1853 {
1854 int rc;
1855 struct sk_buff_head xmitq;
1856 struct tipc_bclink_entry *be;
1857 struct tipc_link_entry *le;
1858 struct tipc_msg *hdr = buf_msg(skb);
1859 int usr = msg_user(hdr);
1860 u32 dnode = msg_destnode(hdr);
1861 struct tipc_node *n;
1862
1863 __skb_queue_head_init(&xmitq);
1864
1865 /* If NACK for other node, let rcv link for that node peek into it */
1866 if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
1867 n = tipc_node_find(net, dnode);
1868 else
1869 n = tipc_node_find(net, msg_prevnode(hdr));
1870 if (!n) {
1871 kfree_skb(skb);
1872 return;
1873 }
1874 be = &n->bc_entry;
1875 le = &n->links[bearer_id];
1876
1877 rc = tipc_bcast_rcv(net, be->link, skb);
1878
1879 /* Broadcast ACKs are sent on a unicast link */
1880 if (rc & TIPC_LINK_SND_STATE) {
1881 tipc_node_read_lock(n);
1882 tipc_link_build_state_msg(le->link, &xmitq);
1883 tipc_node_read_unlock(n);
1884 }
1885
1886 if (!skb_queue_empty(&xmitq))
1887 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1888
1889 if (!skb_queue_empty(&be->inputq1))
1890 tipc_node_mcast_rcv(n);
1891
1892 /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
1893 if (!skb_queue_empty(&n->bc_entry.namedq))
1894 tipc_named_rcv(net, &n->bc_entry.namedq,
1895 &n->bc_entry.named_rcv_nxt,
1896 &n->bc_entry.named_open);
1897
1898 /* If reassembly or retransmission failure => reset all links to peer */
1899 if (rc & TIPC_LINK_DOWN_EVT)
1900 tipc_node_reset_links(n);
1901
1902 tipc_node_put(n);
1903 }
1904
1905 /**
1906 * tipc_node_check_state - check and if necessary update node state
1907 * @n: target tipc_node
1908 * @skb: TIPC packet
1909 * @bearer_id: identity of bearer delivering the packet
1910 * @xmitq: queue for messages to be xmited on
1911 * Return: true if state and msg are ok, otherwise false
1912 */
1913 static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1914 int bearer_id, struct sk_buff_head *xmitq)
1915 {
1916 struct tipc_msg *hdr = buf_msg(skb);
1917 int usr = msg_user(hdr);
1918 int mtyp = msg_type(hdr);
1919 u16 oseqno = msg_seqno(hdr);
1920 u16 exp_pkts = msg_msgcnt(hdr);
1921 u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
1922 int state = n->state;
1923 struct tipc_link *l, *tnl, *pl = NULL;
1924 struct tipc_media_addr *maddr;
1925 int pb_id;
1926
1927 if (trace_tipc_node_check_state_enabled()) {
1928 trace_tipc_skb_dump(skb, false, "skb for node state check");
1929 trace_tipc_node_check_state(n, true, " ");
1930 }
1931 l = n->links[bearer_id].link;
1932 if (!l)
1933 return false;
1934 rcv_nxt = tipc_link_rcv_nxt(l);
1935
1936
1937 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1938 return true;
1939
1940 /* Find parallel link, if any */
1941 for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1942 if ((pb_id != bearer_id) && n->links[pb_id].link) {
1943 pl = n->links[pb_id].link;
1944 break;
1945 }
1946 }
1947
1948 if (!tipc_link_validate_msg(l, hdr)) {
1949 trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
1950 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
1951 return false;
1952 }
1953
1954 /* Check and update node accessibility if applicable */
1955 if (state == SELF_UP_PEER_COMING) {
1956 if (!tipc_link_is_up(l))
1957 return true;
1958 if (!msg_peer_link_is_up(hdr))
1959 return true;
1960 tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
1961 }
1962
1963 if (state == SELF_DOWN_PEER_LEAVING) {
1964 if (msg_peer_node_is_up(hdr))
1965 return false;
1966 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1967 return true;
1968 }
1969
1970 if (state == SELF_LEAVING_PEER_DOWN)
1971 return false;
1972
1973 /* Ignore duplicate packets */
1974 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1975 return true;
1976
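/* A FAILOVER_MSG tunnels traffic from a failed link over this one. The
 * last tunnelled packet (oseqno + exp_pkts - 1) marks the synchronization
 * point at which failover can end.
 */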
1977 /* Initiate or update failover mode if applicable */
1978 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1979 syncpt = oseqno + exp_pkts - 1;
1980 if (pl && !tipc_link_is_reset(pl)) {
1981 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1982 trace_tipc_node_link_down(n, true,
1983 "node link down <- failover!");
1984 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1985 tipc_link_inputq(l));
1986 }
1987
1988 /* If parallel link was already down, and this happened before
1989 * the tunnel link came up, node failover was never started.
1990 * Ensure that a FAILOVER_MSG is sent to get peer out of
1991 * NODE_FAILINGOVER state, also this node must accept
1992 * TUNNEL_MSGs from peer.
1993 */
1994 if (n->state != NODE_FAILINGOVER)
1995 tipc_node_link_failover(n, pl, l, xmitq);
1996
1997 /* If pkts arrive out of order, use lowest calculated syncpt */
1998 if (less(syncpt, n->sync_point))
1999 n->sync_point = syncpt;
2000 }
2001
2002 /* Open parallel link when tunnel link reaches synch point */
2003 if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
2004 if (!more(rcv_nxt, n->sync_point))
2005 return true;
2006 tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
2007 if (pl)
2008 tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
2009 return true;
2010 }
2011
2012 /* No synching needed if only one link */
2013 if (!pl || !tipc_link_is_up(pl))
2014 return true;
2015
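/* A SYNCH_MSG announces that a second, parallel link has come up. The
 * synch point is carried explicitly by TIPC_TUNNEL_ENHANCED peers, and is
 * otherwise derived from the tunnelled inner header.
 */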
2016 /* Initiate synch mode if applicable */
2017 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
2018 if (n->capabilities & TIPC_TUNNEL_ENHANCED)
2019 syncpt = msg_syncpt(hdr);
2020 else
2021 syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
2022 if (!tipc_link_is_up(l))
2023 __tipc_node_link_up(n, bearer_id, xmitq);
2024 if (n->state == SELF_UP_PEER_UP) {
2025 n->sync_point = syncpt;
2026 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
2027 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
2028 }
2029 }
2030
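/* dlv_nxt estimates the next sequence number to be delivered to sockets
 * on the parallel link: packets still sitting in its input queue have
 * been received but not yet delivered.
 */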
2031 /* Open tunnel link when parallel link reaches synch point */
2032 if (n->state == NODE_SYNCHING) {
2033 if (tipc_link_is_synching(l)) {
2034 tnl = l;
2035 } else {
2036 tnl = pl;
2037 pl = l;
2038 }
2039 inputq_len = skb_queue_len(tipc_link_inputq(pl));
2040 dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
2041 if (more(dlv_nxt, n->sync_point)) {
2042 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
2043 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
2044 return true;
2045 }
2046 if (l == pl)
2047 return true;
2048 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
2049 return true;
2050 if (usr == LINK_PROTOCOL)
2051 return true;
2052 return false;
2053 }
2054 return true;
2055 }
2056
2057 /**
2058 * tipc_rcv - process TIPC packets/messages arriving from off-node
2059 * @net: the applicable net namespace
2060 * @skb: TIPC packet
2061 * @b: pointer to the bearer the message arrived on
2062 *
2063 * Invoked with no locks held. Bearer pointer must point to a valid bearer
2064 * structure (i.e. cannot be NULL), but bearer can be inactive.
2065 */
2066 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
2067 {
2068 struct sk_buff_head xmitq;
2069 struct tipc_link_entry *le;
2070 struct tipc_msg *hdr;
2071 struct tipc_node *n;
2072 int bearer_id = b->identity;
2073 u32 self = tipc_own_addr(net);
2074 int usr, rc = 0;
2075 u16 bc_ack;
2076 #ifdef CONFIG_TIPC_CRYPTO
2077 struct tipc_ehdr *ehdr;
2078
2079 /* Check if message must be decrypted first */
2080 if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
2081 goto rcv;
2082
2083 ehdr = (struct tipc_ehdr *)skb->data;
2084 if (likely(ehdr->user != LINK_CONFIG)) {
2085 n = tipc_node_find(net, ntohl(ehdr->addr));
2086 if (unlikely(!n))
2087 goto discard;
2088 } else {
2089 n = tipc_node_find_by_id(net, ehdr->id);
2090 }
2091 tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
2092 if (!skb)
2093 return;
2094
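/* Arrival point for cleartext buffers, whether received as such or just
 * decrypted above
 */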
2095 rcv:
2096 #endif
2097 /* Ensure message is well-formed before touching the header */
2098 if (unlikely(!tipc_msg_validate(&skb)))
2099 goto discard;
2100 __skb_queue_head_init(&xmitq);
2101 hdr = buf_msg(skb);
2102 usr = msg_user(hdr);
2103 bc_ack = msg_bcast_ack(hdr);
2104
2105 /* Handle arrival of discovery or broadcast packet */
2106 if (unlikely(msg_non_seq(hdr))) {
2107 if (unlikely(usr == LINK_CONFIG))
2108 return tipc_disc_rcv(net, skb, b);
2109 else
2110 return tipc_node_bc_rcv(net, skb, bearer_id);
2111 }
2112
2113 /* Discard unicast link messages destined for another node */
2114 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
2115 goto discard;
2116
2117 /* Locate neighboring node that sent packet */
2118 n = tipc_node_find(net, msg_prevnode(hdr));
2119 if (unlikely(!n))
2120 goto discard;
2121 le = &n->links[bearer_id];
2122
2123 /* Ensure broadcast reception is in synch with peer's send state */
2124 if (unlikely(usr == LINK_PROTOCOL)) {
2125 if (unlikely(skb_linearize(skb))) {
2126 tipc_node_put(n);
2127 goto discard;
2128 }
2129 hdr = buf_msg(skb);
2130 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
2131 } else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
2132 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
2133 }
2134
2135 /* Receive packet directly if conditions permit */
2136 tipc_node_read_lock(n);
2137 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
2138 spin_lock_bh(&le->lock);
2139 if (le->link) {
2140 rc = tipc_link_rcv(le->link, skb, &xmitq);
2141 skb = NULL;
2142 }
2143 spin_unlock_bh(&le->lock);
2144 }
2145 tipc_node_read_unlock(n);
2146
2147 /* Check/update node state before receiving */
2148 if (unlikely(skb)) {
2149 if (unlikely(skb_linearize(skb)))
2150 goto out_node_put;
2151 tipc_node_write_lock(n);
2152 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
2153 if (le->link) {
2154 rc = tipc_link_rcv(le->link, skb, &xmitq);
2155 skb = NULL;
2156 }
2157 }
2158 tipc_node_write_unlock(n);
2159 }
2160
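/* Act on any link state events flagged by the receive calls, now that
 * the node and link locks have been dropped
 */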
2161 if (unlikely(rc & TIPC_LINK_UP_EVT))
2162 tipc_node_link_up(n, bearer_id, &xmitq);
2163
2164 if (unlikely(rc & TIPC_LINK_DOWN_EVT))
2165 tipc_node_link_down(n, bearer_id, false);
2166
2167 if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
2168 tipc_named_rcv(net, &n->bc_entry.namedq,
2169 &n->bc_entry.named_rcv_nxt,
2170 &n->bc_entry.named_open);
2171
2172 if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
2173 tipc_node_mcast_rcv(n);
2174
2175 if (!skb_queue_empty(&le->inputq))
2176 tipc_sk_rcv(net, &le->inputq);
2177
2178 if (!skb_queue_empty(&xmitq))
2179 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
2180
2181 out_node_put:
2182 tipc_node_put(n);
2183 discard:
2184 kfree_skb(skb);
2185 }
2186
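/* tipc_node_apply_property - propagate an updated bearer property
 * (tolerance or MTU) to the corresponding link entry on every known node
 */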
2187 void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
2188 int prop)
2189 {
2190 struct tipc_net *tn = tipc_net(net);
2191 int bearer_id = b->identity;
2192 struct sk_buff_head xmitq;
2193 struct tipc_link_entry *e;
2194 struct tipc_node *n;
2195
2196 __skb_queue_head_init(&xmitq);
2197
2198 rcu_read_lock();
2199
2200 list_for_each_entry_rcu(n, &tn->node_list, list) {
2201 tipc_node_write_lock(n);
2202 e = &n->links[bearer_id];
2203 if (e->link) {
2204 if (prop == TIPC_NLA_PROP_TOL)
2205 tipc_link_set_tolerance(e->link, b->tolerance,
2206 &xmitq);
2207 else if (prop == TIPC_NLA_PROP_MTU)
2208 tipc_link_set_mtu(e->link, b->mtu);
2209
2210 /* Update MTU for node link entry */
2211 e->mtu = tipc_link_mss(e->link);
2212 }
2213
2214 tipc_node_write_unlock(n);
2215 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
2216 }
2217
2218 rcu_read_unlock();
2219 }
2220
2221 int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
2222 {
2223 struct net *net = sock_net(skb->sk);
2224 struct tipc_net *tn = net_generic(net, tipc_net_id);
2225 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
2226 struct tipc_node *peer, *temp_node;
2227 u8 node_id[NODE_ID_LEN];
2228 u64 *w0 = (u64 *)&node_id[0];
2229 u64 *w1 = (u64 *)&node_id[8];
2230 u32 addr;
2231 int err;
2232
2233 /* The peer is identified via the TIPC_NLA_NET attribute set */
2234 if (!info->attrs[TIPC_NLA_NET])
2235 return -EINVAL;
2236
2237 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
2238 info->attrs[TIPC_NLA_NET],
2239 tipc_nl_net_policy, info->extack);
2240 if (err)
2241 return err;
2242
2243 /* attrs[TIPC_NLA_NET_NODEID] and attrs[TIPC_NLA_NET_ADDR] are
2244 * mutually exclusive cases
2245 */
2246 if (attrs[TIPC_NLA_NET_ADDR]) {
2247 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
2248 if (!addr)
2249 return -EINVAL;
2250 }
2251
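/* A 128-bit node identity is supplied as two 64-bit words and is
 * compressed into a 32-bit node address
 */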
2252 if (attrs[TIPC_NLA_NET_NODEID]) {
2253 if (!attrs[TIPC_NLA_NET_NODEID_W1])
2254 return -EINVAL;
2255 *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
2256 *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
2257 addr = hash128to32(node_id);
2258 }
2259
2260 if (in_own_node(net, addr))
2261 return -ENOTSUPP;
2262
2263 spin_lock_bh(&tn->node_list_lock);
2264 peer = tipc_node_find(net, addr);
2265 if (!peer) {
2266 spin_unlock_bh(&tn->node_list_lock);
2267 return -ENXIO;
2268 }
2269
2270 tipc_node_write_lock(peer);
2271 if (peer->state != SELF_DOWN_PEER_DOWN &&
2272 peer->state != SELF_DOWN_PEER_LEAVING) {
2273 tipc_node_write_unlock(peer);
2274 err = -EBUSY;
2275 goto err_out;
2276 }
2277
2278 tipc_node_clear_links(peer);
2279 tipc_node_write_unlock(peer);
2280 tipc_node_delete(peer);
2281
2282 /* Recalculate cluster capabilities now that the peer is gone */
2283 tn->capabilities = TIPC_NODE_CAPABILITIES;
2284 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
2285 tn->capabilities &= temp_node->capabilities;
2286 }
2287 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
2288 err = 0;
2289 err_out:
2290 tipc_node_put(peer);
2291 spin_unlock_bh(&tn->node_list_lock);
2292
2293 return err;
2294 }
2295
2296 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
2297 {
2298 int err;
2299 struct net *net = sock_net(skb->sk);
2300 struct tipc_net *tn = net_generic(net, tipc_net_id);
2301 int done = cb->args[0];
2302 int last_addr = cb->args[1];
2303 struct tipc_node *node;
2304 struct tipc_nl_msg msg;
2305
2306 if (done)
2307 return 0;
2308
2309 msg.skb = skb;
2310 msg.portid = NETLINK_CB(cb->skb).portid;
2311 msg.seq = cb->nlh->nlmsg_seq;
2312
2313 rcu_read_lock();
2314 if (last_addr) {
2315 node = tipc_node_find(net, last_addr);
2316 if (!node) {
2317 rcu_read_unlock();
2318 /* We never set seq or call nl_dump_check_consistent(),
2319 * which means that setting prev_seq here will cause the
2320 * consistency check to fail in the netlink callback
2321 * handler, resulting in the NLMSG_DONE message having
2322 * the NLM_F_DUMP_INTR flag set if the node state
2323 * changed while we released the lock.
2324 */
2325 cb->prev_seq = 1;
2326 return -EPIPE;
2327 }
2328 tipc_node_put(node);
2329 }
2330
2331 list_for_each_entry_rcu(node, &tn->node_list, list) {
2332 if (node->preliminary)
2333 continue;
2334 if (last_addr) {
2335 if (node->addr == last_addr)
2336 last_addr = 0;
2337 else
2338 continue;
2339 }
2340
2341 tipc_node_read_lock(node);
2342 err = __tipc_nl_add_node(&msg, node);
2343 if (err) {
2344 last_addr = node->addr;
2345 tipc_node_read_unlock(node);
2346 goto out;
2347 }
2348
2349 tipc_node_read_unlock(node);
2350 }
2351 done = 1;
2352 out:
2353 cb->args[0] = done;
2354 cb->args[1] = last_addr;
2355 rcu_read_unlock();
2356
2357 return skb->len;
2358 }
2359
2360 /* tipc_node_find_by_name - locate owner node of link by link's name
2361 * @net: the applicable net namespace
2362 * @link_name: pointer to link name string
2363 * @bearer_id: pointer to index in 'node->links' array where the link was found.
2364 *
2365 * Returns pointer to node owning the link, or NULL if no matching link is found.
2366 */
2367 static struct tipc_node *tipc_node_find_by_name(struct net *net,
2368 const char *link_name,
2369 unsigned int *bearer_id)
2370 {
2371 struct tipc_net *tn = net_generic(net, tipc_net_id);
2372 struct tipc_link *l;
2373 struct tipc_node *n;
2374 struct tipc_node *found_node = NULL;
2375 int i;
2376
2377 *bearer_id = 0;
2378 rcu_read_lock();
2379 list_for_each_entry_rcu(n, &tn->node_list, list) {
2380 tipc_node_read_lock(n);
2381 for (i = 0; i < MAX_BEARERS; i++) {
2382 l = n->links[i].link;
2383 if (l && !strcmp(tipc_link_name(l), link_name)) {
2384 *bearer_id = i;
2385 found_node = n;
2386 break;
2387 }
2388 }
2389 tipc_node_read_unlock(n);
2390 if (found_node)
2391 break;
2392 }
2393 rcu_read_unlock();
2394
2395 return found_node;
2396 }
2397
2398 int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
2399 {
2400 int err;
2401 int res = 0;
2402 int bearer_id;
2403 char *name;
2404 struct tipc_link *link;
2405 struct tipc_node *node;
2406 struct sk_buff_head xmitq;
2407 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2408 struct net *net = sock_net(skb->sk);
2409
2410 __skb_queue_head_init(&xmitq);
2411
2412 if (!info->attrs[TIPC_NLA_LINK])
2413 return -EINVAL;
2414
2415 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2416 info->attrs[TIPC_NLA_LINK],
2417 tipc_nl_link_policy, info->extack);
2418 if (err)
2419 return err;
2420
2421 if (!attrs[TIPC_NLA_LINK_NAME])
2422 return -EINVAL;
2423
2424 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2425
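/* The broadcast link is not owned by any node, so it is configured
 * through its own helper
 */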
2426 if (strcmp(name, tipc_bclink_name) == 0)
2427 return tipc_nl_bc_link_set(net, attrs);
2428
2429 node = tipc_node_find_by_name(net, name, &bearer_id);
2430 if (!node)
2431 return -EINVAL;
2432
2433 tipc_node_read_lock(node);
2434
2435 link = node->links[bearer_id].link;
2436 if (!link) {
2437 res = -EINVAL;
2438 goto out;
2439 }
2440
2441 if (attrs[TIPC_NLA_LINK_PROP]) {
2442 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
2443
2444 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
2445 if (err) {
2446 res = err;
2447 goto out;
2448 }
2449
2450 if (props[TIPC_NLA_PROP_TOL]) {
2451 u32 tol;
2452
2453 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2454 tipc_link_set_tolerance(link, tol, &xmitq);
2455 }
2456 if (props[TIPC_NLA_PROP_PRIO]) {
2457 u32 prio;
2458
2459 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2460 tipc_link_set_prio(link, prio, &xmitq);
2461 }
2462 if (props[TIPC_NLA_PROP_WIN]) {
2463 u32 max_win;
2464
2465 max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2466 tipc_link_set_queue_limits(link,
2467 tipc_link_min_win(link),
2468 max_win);
2469 }
2470 }
2471
2472 out:
2473 tipc_node_read_unlock(node);
2474 tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
2475 NULL);
2476 return res;
2477 }
2478
2479 int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
2480 {
2481 struct net *net = genl_info_net(info);
2482 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2483 struct tipc_nl_msg msg;
2484 char *name;
2485 int err;
2486
2487 msg.portid = info->snd_portid;
2488 msg.seq = info->snd_seq;
2489
2490 if (!info->attrs[TIPC_NLA_LINK])
2491 return -EINVAL;
2492
2493 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2494 info->attrs[TIPC_NLA_LINK],
2495 tipc_nl_link_policy, info->extack);
2496 if (err)
2497 return err;
2498
2499 if (!attrs[TIPC_NLA_LINK_NAME])
2500 return -EINVAL;
2501
2502 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2503
2504 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2505 if (!msg.skb)
2506 return -ENOMEM;
2507
2508 if (strcmp(name, tipc_bclink_name) == 0) {
2509 err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
2510 if (err)
2511 goto err_free;
2512 } else {
2513 int bearer_id;
2514 struct tipc_node *node;
2515 struct tipc_link *link;
2516
2517 node = tipc_node_find_by_name(net, name, &bearer_id);
2518 if (!node) {
2519 err = -EINVAL;
2520 goto err_free;
2521 }
2522
2523 tipc_node_read_lock(node);
2524 link = node->links[bearer_id].link;
2525 if (!link) {
2526 tipc_node_read_unlock(node);
2527 err = -EINVAL;
2528 goto err_free;
2529 }
2530
2531 err = __tipc_nl_add_link(net, &msg, link, 0);
2532 tipc_node_read_unlock(node);
2533 if (err)
2534 goto err_free;
2535 }
2536
2537 return genlmsg_reply(msg.skb, info);
2538
2539 err_free:
2540 nlmsg_free(msg.skb);
2541 return err;
2542 }
2543
2544 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
2545 {
2546 int err;
2547 char *link_name;
2548 unsigned int bearer_id;
2549 struct tipc_link *link;
2550 struct tipc_node *node;
2551 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2552 struct net *net = sock_net(skb->sk);
2553 struct tipc_net *tn = tipc_net(net);
2554 struct tipc_link_entry *le;
2555
2556 if (!info->attrs[TIPC_NLA_LINK])
2557 return -EINVAL;
2558
2559 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2560 info->attrs[TIPC_NLA_LINK],
2561 tipc_nl_link_policy, info->extack);
2562 if (err)
2563 return err;
2564
2565 if (!attrs[TIPC_NLA_LINK_NAME])
2566 return -EINVAL;
2567
2568 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2569
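/* Three cases are handled below: the broadcast send link (exact name
 * match), a peer's broadcast receive link (name containing the broadcast
 * link name), and an ordinary unicast link located by name
 */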
2570 err = -EINVAL;
2571 if (!strcmp(link_name, tipc_bclink_name)) {
2572 err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
2573 if (err)
2574 return err;
2575 return 0;
2576 } else if (strstr(link_name, tipc_bclink_name)) {
2577 rcu_read_lock();
2578 list_for_each_entry_rcu(node, &tn->node_list, list) {
2579 tipc_node_read_lock(node);
2580 link = node->bc_entry.link;
2581 if (link && !strcmp(link_name, tipc_link_name(link))) {
2582 err = tipc_bclink_reset_stats(net, link);
2583 tipc_node_read_unlock(node);
2584 break;
2585 }
2586 tipc_node_read_unlock(node);
2587 }
2588 rcu_read_unlock();
2589 return err;
2590 }
2591
2592 node = tipc_node_find_by_name(net, link_name, &bearer_id);
2593 if (!node)
2594 return -EINVAL;
2595
2596 le = &node->links[bearer_id];
2597 tipc_node_read_lock(node);
2598 spin_lock_bh(&le->lock);
2599 link = node->links[bearer_id].link;
2600 if (!link) {
2601 spin_unlock_bh(&le->lock);
2602 tipc_node_read_unlock(node);
2603 return -EINVAL;
2604 }
2605 tipc_link_reset_stats(link);
2606 spin_unlock_bh(&le->lock);
2607 tipc_node_read_unlock(node);
2608 return 0;
2609 }
2610
2611 /* Caller should hold node lock */
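/* On a partial dump, *prev_link tells the next call which bearer index
 * to resume from; it is reset to zero once all links have been added.
 */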
2612 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2613 struct tipc_node *node, u32 *prev_link,
2614 bool bc_link)
2615 {
2616 u32 i;
2617 int err;
2618
2619 for (i = *prev_link; i < MAX_BEARERS; i++) {
2620 *prev_link = i;
2621
2622 if (!node->links[i].link)
2623 continue;
2624
2625 err = __tipc_nl_add_link(net, msg,
2626 node->links[i].link, NLM_F_MULTI);
2627 if (err)
2628 return err;
2629 }
2630
2631 if (bc_link) {
2632 *prev_link = i;
2633 err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
2634 if (err)
2635 return err;
2636 }
2637
2638 *prev_link = 0;
2639
2640 return 0;
2641 }
2642
2643 int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
2644 {
2645 struct net *net = sock_net(skb->sk);
2646 struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
2647 struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
2648 struct tipc_net *tn = net_generic(net, tipc_net_id);
2649 struct tipc_node *node;
2650 struct tipc_nl_msg msg;
2651 u32 prev_node = cb->args[0];
2652 u32 prev_link = cb->args[1];
2653 int done = cb->args[2];
2654 bool bc_link = cb->args[3];
2655 int err;
2656
2657 if (done)
2658 return 0;
2659
2660 if (!prev_node) {
2661 /* Check if dumping of broadcast-receiver links was requested */
2662 if (attrs && attrs[TIPC_NLA_LINK]) {
2663 err = nla_parse_nested_deprecated(link,
2664 TIPC_NLA_LINK_MAX,
2665 attrs[TIPC_NLA_LINK],
2666 tipc_nl_link_policy,
2667 NULL);
2668 if (unlikely(err))
2669 return err;
2670 if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
2671 return -EINVAL;
2672 bc_link = true;
2673 }
2674 }
2675
2676 msg.skb = skb;
2677 msg.portid = NETLINK_CB(cb->skb).portid;
2678 msg.seq = cb->nlh->nlmsg_seq;
2679
2680 rcu_read_lock();
2681 if (prev_node) {
2682 node = tipc_node_find(net, prev_node);
2683 if (!node) {
2684 /* We never set seq or call nl_dump_check_consistent(),
2685 * which means that setting prev_seq here will cause the
2686 * consistency check to fail in the netlink callback
2687 * handler, resulting in the last NLMSG_DONE message
2688 * having the NLM_F_DUMP_INTR flag set.
2689 */
2690 cb->prev_seq = 1;
2691 goto out;
2692 }
2693 tipc_node_put(node);
2694
2695 list_for_each_entry_continue_rcu(node, &tn->node_list,
2696 list) {
2697 tipc_node_read_lock(node);
2698 err = __tipc_nl_add_node_links(net, &msg, node,
2699 &prev_link, bc_link);
2700 tipc_node_read_unlock(node);
2701 if (err)
2702 goto out;
2703
2704 prev_node = node->addr;
2705 }
2706 } else {
2707 err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
2708 if (err)
2709 goto out;
2710
2711 list_for_each_entry_rcu(node, &tn->node_list, list) {
2712 tipc_node_read_lock(node);
2713 err = __tipc_nl_add_node_links(net, &msg, node,
2714 &prev_link, bc_link);
2715 tipc_node_read_unlock(node);
2716 if (err)
2717 goto out;
2718
2719 prev_node = node->addr;
2720 }
2721 }
2722 done = 1;
2723 out:
2724 rcu_read_unlock();
2725
2726 cb->args[0] = prev_node;
2727 cb->args[1] = prev_link;
2728 cb->args[2] = done;
2729 cb->args[3] = bc_link;
2730
2731 return skb->len;
2732 }
2733
2734 int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
2735 {
2736 struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
2737 struct net *net = sock_net(skb->sk);
2738 int err;
2739
2740 if (!info->attrs[TIPC_NLA_MON])
2741 return -EINVAL;
2742
2743 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
2744 info->attrs[TIPC_NLA_MON],
2745 tipc_nl_monitor_policy,
2746 info->extack);
2747 if (err)
2748 return err;
2749
2750 if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
2751 u32 val;
2752
2753 val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
2754 err = tipc_nl_monitor_set_threshold(net, val);
2755 if (err)
2756 return err;
2757 }
2758
2759 return 0;
2760 }
2761
2762 static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
2763 {
2764 struct nlattr *attrs;
2765 void *hdr;
2766 u32 val;
2767
2768 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2769 0, TIPC_NL_MON_GET);
2770 if (!hdr)
2771 return -EMSGSIZE;
2772
2773 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
2774 if (!attrs)
2775 goto msg_full;
2776
2777 val = tipc_nl_monitor_get_threshold(net);
2778
2779 if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
2780 goto attr_msg_full;
2781
2782 nla_nest_end(msg->skb, attrs);
2783 genlmsg_end(msg->skb, hdr);
2784
2785 return 0;
2786
2787 attr_msg_full:
2788 nla_nest_cancel(msg->skb, attrs);
2789 msg_full:
2790 genlmsg_cancel(msg->skb, hdr);
2791
2792 return -EMSGSIZE;
2793 }
2794
2795 int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
2796 {
2797 struct net *net = sock_net(skb->sk);
2798 struct tipc_nl_msg msg;
2799 int err;
2800
2801 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2802 if (!msg.skb)
2803 return -ENOMEM;
2804 msg.portid = info->snd_portid;
2805 msg.seq = info->snd_seq;
2806
2807 err = __tipc_nl_add_monitor_prop(net, &msg);
2808 if (err) {
2809 nlmsg_free(msg.skb);
2810 return err;
2811 }
2812
2813 return genlmsg_reply(msg.skb, info);
2814 }
2815
2816 int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2817 {
2818 struct net *net = sock_net(skb->sk);
2819 u32 prev_bearer = cb->args[0];
2820 struct tipc_nl_msg msg;
2821 int bearer_id;
2822 int err;
2823
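/* prev_bearer doubles as the completion marker: it equals MAX_BEARERS
 * once all bearers have been dumped
 */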
2824 if (prev_bearer == MAX_BEARERS)
2825 return 0;
2826
2827 msg.skb = skb;
2828 msg.portid = NETLINK_CB(cb->skb).portid;
2829 msg.seq = cb->nlh->nlmsg_seq;
2830
2831 rtnl_lock();
2832 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
2833 err = __tipc_nl_add_monitor(net, &msg, bearer_id);
2834 if (err)
2835 break;
2836 }
2837 rtnl_unlock();
2838 cb->args[0] = bearer_id;
2839
2840 return skb->len;
2841 }
2842
2843 int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
2844 struct netlink_callback *cb)
2845 {
2846 struct net *net = sock_net(skb->sk);
2847 u32 prev_node = cb->args[1];
2848 u32 bearer_id = cb->args[2];
2849 int done = cb->args[0];
2850 struct tipc_nl_msg msg;
2851 int err;
2852
2853 if (!prev_node) {
2854 struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
2855 struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
2856
2857 if (!attrs[TIPC_NLA_MON])
2858 return -EINVAL;
2859
2860 err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
2861 attrs[TIPC_NLA_MON],
2862 tipc_nl_monitor_policy,
2863 NULL);
2864 if (err)
2865 return err;
2866
2867 if (!mon[TIPC_NLA_MON_REF])
2868 return -EINVAL;
2869
2870 bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
2871
2872 if (bearer_id >= MAX_BEARERS)
2873 return -EINVAL;
2874 }
2875
2876 if (done)
2877 return 0;
2878
2879 msg.skb = skb;
2880 msg.portid = NETLINK_CB(cb->skb).portid;
2881 msg.seq = cb->nlh->nlmsg_seq;
2882
2883 rtnl_lock();
2884 err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
2885 if (!err)
2886 done = 1;
2887
2888 rtnl_unlock();
2889 cb->args[0] = done;
2890 cb->args[1] = prev_node;
2891 cb->args[2] = bearer_id;
2892
2893 return skb->len;
2894 }
2895
2896 #ifdef CONFIG_TIPC_CRYPTO
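/* Extract a user-supplied AEAD key, verifying that the attribute covers
 * both the fixed key header and the amount of key material it claims to
 * carry
 */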
2897 static int tipc_nl_retrieve_key(struct nlattr **attrs,
2898 struct tipc_aead_key **pkey)
2899 {
2900 struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
2901 struct tipc_aead_key *key;
2902
2903 if (!attr)
2904 return -ENODATA;
2905
2906 if (nla_len(attr) < sizeof(*key))
2907 return -EINVAL;
2908 key = (struct tipc_aead_key *)nla_data(attr);
2909 if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
2910 nla_len(attr) < tipc_aead_key_size(key))
2911 return -EINVAL;
2912
2913 *pkey = key;
2914 return 0;
2915 }
2916
2917 static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
2918 {
2919 struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
2920
2921 if (!attr)
2922 return -ENODATA;
2923
2924 if (nla_len(attr) < TIPC_NODEID_LEN)
2925 return -EINVAL;
2926
2927 *node_id = (u8 *)nla_data(attr);
2928 return 0;
2929 }
2930
2931 static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
2932 {
2933 struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];
2934
2935 if (!attr)
2936 return -ENODATA;
2937
2938 *intv = nla_get_u32(attr);
2939 return 0;
2940 }
2941
2942 static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
2943 {
2944 struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
2945 struct net *net = sock_net(skb->sk);
2946 struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
2947 struct tipc_node *n = NULL;
2948 struct tipc_aead_key *ukey;
2949 bool rekeying = true, master_key = false;
2950 u8 *id, *own_id, mode;
2951 u32 intv = 0;
2952 int rc = 0;
2953
2954 if (!info->attrs[TIPC_NLA_NODE])
2955 return -EINVAL;
2956
2957 rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
2958 info->attrs[TIPC_NLA_NODE],
2959 tipc_nl_node_policy, info->extack);
2960 if (rc)
2961 return rc;
2962
2963 own_id = tipc_own_id(net);
2964 if (!own_id) {
2965 GENL_SET_ERR_MSG(info, "own node identity not found (set id?)");
2966 return -EPERM;
2967 }
2968
2969 rc = tipc_nl_retrieve_rekeying(attrs, &intv);
2970 if (rc == -ENODATA)
2971 rekeying = false;
2972
2973 rc = tipc_nl_retrieve_key(attrs, &ukey);
2974 if (rc == -ENODATA && rekeying)
2975 goto rekeying;
2976 else if (rc)
2977 return rc;
2978
2979 rc = tipc_aead_key_validate(ukey, info);
2980 if (rc)
2981 return rc;
2982
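/* Without a node identity the key applies cluster-wide (TX side). With
 * the identity of another peer it is attached to that peer's RX crypto
 * context, creating a preliminary node entry if none exists yet.
 */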
2983 rc = tipc_nl_retrieve_nodeid(attrs, &id);
2984 switch (rc) {
2985 case -ENODATA:
2986 mode = CLUSTER_KEY;
2987 master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
2988 break;
2989 case 0:
2990 mode = PER_NODE_KEY;
2991 if (memcmp(id, own_id, NODE_ID_LEN)) {
2992 n = tipc_node_find_by_id(net, id) ?:
2993 tipc_node_create(net, 0, id, 0xffffu, 0, true);
2994 if (unlikely(!n))
2995 return -ENOMEM;
2996 c = n->crypto_rx;
2997 }
2998 break;
2999 default:
3000 return rc;
3001 }
3002
3003 /* Initiate the TX/RX key */
3004 rc = tipc_crypto_key_init(c, ukey, mode, master_key);
3005 if (n)
3006 tipc_node_put(n);
3007
3008 if (unlikely(rc < 0)) {
3009 GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
3010 return rc;
3011 } else if (c == tx) {
3012 /* Distribute TX key but not master one */
3013 if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
3014 GENL_SET_ERR_MSG(info, "failed to replicate new key");
3015 rekeying:
3016 /* Schedule TX rekeying if needed */
3017 tipc_crypto_rekeying_sched(tx, rekeying, intv);
3018 }
3019
3020 return 0;
3021 }
3022
3023 int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
3024 {
3025 int err;
3026
3027 rtnl_lock();
3028 err = __tipc_nl_node_set_key(skb, info);
3029 rtnl_unlock();
3030
3031 return err;
3032 }
3033
3034 static int __tipc_nl_node_flush_key(struct sk_buff *skb,
3035 struct genl_info *info)
3036 {
3037 struct net *net = sock_net(skb->sk);
3038 struct tipc_net *tn = tipc_net(net);
3039 struct tipc_node *n;
3040
3041 tipc_crypto_key_flush(tn->crypto_tx);
3042 rcu_read_lock();
3043 list_for_each_entry_rcu(n, &tn->node_list, list)
3044 tipc_crypto_key_flush(n->crypto_rx);
3045 rcu_read_unlock();
3046
3047 return 0;
3048 }
3049
3050 int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
3051 {
3052 int err;
3053
3054 rtnl_lock();
3055 err = __tipc_nl_node_flush_key(skb, info);
3056 rtnl_unlock();
3057
3058 return err;
3059 }
3060 #endif
3061
3062 /**
3063 * tipc_node_dump - dump TIPC node data
3064 * @n: tipc node to be dumped
3065 * @more: dump more?
3066 * - false: dump only tipc node data
3067 * - true: dump node link data as well
3068 * @buf: buffer where the formatted dump data is returned
3069 */
3070 int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
3071 {
3072 int i = 0;
3073 size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
3074
3075 if (!n) {
3076 i += scnprintf(buf, sz, "node data: (null)\n");
3077 return i;
3078 }
3079
3080 i += scnprintf(buf, sz, "node data: %x", n->addr);
3081 i += scnprintf(buf + i, sz - i, " %x", n->state);
3082 i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
3083 i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
3084 i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
3085 i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
3086 i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
3087 i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
3088 i += scnprintf(buf + i, sz - i, " %u", n->working_links);
3089 i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
3090 i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
3091
3092 if (!more)
3093 return i;
3094
3095 i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
3096 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
3097 i += scnprintf(buf + i, sz - i, " media: ");
3098 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
3099 i += scnprintf(buf + i, sz - i, "\n");
3100 i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
3101 i += scnprintf(buf + i, sz - i, " inputq: ");
3102 i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
3103
3104 i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
3105 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
3106 i += scnprintf(buf + i, sz - i, " media: ");
3107 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
3108 i += scnprintf(buf + i, sz - i, "\n");
3109 i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
3110 i += scnprintf(buf + i, sz - i, " inputq: ");
3111 i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
3112
3113 i += scnprintf(buf + i, sz - i, "bclink:\n ");
3114 i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
3115
3116 return i;
3117 }
3118
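/* tipc_node_pre_cleanup_net - detach cross-namespace peer references to
 * a net namespace that is about to exit, so no node is left with a
 * dangling peer_net pointer
 */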
3119 void tipc_node_pre_cleanup_net(struct net *exit_net)
3120 {
3121 struct tipc_node *n;
3122 struct tipc_net *tn;
3123 struct net *tmp;
3124
3125 rcu_read_lock();
3126 for_each_net_rcu(tmp) {
3127 if (tmp == exit_net)
3128 continue;
3129 tn = tipc_net(tmp);
3130 if (!tn)
3131 continue;
3132 spin_lock_bh(&tn->node_list_lock);
3133 list_for_each_entry_rcu(n, &tn->node_list, list) {
3134 if (!n->peer_net)
3135 continue;
3136 if (n->peer_net != exit_net)
3137 continue;
3138 tipc_node_write_lock(n);
3139 n->peer_net = NULL;
3140 n->peer_hash_mix = 0;
3141 tipc_node_write_unlock_fast(n);
3142 break;
3143 }
3144 spin_unlock_bh(&tn->node_list_lock);
3145 }
3146 rcu_read_unlock();
3147 }