1 /*
2 * net/tipc/node.c: TIPC node management routines
3 *
4 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "link.h"
39 #include "node.h"
40 #include "name_distr.h"
41 #include "socket.h"
42 #include "bcast.h"
43 #include "monitor.h"
44 #include "discover.h"
45 #include "netlink.h"
46 #include "trace.h"
47 #include "crypto.h"
48
49 #define INVALID_NODE_SIG 0x10000
50 #define NODE_CLEANUP_AFTER 300000
51
52 /* Flags used to take different actions according to flag type
53 * TIPC_NOTIFY_NODE_DOWN: notify node is down
54 * TIPC_NOTIFY_NODE_UP: notify node is up
55  * TIPC_NOTIFY_LINK_UP/_DOWN: publish or withdraw the link state name
56 */
57 enum {
58 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
59 TIPC_NOTIFY_NODE_UP = (1 << 4),
60 TIPC_NOTIFY_LINK_UP = (1 << 6),
61 TIPC_NOTIFY_LINK_DOWN = (1 << 7)
62 };
63
64 struct tipc_link_entry {
65 struct tipc_link *link;
66 spinlock_t lock; /* per link */
67 u32 mtu;
68 struct sk_buff_head inputq;
69 struct tipc_media_addr maddr;
70 };
71
72 struct tipc_bclink_entry {
73 struct tipc_link *link;
74 struct sk_buff_head inputq1;
75 struct sk_buff_head arrvq;
76 struct sk_buff_head inputq2;
77 struct sk_buff_head namedq;
78 u16 named_rcv_nxt;
79 bool named_open;
80 };
81
82 /**
83 * struct tipc_node - TIPC node structure
84 * @addr: network address of node
85 * @kref: reference counter to node object
86 * @lock: rwlock governing access to structure
87 * @net: the applicable net namespace
88 * @hash: links to adjacent nodes in unsorted hash chain
89 * @inputq: pointer to input queue containing messages for msg event
90 * @namedq: pointer to name table input queue with name table messages
91 * @active_links: bearer ids of active links, used as index into links[] array
92 * @links: array containing references to all links to node
93 * @bc_entry: broadcast link entry
94 * @action_flags: bit mask of different types of node actions
95 * @state: connectivity state vs peer node
96 * @preliminary: a preliminary node or not
97 * @failover_sent: failover sent or not
98 * @sync_point: sequence number where synch/failover is finished
99 * @list: links to adjacent nodes in sorted list of cluster's nodes
100 * @working_links: number of working links to node (both active and standby)
101 * @link_cnt: number of links to node
102 * @capabilities: bitmap, indicating peer node's functional capabilities
103 * @signature: node instance identifier
104 * @link_id: local and remote bearer ids of changing link, if any
105 * @peer_id: 128-bit ID of peer
106 * @peer_id_string: ID string of peer
107 * @publ_list: list of publications
108 * @conn_sks: list of connections (FIXME)
109 * @timer: node's keepalive timer
110 * @keepalive_intv: keepalive interval in milliseconds
111 * @rcu: rcu struct for tipc_node
112 * @delete_at: indicates the time for deleting a down node
113 * @peer_net: peer's net namespace
114 * @peer_hash_mix: hash for this peer (FIXME)
115 * @crypto_rx: RX crypto handler
116 */
117 struct tipc_node {
118 u32 addr;
119 struct kref kref;
120 rwlock_t lock;
121 struct net *net;
122 struct hlist_node hash;
123 int active_links[2];
124 struct tipc_link_entry links[MAX_BEARERS];
125 struct tipc_bclink_entry bc_entry;
126 int action_flags;
127 struct list_head list;
128 int state;
129 bool preliminary;
130 bool failover_sent;
131 u16 sync_point;
132 int link_cnt;
133 u16 working_links;
134 u16 capabilities;
135 u32 signature;
136 u32 link_id;
137 u8 peer_id[16];
138 char peer_id_string[NODE_ID_STR_LEN];
139 struct list_head publ_list;
140 struct list_head conn_sks;
141 unsigned long keepalive_intv;
142 struct timer_list timer;
143 struct rcu_head rcu;
144 unsigned long delete_at;
145 struct net *peer_net;
146 u32 peer_hash_mix;
147 #ifdef CONFIG_TIPC_CRYPTO
148 struct tipc_crypto *crypto_rx;
149 #endif
150 };
151
152 /* Node FSM states and events:
153 */
154 enum {
155 SELF_DOWN_PEER_DOWN = 0xdd,
156 SELF_UP_PEER_UP = 0xaa,
157 SELF_DOWN_PEER_LEAVING = 0xd1,
158 SELF_UP_PEER_COMING = 0xac,
159 SELF_COMING_PEER_UP = 0xca,
160 SELF_LEAVING_PEER_DOWN = 0x1d,
161 NODE_FAILINGOVER = 0xf0,
162 NODE_SYNCHING = 0xcc
163 };
164
165 enum {
166 SELF_ESTABL_CONTACT_EVT = 0xece,
167 SELF_LOST_CONTACT_EVT = 0x1ce,
168 PEER_ESTABL_CONTACT_EVT = 0x9ece,
169 PEER_LOST_CONTACT_EVT = 0x91ce,
170 NODE_FAILOVER_BEGIN_EVT = 0xfbe,
171 NODE_FAILOVER_END_EVT = 0xfee,
172 NODE_SYNCH_BEGIN_EVT = 0xcbe,
173 NODE_SYNCH_END_EVT = 0xcee
174 };
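/* Reading the constants above, the hex values appear to be mnemonics rather
 * than arbitrary codes: for the states, the high nibble describes the "self"
 * side and the low nibble the peer side (0xd = down, 0xa = up, 0xc = coming,
 * 0x1 = leaving), e.g. SELF_UP_PEER_COMING = 0xac; the event values play
 * similarly on "establish contact"/"lost contact" and the failover/synch
 * begin/end pairs.
 */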
175
176 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
177 struct sk_buff_head *xmitq,
178 struct tipc_media_addr **maddr);
179 static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
180 bool delete);
181 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
182 static void tipc_node_delete(struct tipc_node *node);
183 static void tipc_node_timeout(struct timer_list *t);
184 static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
185 static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
186 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
187 static bool node_is_up(struct tipc_node *n);
188 static void tipc_node_delete_from_list(struct tipc_node *node);
189
190 struct tipc_sock_conn {
191 u32 port;
192 u32 peer_port;
193 u32 peer_node;
194 struct list_head list;
195 };
196
197 static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
198 {
199 int bearer_id = n->active_links[sel & 1];
200
201 if (unlikely(bearer_id == INVALID_BEARER_ID))
202 return NULL;
203
204 return n->links[bearer_id].link;
205 }
206
207 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
208 {
209 struct tipc_node *n;
210 int bearer_id;
211 unsigned int mtu = MAX_MSG_SIZE;
212
213 n = tipc_node_find(net, addr);
214 if (unlikely(!n))
215 return mtu;
216
217 	/* Allow MAX_MSG_SIZE when building connection-oriented messages
218 	 * if the nodes are in the same core network
219 	 */
220 if (n->peer_net && connected) {
221 tipc_node_put(n);
222 return mtu;
223 }
224
225 bearer_id = n->active_links[sel & 1];
226 if (likely(bearer_id != INVALID_BEARER_ID))
227 mtu = n->links[bearer_id].mtu;
228 tipc_node_put(n);
229 return mtu;
230 }
231
232 bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
233 {
234 u8 *own_id = tipc_own_id(net);
235 struct tipc_node *n;
236
237 if (!own_id)
238 return true;
239
240 if (addr == tipc_own_addr(net)) {
241 memcpy(id, own_id, TIPC_NODEID_LEN);
242 return true;
243 }
244 n = tipc_node_find(net, addr);
245 if (!n)
246 return false;
247
248 memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
249 tipc_node_put(n);
250 return true;
251 }
252
253 u16 tipc_node_get_capabilities(struct net *net, u32 addr)
254 {
255 struct tipc_node *n;
256 u16 caps;
257
258 n = tipc_node_find(net, addr);
259 if (unlikely(!n))
260 return TIPC_NODE_CAPABILITIES;
261 caps = n->capabilities;
262 tipc_node_put(n);
263 return caps;
264 }
265
266 u32 tipc_node_get_addr(struct tipc_node *node)
267 {
268 return (node) ? node->addr : 0;
269 }
270
271 char *tipc_node_get_id_str(struct tipc_node *node)
272 {
273 return node->peer_id_string;
274 }
275
276 #ifdef CONFIG_TIPC_CRYPTO
277 /**
278 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
279 * @__n: target tipc_node
280 * Note: node ref counter must be held first!
281 */
282 struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
283 {
284 return (__n) ? __n->crypto_rx : NULL;
285 }
286
287 struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
288 {
289 return container_of(pos, struct tipc_node, list)->crypto_rx;
290 }
291
292 struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
293 {
294 struct tipc_node *n;
295
296 n = tipc_node_find(net, addr);
297 return (n) ? n->crypto_rx : NULL;
298 }
299 #endif
300
301 static void tipc_node_free(struct rcu_head *rp)
302 {
303 struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
304
305 #ifdef CONFIG_TIPC_CRYPTO
306 tipc_crypto_stop(&n->crypto_rx);
307 #endif
308 kfree(n);
309 }
310
311 static void tipc_node_kref_release(struct kref *kref)
312 {
313 struct tipc_node *n = container_of(kref, struct tipc_node, kref);
314
315 kfree(n->bc_entry.link);
316 call_rcu(&n->rcu, tipc_node_free);
317 }
318
319 void tipc_node_put(struct tipc_node *node)
320 {
321 kref_put(&node->kref, tipc_node_kref_release);
322 }
323
324 void tipc_node_get(struct tipc_node *node)
325 {
326 kref_get(&node->kref);
327 }
328
329 /*
330 * tipc_node_find - locate specified node object, if it exists
331 */
332 static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
333 {
334 struct tipc_net *tn = tipc_net(net);
335 struct tipc_node *node;
336 unsigned int thash = tipc_hashfn(addr);
337
338 rcu_read_lock();
339 hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
340 if (node->addr != addr || node->preliminary)
341 continue;
342 if (!kref_get_unless_zero(&node->kref))
343 node = NULL;
344 break;
345 }
346 rcu_read_unlock();
347 return node;
348 }
349
350 /* tipc_node_find_by_id - locate specified node object by its 128-bit id
351 * Note: this function is called only when a discovery request failed
352 * to find the node by its 32-bit id, and is not time critical
353 */
354 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
355 {
356 struct tipc_net *tn = tipc_net(net);
357 struct tipc_node *n;
358 bool found = false;
359
360 rcu_read_lock();
361 list_for_each_entry_rcu(n, &tn->node_list, list) {
362 read_lock_bh(&n->lock);
363 if (!memcmp(id, n->peer_id, 16) &&
364 kref_get_unless_zero(&n->kref))
365 found = true;
366 read_unlock_bh(&n->lock);
367 if (found)
368 break;
369 }
370 rcu_read_unlock();
371 return found ? n : NULL;
372 }
373
374 static void tipc_node_read_lock(struct tipc_node *n)
375 __acquires(n->lock)
376 {
377 read_lock_bh(&n->lock);
378 }
379
380 static void tipc_node_read_unlock(struct tipc_node *n)
381 __releases(n->lock)
382 {
383 read_unlock_bh(&n->lock);
384 }
385
386 static void tipc_node_write_lock(struct tipc_node *n)
387 __acquires(n->lock)
388 {
389 write_lock_bh(&n->lock);
390 }
391
392 static void tipc_node_write_unlock_fast(struct tipc_node *n)
393 __releases(n->lock)
394 {
395 write_unlock_bh(&n->lock);
396 }
397
398 static void tipc_node_write_unlock(struct tipc_node *n)
399 __releases(n->lock)
400 {
401 struct tipc_socket_addr sk;
402 struct net *net = n->net;
403 u32 flags = n->action_flags;
404 struct list_head *publ_list;
405 struct tipc_uaddr ua;
406 u32 bearer_id, node;
407
408 if (likely(!flags)) {
409 write_unlock_bh(&n->lock);
410 return;
411 }
412
413 tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
414 TIPC_LINK_STATE, n->addr, n->addr);
415 sk.ref = n->link_id;
416 sk.node = tipc_own_addr(net);
417 node = n->addr;
418 bearer_id = n->link_id & 0xffff;
419 publ_list = &n->publ_list;
420
421 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
422 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
423
424 write_unlock_bh(&n->lock);
425
426 if (flags & TIPC_NOTIFY_NODE_DOWN)
427 tipc_publ_notify(net, publ_list, node, n->capabilities);
428
429 if (flags & TIPC_NOTIFY_NODE_UP)
430 tipc_named_node_up(net, node, n->capabilities);
431
432 if (flags & TIPC_NOTIFY_LINK_UP) {
433 tipc_mon_peer_up(net, node, bearer_id);
434 tipc_nametbl_publish(net, &ua, &sk, sk.ref);
435 }
436 if (flags & TIPC_NOTIFY_LINK_DOWN) {
437 tipc_mon_peer_down(net, node, bearer_id);
438 tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
439 }
440 }
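/* Note: tipc_node_write_unlock() snapshots the pending action flags and the
 * addressing data it needs while still holding the node write lock, clears
 * the notify flags, and only then releases the lock before calling into the
 * name table, name distributor and link monitor; presumably so those
 * subsystems, which take their own locks, are never entered with the node
 * lock held.
 */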
441
442 static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
443 {
444 int net_id = tipc_netid(n->net);
445 struct tipc_net *tn_peer;
446 struct net *tmp;
447 u32 hash_chk;
448
449 if (n->peer_net)
450 return;
451
452 for_each_net_rcu(tmp) {
453 tn_peer = tipc_net(tmp);
454 if (!tn_peer)
455 continue;
456 		/* Check whether the peer node exists in this namespace */
457 if (tn_peer->net_id != net_id)
458 continue;
459 if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
460 continue;
461 hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
462 if (hash_mixes ^ hash_chk)
463 continue;
464 n->peer_net = tmp;
465 n->peer_hash_mix = hash_mixes;
466 break;
467 }
468 }
469
470 struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
471 u16 capabilities, u32 hash_mixes,
472 bool preliminary)
473 {
474 struct tipc_net *tn = net_generic(net, tipc_net_id);
475 struct tipc_node *n, *temp_node;
476 struct tipc_link *l;
477 unsigned long intv;
478 int bearer_id;
479 int i;
480
481 spin_lock_bh(&tn->node_list_lock);
482 n = tipc_node_find(net, addr) ?:
483 tipc_node_find_by_id(net, peer_id);
484 if (n) {
485 if (!n->preliminary)
486 goto update;
487 if (preliminary)
488 goto exit;
489 /* A preliminary node becomes "real" now, refresh its data */
490 tipc_node_write_lock(n);
491 n->preliminary = false;
492 n->addr = addr;
493 hlist_del_rcu(&n->hash);
494 hlist_add_head_rcu(&n->hash,
495 &tn->node_htable[tipc_hashfn(addr)]);
496 list_del_rcu(&n->list);
497 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
498 if (n->addr < temp_node->addr)
499 break;
500 }
501 list_add_tail_rcu(&n->list, &temp_node->list);
502 tipc_node_write_unlock_fast(n);
503
504 update:
505 if (n->peer_hash_mix ^ hash_mixes)
506 tipc_node_assign_peer_net(n, hash_mixes);
507 if (n->capabilities == capabilities)
508 goto exit;
509 /* Same node may come back with new capabilities */
510 tipc_node_write_lock(n);
511 n->capabilities = capabilities;
512 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
513 l = n->links[bearer_id].link;
514 if (l)
515 tipc_link_update_caps(l, capabilities);
516 }
517 tipc_node_write_unlock_fast(n);
518
519 /* Calculate cluster capabilities */
520 tn->capabilities = TIPC_NODE_CAPABILITIES;
521 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
522 tn->capabilities &= temp_node->capabilities;
523 }
524
525 tipc_bcast_toggle_rcast(net,
526 (tn->capabilities & TIPC_BCAST_RCAST));
527
528 goto exit;
529 }
530 n = kzalloc(sizeof(*n), GFP_ATOMIC);
531 if (!n) {
532 pr_warn("Node creation failed, no memory\n");
533 goto exit;
534 }
535 tipc_nodeid2string(n->peer_id_string, peer_id);
536 #ifdef CONFIG_TIPC_CRYPTO
537 if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
538 pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
539 kfree(n);
540 n = NULL;
541 goto exit;
542 }
543 #endif
544 n->addr = addr;
545 n->preliminary = preliminary;
546 memcpy(&n->peer_id, peer_id, 16);
547 n->net = net;
548 n->peer_net = NULL;
549 n->peer_hash_mix = 0;
550 	/* Assign kernel-local namespace if one exists */
551 tipc_node_assign_peer_net(n, hash_mixes);
552 n->capabilities = capabilities;
553 kref_init(&n->kref);
554 rwlock_init(&n->lock);
555 INIT_HLIST_NODE(&n->hash);
556 INIT_LIST_HEAD(&n->list);
557 INIT_LIST_HEAD(&n->publ_list);
558 INIT_LIST_HEAD(&n->conn_sks);
559 skb_queue_head_init(&n->bc_entry.namedq);
560 skb_queue_head_init(&n->bc_entry.inputq1);
561 __skb_queue_head_init(&n->bc_entry.arrvq);
562 skb_queue_head_init(&n->bc_entry.inputq2);
563 for (i = 0; i < MAX_BEARERS; i++)
564 spin_lock_init(&n->links[i].lock);
565 n->state = SELF_DOWN_PEER_LEAVING;
566 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
567 n->signature = INVALID_NODE_SIG;
568 n->active_links[0] = INVALID_BEARER_ID;
569 n->active_links[1] = INVALID_BEARER_ID;
570 n->bc_entry.link = NULL;
571 tipc_node_get(n);
572 timer_setup(&n->timer, tipc_node_timeout, 0);
573 /* Start a slow timer anyway, crypto needs it */
574 n->keepalive_intv = 10000;
575 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
576 if (!mod_timer(&n->timer, intv))
577 tipc_node_get(n);
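	/* mod_timer() returns 0 when the timer was not already pending, so the
	 * conditional tipc_node_get() above takes an extra reference on behalf
	 * of the newly armed timer; it appears to be dropped again either in
	 * tipc_node_timeout() once tipc_node_cleanup() has deleted the node,
	 * or in tipc_node_delete() after del_timer_sync().
	 */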
578 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
579 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
580 if (n->addr < temp_node->addr)
581 break;
582 }
583 list_add_tail_rcu(&n->list, &temp_node->list);
584 /* Calculate cluster capabilities */
585 tn->capabilities = TIPC_NODE_CAPABILITIES;
586 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
587 tn->capabilities &= temp_node->capabilities;
588 }
589 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
590 trace_tipc_node_create(n, true, " ");
591 exit:
592 spin_unlock_bh(&tn->node_list_lock);
593 return n;
594 }
595
596 static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
597 {
598 unsigned long tol = tipc_link_tolerance(l);
599 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
600
601 /* Link with lowest tolerance determines timer interval */
602 if (intv < n->keepalive_intv)
603 n->keepalive_intv = intv;
604
605 /* Ensure link's abort limit corresponds to current tolerance */
606 tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
607 }
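/* A worked example of the arithmetic above, assuming a link tolerance of
 * 1500 ms: the candidate interval is min(tol / 4, 500) = 375 ms, so
 * keepalive_intv drops to 375 ms if it was larger; with keepalive_intv at
 * 375 ms, the abort limit passed to tipc_link_set_abort_limit() becomes
 * 1500 / 375 = 4 keepalive intervals.
 */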
608
609 static void tipc_node_delete_from_list(struct tipc_node *node)
610 {
611 #ifdef CONFIG_TIPC_CRYPTO
612 tipc_crypto_key_flush(node->crypto_rx);
613 #endif
614 list_del_rcu(&node->list);
615 hlist_del_rcu(&node->hash);
616 tipc_node_put(node);
617 }
618
619 static void tipc_node_delete(struct tipc_node *node)
620 {
621 trace_tipc_node_delete(node, true, " ");
622 tipc_node_delete_from_list(node);
623
624 del_timer_sync(&node->timer);
625 tipc_node_put(node);
626 }
627
628 void tipc_node_stop(struct net *net)
629 {
630 struct tipc_net *tn = tipc_net(net);
631 struct tipc_node *node, *t_node;
632
633 spin_lock_bh(&tn->node_list_lock);
634 list_for_each_entry_safe(node, t_node, &tn->node_list, list)
635 tipc_node_delete(node);
636 spin_unlock_bh(&tn->node_list_lock);
637 }
638
639 void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
640 {
641 struct tipc_node *n;
642
643 if (in_own_node(net, addr))
644 return;
645
646 n = tipc_node_find(net, addr);
647 if (!n) {
648 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
649 return;
650 }
651 tipc_node_write_lock(n);
652 list_add_tail(subscr, &n->publ_list);
653 tipc_node_write_unlock_fast(n);
654 tipc_node_put(n);
655 }
656
657 void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
658 {
659 struct tipc_node *n;
660
661 if (in_own_node(net, addr))
662 return;
663
664 n = tipc_node_find(net, addr);
665 if (!n) {
666 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
667 return;
668 }
669 tipc_node_write_lock(n);
670 list_del_init(subscr);
671 tipc_node_write_unlock_fast(n);
672 tipc_node_put(n);
673 }
674
675 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
676 {
677 struct tipc_node *node;
678 struct tipc_sock_conn *conn;
679 int err = 0;
680
681 if (in_own_node(net, dnode))
682 return 0;
683
684 node = tipc_node_find(net, dnode);
685 if (!node) {
686 pr_warn("Connecting sock to node 0x%x failed\n", dnode);
687 return -EHOSTUNREACH;
688 }
689 conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
690 if (!conn) {
691 err = -EHOSTUNREACH;
692 goto exit;
693 }
694 conn->peer_node = dnode;
695 conn->port = port;
696 conn->peer_port = peer_port;
697
698 tipc_node_write_lock(node);
699 list_add_tail(&conn->list, &node->conn_sks);
700 tipc_node_write_unlock(node);
701 exit:
702 tipc_node_put(node);
703 return err;
704 }
705
706 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
707 {
708 struct tipc_node *node;
709 struct tipc_sock_conn *conn, *safe;
710
711 if (in_own_node(net, dnode))
712 return;
713
714 node = tipc_node_find(net, dnode);
715 if (!node)
716 return;
717
718 tipc_node_write_lock(node);
719 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
720 if (port != conn->port)
721 continue;
722 list_del(&conn->list);
723 kfree(conn);
724 }
725 tipc_node_write_unlock(node);
726 tipc_node_put(node);
727 }
728
729 static void tipc_node_clear_links(struct tipc_node *node)
730 {
731 int i;
732
733 for (i = 0; i < MAX_BEARERS; i++) {
734 struct tipc_link_entry *le = &node->links[i];
735
736 if (le->link) {
737 kfree(le->link);
738 le->link = NULL;
739 node->link_cnt--;
740 }
741 }
742 }
743
744 /* tipc_node_cleanup - delete a node that has had no
745  * active links for NODE_CLEANUP_AFTER time
746  */
747 static bool tipc_node_cleanup(struct tipc_node *peer)
748 {
749 struct tipc_node *temp_node;
750 struct tipc_net *tn = tipc_net(peer->net);
751 bool deleted = false;
752
753 /* If lock held by tipc_node_stop() the node will be deleted anyway */
754 if (!spin_trylock_bh(&tn->node_list_lock))
755 return false;
756
757 tipc_node_write_lock(peer);
758
759 if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
760 tipc_node_clear_links(peer);
761 tipc_node_delete_from_list(peer);
762 deleted = true;
763 }
764 tipc_node_write_unlock(peer);
765
766 if (!deleted) {
767 spin_unlock_bh(&tn->node_list_lock);
768 return deleted;
769 }
770
771 /* Calculate cluster capabilities */
772 tn->capabilities = TIPC_NODE_CAPABILITIES;
773 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
774 tn->capabilities &= temp_node->capabilities;
775 }
776 tipc_bcast_toggle_rcast(peer->net,
777 (tn->capabilities & TIPC_BCAST_RCAST));
778 spin_unlock_bh(&tn->node_list_lock);
779 return deleted;
780 }
781
782 /* tipc_node_timeout - handle expiration of node timer
783 */
784 static void tipc_node_timeout(struct timer_list *t)
785 {
786 struct tipc_node *n = from_timer(n, t, timer);
787 struct tipc_link_entry *le;
788 struct sk_buff_head xmitq;
789 int remains = n->link_cnt;
790 int bearer_id;
791 int rc = 0;
792
793 trace_tipc_node_timeout(n, false, " ");
794 if (!node_is_up(n) && tipc_node_cleanup(n)) {
795 		/* Drop the timer's reference to the node */
796 tipc_node_put(n);
797 return;
798 }
799
800 #ifdef CONFIG_TIPC_CRYPTO
801 /* Take any crypto key related actions first */
802 tipc_crypto_timeout(n->crypto_rx);
803 #endif
804 __skb_queue_head_init(&xmitq);
805
806 	/* Reset the keepalive interval to a large value (10 seconds); it is
807 	 * recalculated below from the lowest link tolerance
808 	 */
809 tipc_node_read_lock(n);
810 n->keepalive_intv = 10000;
811 tipc_node_read_unlock(n);
812 for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
813 tipc_node_read_lock(n);
814 le = &n->links[bearer_id];
815 if (le->link) {
816 spin_lock_bh(&le->lock);
817 /* Link tolerance may change asynchronously: */
818 tipc_node_calculate_timer(n, le->link);
819 rc = tipc_link_timeout(le->link, &xmitq);
820 spin_unlock_bh(&le->lock);
821 remains--;
822 }
823 tipc_node_read_unlock(n);
824 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
825 if (rc & TIPC_LINK_DOWN_EVT)
826 tipc_node_link_down(n, bearer_id, false);
827 }
828 mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
829 }
830
831 /**
832 * __tipc_node_link_up - handle addition of link
833 * @n: target tipc_node
834 * @bearer_id: id of the bearer
835 * @xmitq: queue for messages to be xmited on
836 * Node lock must be held by caller
837 * Link becomes active (alone or shared) or standby, depending on its priority.
838 */
839 static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
840 struct sk_buff_head *xmitq)
841 {
842 int *slot0 = &n->active_links[0];
843 int *slot1 = &n->active_links[1];
844 struct tipc_link *ol = node_active_link(n, 0);
845 struct tipc_link *nl = n->links[bearer_id].link;
846
847 if (!nl || tipc_link_is_up(nl))
848 return;
849
850 tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
851 if (!tipc_link_is_up(nl))
852 return;
853
854 n->working_links++;
855 n->action_flags |= TIPC_NOTIFY_LINK_UP;
856 n->link_id = tipc_link_id(nl);
857
858 /* Leave room for tunnel header when returning 'mtu' to users: */
859 n->links[bearer_id].mtu = tipc_link_mss(nl);
860
861 tipc_bearer_add_dest(n->net, bearer_id, n->addr);
862 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
863
864 pr_debug("Established link <%s> on network plane %c\n",
865 tipc_link_name(nl), tipc_link_plane(nl));
866 trace_tipc_node_link_up(n, true, " ");
867
868 /* Ensure that a STATE message goes first */
869 tipc_link_build_state_msg(nl, xmitq);
870
871 /* First link? => give it both slots */
872 if (!ol) {
873 *slot0 = bearer_id;
874 *slot1 = bearer_id;
875 tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
876 n->action_flags |= TIPC_NOTIFY_NODE_UP;
877 tipc_link_set_active(nl, true);
878 tipc_bcast_add_peer(n->net, nl, xmitq);
879 return;
880 }
881
882 /* Second link => redistribute slots */
883 if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
884 pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
885 *slot0 = bearer_id;
886 *slot1 = bearer_id;
887 tipc_link_set_active(nl, true);
888 tipc_link_set_active(ol, false);
889 } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
890 tipc_link_set_active(nl, true);
891 *slot1 = bearer_id;
892 } else {
893 pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
894 }
895
896 /* Prepare synchronization with first link */
897 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
898 }
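/* The two active_links[] slots filled in above are indexed by (selector & 1)
 * in node_active_link() and tipc_node_xmit(): a single active link owns both
 * slots, two links of equal priority take one slot each (so traffic is
 * load-shared on the low bit of the caller's selector), and a lower priority
 * link gets no slot and remains standby.
 */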
899
900 /**
901 * tipc_node_link_up - handle addition of link
902 * @n: target tipc_node
903 * @bearer_id: id of the bearer
904 * @xmitq: queue for messages to be xmited on
905 *
906 * Link becomes active (alone or shared) or standby, depending on its priority.
907 */
908 static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
909 struct sk_buff_head *xmitq)
910 {
911 struct tipc_media_addr *maddr;
912
913 tipc_node_write_lock(n);
914 __tipc_node_link_up(n, bearer_id, xmitq);
915 maddr = &n->links[bearer_id].maddr;
916 tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
917 tipc_node_write_unlock(n);
918 }
919
920 /**
921 * tipc_node_link_failover() - start failover in case "half-failover"
922 *
923 * This function is only called in a very special situation where link
924 * failover can be already started on peer node but not on this node.
925 * This can happen when e.g.::
926 *
927 * 1. Both links <1A-2A>, <1B-2B> down
928 * 2. Link endpoint 2A up, but 1A still down (e.g. due to network
929 * disturbance, wrong session, etc.)
930 * 3. Link <1B-2B> up
931 * 4. Link endpoint 2A down (e.g. due to link tolerance timeout)
932 * 5. Node 2 starts failover onto link <1B-2B>
933 *
934  * ==> Node 1 never starts link/node failover!
935 *
936 * @n: tipc node structure
937  * @l: link peer endpoint failing over (can be NULL)
938 * @tnl: tunnel link
939 * @xmitq: queue for messages to be xmited on tnl link later
940 */
941 static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
942 struct tipc_link *tnl,
943 struct sk_buff_head *xmitq)
944 {
945 	/* Avoid a "self-failover" that can never end */
946 if (!tipc_link_is_up(tnl))
947 return;
948
949 	/* Don't rush, the failing link may be in the process of resetting */
950 if (l && !tipc_link_is_reset(l))
951 return;
952
953 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
954 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
955
956 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
957 tipc_link_failover_prepare(l, tnl, xmitq);
958
959 if (l)
960 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
961 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
962 }
963
964 /**
965 * __tipc_node_link_down - handle loss of link
966 * @n: target tipc_node
967 * @bearer_id: id of the bearer
968 * @xmitq: queue for messages to be xmited on
969 * @maddr: output media address of the bearer
970 */
971 static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
972 struct sk_buff_head *xmitq,
973 struct tipc_media_addr **maddr)
974 {
975 struct tipc_link_entry *le = &n->links[*bearer_id];
976 int *slot0 = &n->active_links[0];
977 int *slot1 = &n->active_links[1];
978 int i, highest = 0, prio;
979 struct tipc_link *l, *_l, *tnl;
980
981 l = n->links[*bearer_id].link;
982 if (!l || tipc_link_is_reset(l))
983 return;
984
985 n->working_links--;
986 n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
987 n->link_id = tipc_link_id(l);
988
989 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
990
991 pr_debug("Lost link <%s> on network plane %c\n",
992 tipc_link_name(l), tipc_link_plane(l));
993
994 /* Select new active link if any available */
995 *slot0 = INVALID_BEARER_ID;
996 *slot1 = INVALID_BEARER_ID;
997 for (i = 0; i < MAX_BEARERS; i++) {
998 _l = n->links[i].link;
999 if (!_l || !tipc_link_is_up(_l))
1000 continue;
1001 if (_l == l)
1002 continue;
1003 prio = tipc_link_prio(_l);
1004 if (prio < highest)
1005 continue;
1006 if (prio > highest) {
1007 highest = prio;
1008 *slot0 = i;
1009 *slot1 = i;
1010 continue;
1011 }
1012 *slot1 = i;
1013 }
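	/* For example, if the surviving up links on bearers 1, 2 and 3 have
	 * priorities 10, 20 and 20, the loop above ends with slot0 = 2 and
	 * slot1 = 3: both slots follow the highest priority, and two links of
	 * equal priority share the two slots.
	 */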
1014
1015 if (!node_is_up(n)) {
1016 if (tipc_link_peer_is_down(l))
1017 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1018 tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
1019 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
1020 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1021 tipc_link_reset(l);
1022 tipc_link_build_reset_msg(l, xmitq);
1023 *maddr = &n->links[*bearer_id].maddr;
1024 node_lost_contact(n, &le->inputq);
1025 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
1026 return;
1027 }
1028 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
1029
1030 /* There is still a working link => initiate failover */
1031 *bearer_id = n->active_links[0];
1032 tnl = n->links[*bearer_id].link;
1033 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
1034 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
1035 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
1036 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
1037 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
1038 tipc_link_reset(l);
1039 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1040 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
1041 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
1042 *maddr = &n->links[*bearer_id].maddr;
1043 }
1044
1045 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
1046 {
1047 struct tipc_link_entry *le = &n->links[bearer_id];
1048 struct tipc_media_addr *maddr = NULL;
1049 struct tipc_link *l = le->link;
1050 int old_bearer_id = bearer_id;
1051 struct sk_buff_head xmitq;
1052
1053 if (!l)
1054 return;
1055
1056 __skb_queue_head_init(&xmitq);
1057
1058 tipc_node_write_lock(n);
1059 if (!tipc_link_is_establishing(l)) {
1060 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
1061 } else {
1062 /* Defuse pending tipc_node_link_up() */
1063 tipc_link_reset(l);
1064 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1065 }
1066 if (delete) {
1067 kfree(l);
1068 le->link = NULL;
1069 n->link_cnt--;
1070 }
1071 trace_tipc_node_link_down(n, true, "node link down or deleted!");
1072 tipc_node_write_unlock(n);
1073 if (delete)
1074 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
1075 if (!skb_queue_empty(&xmitq))
1076 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
1077 tipc_sk_rcv(n->net, &le->inputq);
1078 }
1079
1080 static bool node_is_up(struct tipc_node *n)
1081 {
1082 return n->active_links[0] != INVALID_BEARER_ID;
1083 }
1084
1085 bool tipc_node_is_up(struct net *net, u32 addr)
1086 {
1087 struct tipc_node *n;
1088 bool retval = false;
1089
1090 if (in_own_node(net, addr))
1091 return true;
1092
1093 n = tipc_node_find(net, addr);
1094 if (!n)
1095 return false;
1096 retval = node_is_up(n);
1097 tipc_node_put(n);
1098 return retval;
1099 }
1100
1101 static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
1102 {
1103 struct tipc_node *n;
1104
1105 addr ^= tipc_net(net)->random;
1106 while ((n = tipc_node_find(net, addr))) {
1107 tipc_node_put(n);
1108 addr++;
1109 }
1110 return addr;
1111 }
1112
1113 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest another if not
1114 * Returns suggested address if any, otherwise 0
1115 */
1116 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
1117 {
1118 struct tipc_net *tn = tipc_net(net);
1119 struct tipc_node *n;
1120 bool preliminary;
1121 u32 sugg_addr;
1122
1123 /* Suggest new address if some other peer is using this one */
1124 n = tipc_node_find(net, addr);
1125 if (n) {
1126 if (!memcmp(n->peer_id, id, NODE_ID_LEN))
1127 addr = 0;
1128 tipc_node_put(n);
1129 if (!addr)
1130 return 0;
1131 return tipc_node_suggest_addr(net, addr);
1132 }
1133
1134 /* Suggest previously used address if peer is known */
1135 n = tipc_node_find_by_id(net, id);
1136 if (n) {
1137 sugg_addr = n->addr;
1138 preliminary = n->preliminary;
1139 tipc_node_put(n);
1140 if (!preliminary)
1141 return sugg_addr;
1142 }
1143
1144 /* Even this node may be in conflict */
1145 if (tn->trial_addr == addr)
1146 return tipc_node_suggest_addr(net, addr);
1147
1148 return 0;
1149 }
1150
1151 void tipc_node_check_dest(struct net *net, u32 addr,
1152 u8 *peer_id, struct tipc_bearer *b,
1153 u16 capabilities, u32 signature, u32 hash_mixes,
1154 struct tipc_media_addr *maddr,
1155 bool *respond, bool *dupl_addr)
1156 {
1157 struct tipc_node *n;
1158 struct tipc_link *l, *snd_l;
1159 struct tipc_link_entry *le;
1160 bool addr_match = false;
1161 bool sign_match = false;
1162 bool link_up = false;
1163 bool accept_addr = false;
1164 bool reset = true;
1165 char *if_name;
1166 unsigned long intv;
1167 u16 session;
1168
1169 *dupl_addr = false;
1170 *respond = false;
1171
1172 n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
1173 false);
1174 if (!n)
1175 return;
1176
1177 tipc_node_write_lock(n);
1178 if (unlikely(!n->bc_entry.link)) {
1179 snd_l = tipc_bc_sndlink(net);
1180 if (!tipc_link_bc_create(net, tipc_own_addr(net),
1181 addr, peer_id, U16_MAX,
1182 tipc_link_min_win(snd_l),
1183 tipc_link_max_win(snd_l),
1184 n->capabilities,
1185 &n->bc_entry.inputq1,
1186 &n->bc_entry.namedq, snd_l,
1187 &n->bc_entry.link)) {
1188 pr_warn("Broadcast rcv link creation failed, no mem\n");
1189 tipc_node_write_unlock_fast(n);
1190 tipc_node_put(n);
1191 return;
1192 }
1193 }
1194
1195 le = &n->links[b->identity];
1196
1197 /* Prepare to validate requesting node's signature and media address */
1198 l = le->link;
1199 link_up = l && tipc_link_is_up(l);
1200 addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
1201 sign_match = (signature == n->signature);
1202
1203 /* These three flags give us eight permutations: */
1204
1205 if (sign_match && addr_match && link_up) {
1206 /* All is fine. Do nothing. */
1207 reset = false;
1208 /* Peer node is not a container/local namespace */
1209 if (!n->peer_hash_mix)
1210 n->peer_hash_mix = hash_mixes;
1211 } else if (sign_match && addr_match && !link_up) {
1212 /* Respond. The link will come up in due time */
1213 *respond = true;
1214 } else if (sign_match && !addr_match && link_up) {
1215 /* Peer has changed i/f address without rebooting.
1216 * If so, the link will reset soon, and the next
1217 * discovery will be accepted. So we can ignore it.
1218 * It may also be a cloned or malicious peer having
1219 * chosen the same node address and signature as an
1220 * existing one.
1221 * Ignore requests until the link goes down, if ever.
1222 */
1223 *dupl_addr = true;
1224 } else if (sign_match && !addr_match && !link_up) {
1225 /* Peer link has changed i/f address without rebooting.
1226 * It may also be a cloned or malicious peer; we can't
1227 * distinguish between the two.
1228 * The signature is correct, so we must accept.
1229 */
1230 accept_addr = true;
1231 *respond = true;
1232 } else if (!sign_match && addr_match && link_up) {
1233 /* Peer node rebooted. Two possibilities:
1234 * - Delayed re-discovery; this link endpoint has already
1235 * reset and re-established contact with the peer, before
1236 * receiving a discovery message from that node.
1237 * (The peer happened to receive one from this node first).
1238 * - The peer came back so fast that our side has not
1239 * discovered it yet. Probing from this side will soon
1240 * reset the link, since there can be no working link
1241 * endpoint at the peer end, and the link will re-establish.
1242 * Accept the signature, since it comes from a known peer.
1243 */
1244 n->signature = signature;
1245 } else if (!sign_match && addr_match && !link_up) {
1246 /* The peer node has rebooted.
1247 * Accept signature, since it is a known peer.
1248 */
1249 n->signature = signature;
1250 *respond = true;
1251 } else if (!sign_match && !addr_match && link_up) {
1252 /* Peer rebooted with new address, or a new/duplicate peer.
1253 * Ignore until the link goes down, if ever.
1254 */
1255 *dupl_addr = true;
1256 } else if (!sign_match && !addr_match && !link_up) {
1257 /* Peer rebooted with new address, or it is a new peer.
1258 * Accept signature and address.
1259 */
1260 n->signature = signature;
1261 accept_addr = true;
1262 *respond = true;
1263 }
1264
1265 if (!accept_addr)
1266 goto exit;
1267
1268 /* Now create new link if not already existing */
1269 if (!l) {
1270 if (n->link_cnt == 2)
1271 goto exit;
1272
1273 if_name = strchr(b->name, ':') + 1;
1274 get_random_bytes(&session, sizeof(u16));
1275 if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
1276 b->net_plane, b->mtu, b->priority,
1277 b->min_win, b->max_win, session,
1278 tipc_own_addr(net), addr, peer_id,
1279 n->capabilities,
1280 tipc_bc_sndlink(n->net), n->bc_entry.link,
1281 &le->inputq,
1282 &n->bc_entry.namedq, &l)) {
1283 *respond = false;
1284 goto exit;
1285 }
1286 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
1287 tipc_link_reset(l);
1288 tipc_link_fsm_evt(l, LINK_RESET_EVT);
1289 if (n->state == NODE_FAILINGOVER)
1290 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
1291 le->link = l;
1292 n->link_cnt++;
1293 tipc_node_calculate_timer(n, l);
1294 if (n->link_cnt == 1) {
1295 intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
1296 if (!mod_timer(&n->timer, intv))
1297 tipc_node_get(n);
1298 }
1299 }
1300 memcpy(&le->maddr, maddr, sizeof(*maddr));
1301 exit:
1302 tipc_node_write_unlock(n);
1303 if (reset && l && !tipc_link_is_reset(l))
1304 tipc_node_link_down(n, b->identity, false);
1305 tipc_node_put(n);
1306 }
1307
1308 void tipc_node_delete_links(struct net *net, int bearer_id)
1309 {
1310 struct tipc_net *tn = net_generic(net, tipc_net_id);
1311 struct tipc_node *n;
1312
1313 rcu_read_lock();
1314 list_for_each_entry_rcu(n, &tn->node_list, list) {
1315 tipc_node_link_down(n, bearer_id, true);
1316 }
1317 rcu_read_unlock();
1318 }
1319
1320 static void tipc_node_reset_links(struct tipc_node *n)
1321 {
1322 int i;
1323
1324 pr_warn("Resetting all links to %x\n", n->addr);
1325
1326 trace_tipc_node_reset_links(n, true, " ");
1327 for (i = 0; i < MAX_BEARERS; i++) {
1328 tipc_node_link_down(n, i, false);
1329 }
1330 }
1331
1332 /* tipc_node_fsm_evt - node finite state machine
1333 * Determines when contact is allowed with peer node
1334 */
1335 static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
1336 {
1337 int state = n->state;
1338
1339 switch (state) {
1340 case SELF_DOWN_PEER_DOWN:
1341 switch (evt) {
1342 case SELF_ESTABL_CONTACT_EVT:
1343 state = SELF_UP_PEER_COMING;
1344 break;
1345 case PEER_ESTABL_CONTACT_EVT:
1346 state = SELF_COMING_PEER_UP;
1347 break;
1348 case SELF_LOST_CONTACT_EVT:
1349 case PEER_LOST_CONTACT_EVT:
1350 break;
1351 case NODE_SYNCH_END_EVT:
1352 case NODE_SYNCH_BEGIN_EVT:
1353 case NODE_FAILOVER_BEGIN_EVT:
1354 case NODE_FAILOVER_END_EVT:
1355 default:
1356 goto illegal_evt;
1357 }
1358 break;
1359 case SELF_UP_PEER_UP:
1360 switch (evt) {
1361 case SELF_LOST_CONTACT_EVT:
1362 state = SELF_DOWN_PEER_LEAVING;
1363 break;
1364 case PEER_LOST_CONTACT_EVT:
1365 state = SELF_LEAVING_PEER_DOWN;
1366 break;
1367 case NODE_SYNCH_BEGIN_EVT:
1368 state = NODE_SYNCHING;
1369 break;
1370 case NODE_FAILOVER_BEGIN_EVT:
1371 state = NODE_FAILINGOVER;
1372 break;
1373 case SELF_ESTABL_CONTACT_EVT:
1374 case PEER_ESTABL_CONTACT_EVT:
1375 case NODE_SYNCH_END_EVT:
1376 case NODE_FAILOVER_END_EVT:
1377 break;
1378 default:
1379 goto illegal_evt;
1380 }
1381 break;
1382 case SELF_DOWN_PEER_LEAVING:
1383 switch (evt) {
1384 case PEER_LOST_CONTACT_EVT:
1385 state = SELF_DOWN_PEER_DOWN;
1386 break;
1387 case SELF_ESTABL_CONTACT_EVT:
1388 case PEER_ESTABL_CONTACT_EVT:
1389 case SELF_LOST_CONTACT_EVT:
1390 break;
1391 case NODE_SYNCH_END_EVT:
1392 case NODE_SYNCH_BEGIN_EVT:
1393 case NODE_FAILOVER_BEGIN_EVT:
1394 case NODE_FAILOVER_END_EVT:
1395 default:
1396 goto illegal_evt;
1397 }
1398 break;
1399 case SELF_UP_PEER_COMING:
1400 switch (evt) {
1401 case PEER_ESTABL_CONTACT_EVT:
1402 state = SELF_UP_PEER_UP;
1403 break;
1404 case SELF_LOST_CONTACT_EVT:
1405 state = SELF_DOWN_PEER_DOWN;
1406 break;
1407 case SELF_ESTABL_CONTACT_EVT:
1408 case PEER_LOST_CONTACT_EVT:
1409 case NODE_SYNCH_END_EVT:
1410 case NODE_FAILOVER_BEGIN_EVT:
1411 break;
1412 case NODE_SYNCH_BEGIN_EVT:
1413 case NODE_FAILOVER_END_EVT:
1414 default:
1415 goto illegal_evt;
1416 }
1417 break;
1418 case SELF_COMING_PEER_UP:
1419 switch (evt) {
1420 case SELF_ESTABL_CONTACT_EVT:
1421 state = SELF_UP_PEER_UP;
1422 break;
1423 case PEER_LOST_CONTACT_EVT:
1424 state = SELF_DOWN_PEER_DOWN;
1425 break;
1426 case SELF_LOST_CONTACT_EVT:
1427 case PEER_ESTABL_CONTACT_EVT:
1428 break;
1429 case NODE_SYNCH_END_EVT:
1430 case NODE_SYNCH_BEGIN_EVT:
1431 case NODE_FAILOVER_BEGIN_EVT:
1432 case NODE_FAILOVER_END_EVT:
1433 default:
1434 goto illegal_evt;
1435 }
1436 break;
1437 case SELF_LEAVING_PEER_DOWN:
1438 switch (evt) {
1439 case SELF_LOST_CONTACT_EVT:
1440 state = SELF_DOWN_PEER_DOWN;
1441 break;
1442 case SELF_ESTABL_CONTACT_EVT:
1443 case PEER_ESTABL_CONTACT_EVT:
1444 case PEER_LOST_CONTACT_EVT:
1445 break;
1446 case NODE_SYNCH_END_EVT:
1447 case NODE_SYNCH_BEGIN_EVT:
1448 case NODE_FAILOVER_BEGIN_EVT:
1449 case NODE_FAILOVER_END_EVT:
1450 default:
1451 goto illegal_evt;
1452 }
1453 break;
1454 case NODE_FAILINGOVER:
1455 switch (evt) {
1456 case SELF_LOST_CONTACT_EVT:
1457 state = SELF_DOWN_PEER_LEAVING;
1458 break;
1459 case PEER_LOST_CONTACT_EVT:
1460 state = SELF_LEAVING_PEER_DOWN;
1461 break;
1462 case NODE_FAILOVER_END_EVT:
1463 state = SELF_UP_PEER_UP;
1464 break;
1465 case NODE_FAILOVER_BEGIN_EVT:
1466 case SELF_ESTABL_CONTACT_EVT:
1467 case PEER_ESTABL_CONTACT_EVT:
1468 break;
1469 case NODE_SYNCH_BEGIN_EVT:
1470 case NODE_SYNCH_END_EVT:
1471 default:
1472 goto illegal_evt;
1473 }
1474 break;
1475 case NODE_SYNCHING:
1476 switch (evt) {
1477 case SELF_LOST_CONTACT_EVT:
1478 state = SELF_DOWN_PEER_LEAVING;
1479 break;
1480 case PEER_LOST_CONTACT_EVT:
1481 state = SELF_LEAVING_PEER_DOWN;
1482 break;
1483 case NODE_SYNCH_END_EVT:
1484 state = SELF_UP_PEER_UP;
1485 break;
1486 case NODE_FAILOVER_BEGIN_EVT:
1487 state = NODE_FAILINGOVER;
1488 break;
1489 case NODE_SYNCH_BEGIN_EVT:
1490 case SELF_ESTABL_CONTACT_EVT:
1491 case PEER_ESTABL_CONTACT_EVT:
1492 break;
1493 case NODE_FAILOVER_END_EVT:
1494 default:
1495 goto illegal_evt;
1496 }
1497 break;
1498 default:
1499 pr_err("Unknown node fsm state %x\n", state);
1500 break;
1501 }
1502 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
1503 n->state = state;
1504 return;
1505
1506 illegal_evt:
1507 pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
1508 trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
1509 }
1510
1511 static void node_lost_contact(struct tipc_node *n,
1512 struct sk_buff_head *inputq)
1513 {
1514 struct tipc_sock_conn *conn, *safe;
1515 struct tipc_link *l;
1516 struct list_head *conns = &n->conn_sks;
1517 struct sk_buff *skb;
1518 uint i;
1519
1520 pr_debug("Lost contact with %x\n", n->addr);
1521 n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
1522 trace_tipc_node_lost_contact(n, true, " ");
1523
1524 /* Clean up broadcast state */
1525 tipc_bcast_remove_peer(n->net, n->bc_entry.link);
1526 skb_queue_purge(&n->bc_entry.namedq);
1527
1528 /* Abort any ongoing link failover */
1529 for (i = 0; i < MAX_BEARERS; i++) {
1530 l = n->links[i].link;
1531 if (l)
1532 tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
1533 }
1534
1535 /* Notify publications from this node */
1536 n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
1537 n->peer_net = NULL;
1538 n->peer_hash_mix = 0;
1539 /* Notify sockets connected to node */
1540 list_for_each_entry_safe(conn, safe, conns, list) {
1541 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
1542 SHORT_H_SIZE, 0, tipc_own_addr(n->net),
1543 conn->peer_node, conn->port,
1544 conn->peer_port, TIPC_ERR_NO_NODE);
1545 if (likely(skb))
1546 skb_queue_tail(inputq, skb);
1547 list_del(&conn->list);
1548 kfree(conn);
1549 }
1550 }
1551
1552 /**
1553 * tipc_node_get_linkname - get the name of a link
1554 *
1555 * @net: the applicable net namespace
1556 * @bearer_id: id of the bearer
1557 * @addr: peer node address
1558 * @linkname: link name output buffer
1559 * @len: size of @linkname output buffer
1560 *
1561 * Return: 0 on success
1562 */
1563 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
1564 char *linkname, size_t len)
1565 {
1566 struct tipc_link *link;
1567 int err = -EINVAL;
1568 struct tipc_node *node = tipc_node_find(net, addr);
1569
1570 if (!node)
1571 return err;
1572
1573 if (bearer_id >= MAX_BEARERS)
1574 goto exit;
1575
1576 tipc_node_read_lock(node);
1577 link = node->links[bearer_id].link;
1578 if (link) {
1579 strncpy(linkname, tipc_link_name(link), len);
1580 err = 0;
1581 }
1582 tipc_node_read_unlock(node);
1583 exit:
1584 tipc_node_put(node);
1585 return err;
1586 }
1587
1588 /* Caller should hold node lock for the passed node */
1589 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
1590 {
1591 void *hdr;
1592 struct nlattr *attrs;
1593
1594 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1595 NLM_F_MULTI, TIPC_NL_NODE_GET);
1596 if (!hdr)
1597 return -EMSGSIZE;
1598
1599 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
1600 if (!attrs)
1601 goto msg_full;
1602
1603 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
1604 goto attr_msg_full;
1605 if (node_is_up(node))
1606 if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
1607 goto attr_msg_full;
1608
1609 nla_nest_end(msg->skb, attrs);
1610 genlmsg_end(msg->skb, hdr);
1611
1612 return 0;
1613
1614 attr_msg_full:
1615 nla_nest_cancel(msg->skb, attrs);
1616 msg_full:
1617 genlmsg_cancel(msg->skb, hdr);
1618
1619 return -EMSGSIZE;
1620 }
1621
1622 static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
1623 {
1624 struct tipc_msg *hdr = buf_msg(skb_peek(list));
1625 struct sk_buff_head inputq;
1626
1627 switch (msg_user(hdr)) {
1628 case TIPC_LOW_IMPORTANCE:
1629 case TIPC_MEDIUM_IMPORTANCE:
1630 case TIPC_HIGH_IMPORTANCE:
1631 case TIPC_CRITICAL_IMPORTANCE:
1632 if (msg_connected(hdr) || msg_named(hdr) ||
1633 msg_direct(hdr)) {
1634 tipc_loopback_trace(peer_net, list);
1635 spin_lock_init(&list->lock);
1636 tipc_sk_rcv(peer_net, list);
1637 return;
1638 }
1639 if (msg_mcast(hdr)) {
1640 tipc_loopback_trace(peer_net, list);
1641 skb_queue_head_init(&inputq);
1642 tipc_sk_mcast_rcv(peer_net, list, &inputq);
1643 __skb_queue_purge(list);
1644 skb_queue_purge(&inputq);
1645 return;
1646 }
1647 return;
1648 case MSG_FRAGMENTER:
1649 if (tipc_msg_assemble(list)) {
1650 tipc_loopback_trace(peer_net, list);
1651 skb_queue_head_init(&inputq);
1652 tipc_sk_mcast_rcv(peer_net, list, &inputq);
1653 __skb_queue_purge(list);
1654 skb_queue_purge(&inputq);
1655 }
1656 return;
1657 case GROUP_PROTOCOL:
1658 case CONN_MANAGER:
1659 tipc_loopback_trace(peer_net, list);
1660 spin_lock_init(&list->lock);
1661 tipc_sk_rcv(peer_net, list);
1662 return;
1663 case LINK_PROTOCOL:
1664 case NAME_DISTRIBUTOR:
1665 case TUNNEL_PROTOCOL:
1666 case BCAST_PROTOCOL:
1667 return;
1668 default:
1669 return;
1670 }
1671 }
1672
1673 /**
1674 * tipc_node_xmit() - general link level function for message sending
1675 * @net: the applicable net namespace
1676 * @list: chain of buffers containing message
1677 * @dnode: address of destination node
1678 * @selector: a number used for deterministic link selection
1679 * Consumes the buffer chain.
1680 * Return: 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
1681 */
1682 int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1683 u32 dnode, int selector)
1684 {
1685 struct tipc_link_entry *le = NULL;
1686 struct tipc_node *n;
1687 struct sk_buff_head xmitq;
1688 bool node_up = false;
1689 int bearer_id;
1690 int rc;
1691
1692 if (in_own_node(net, dnode)) {
1693 tipc_loopback_trace(net, list);
1694 spin_lock_init(&list->lock);
1695 tipc_sk_rcv(net, list);
1696 return 0;
1697 }
1698
1699 n = tipc_node_find(net, dnode);
1700 if (unlikely(!n)) {
1701 __skb_queue_purge(list);
1702 return -EHOSTUNREACH;
1703 }
1704
1705 tipc_node_read_lock(n);
1706 node_up = node_is_up(n);
1707 if (node_up && n->peer_net && check_net(n->peer_net)) {
1708 /* xmit inner linux container */
1709 tipc_lxc_xmit(n->peer_net, list);
1710 if (likely(skb_queue_empty(list))) {
1711 tipc_node_read_unlock(n);
1712 tipc_node_put(n);
1713 return 0;
1714 }
1715 }
1716
1717 bearer_id = n->active_links[selector & 1];
1718 if (unlikely(bearer_id == INVALID_BEARER_ID)) {
1719 tipc_node_read_unlock(n);
1720 tipc_node_put(n);
1721 __skb_queue_purge(list);
1722 return -EHOSTUNREACH;
1723 }
1724
1725 __skb_queue_head_init(&xmitq);
1726 le = &n->links[bearer_id];
1727 spin_lock_bh(&le->lock);
1728 rc = tipc_link_xmit(le->link, list, &xmitq);
1729 spin_unlock_bh(&le->lock);
1730 tipc_node_read_unlock(n);
1731
1732 if (unlikely(rc == -ENOBUFS))
1733 tipc_node_link_down(n, bearer_id, false);
1734 else
1735 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1736
1737 tipc_node_put(n);
1738
1739 return rc;
1740 }
1741
1742 /* tipc_node_xmit_skb(): send single buffer to destination
1743 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
1744 * messages, which will not be rejected
1745 * The only exception is datagram messages rerouted after secondary
1746 * lookup, which are rare and safe to dispose of anyway.
1747 */
1748 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
1749 u32 selector)
1750 {
1751 struct sk_buff_head head;
1752
1753 __skb_queue_head_init(&head);
1754 __skb_queue_tail(&head, skb);
1755 tipc_node_xmit(net, &head, dnode, selector);
1756 return 0;
1757 }
1758
1759 /* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
1760 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
1761 */
1762 int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1763 {
1764 struct sk_buff *skb;
1765 u32 selector, dnode;
1766
1767 while ((skb = __skb_dequeue(xmitq))) {
1768 selector = msg_origport(buf_msg(skb));
1769 dnode = msg_destnode(buf_msg(skb));
1770 tipc_node_xmit_skb(net, skb, dnode, selector);
1771 }
1772 return 0;
1773 }
1774
1775 void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
1776 {
1777 struct sk_buff_head xmitq;
1778 struct sk_buff *txskb;
1779 struct tipc_node *n;
1780 u16 dummy;
1781 u32 dst;
1782
1783 /* Use broadcast if all nodes support it */
1784 if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
1785 __skb_queue_head_init(&xmitq);
1786 __skb_queue_tail(&xmitq, skb);
1787 tipc_bcast_xmit(net, &xmitq, &dummy);
1788 return;
1789 }
1790
1791 /* Otherwise use legacy replicast method */
1792 rcu_read_lock();
1793 list_for_each_entry_rcu(n, tipc_nodes(net), list) {
1794 dst = n->addr;
1795 if (in_own_node(net, dst))
1796 continue;
1797 if (!node_is_up(n))
1798 continue;
1799 txskb = pskb_copy(skb, GFP_ATOMIC);
1800 if (!txskb)
1801 break;
1802 msg_set_destnode(buf_msg(txskb), dst);
1803 tipc_node_xmit_skb(net, txskb, dst, 0);
1804 }
1805 rcu_read_unlock();
1806 kfree_skb(skb);
1807 }
1808
1809 static void tipc_node_mcast_rcv(struct tipc_node *n)
1810 {
1811 struct tipc_bclink_entry *be = &n->bc_entry;
1812
1813 /* 'arrvq' is under inputq2's lock protection */
1814 spin_lock_bh(&be->inputq2.lock);
1815 spin_lock_bh(&be->inputq1.lock);
1816 skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
1817 spin_unlock_bh(&be->inputq1.lock);
1818 spin_unlock_bh(&be->inputq2.lock);
1819 tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
1820 }
1821
1822 static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
1823 int bearer_id, struct sk_buff_head *xmitq)
1824 {
1825 struct tipc_link *ucl;
1826 int rc;
1827
1828 rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
1829
1830 if (rc & TIPC_LINK_DOWN_EVT) {
1831 tipc_node_reset_links(n);
1832 return;
1833 }
1834
1835 if (!(rc & TIPC_LINK_SND_STATE))
1836 return;
1837
1838 /* If probe message, a STATE response will be sent anyway */
1839 if (msg_probe(hdr))
1840 return;
1841
1842 /* Produce a STATE message carrying broadcast NACK */
1843 tipc_node_read_lock(n);
1844 ucl = n->links[bearer_id].link;
1845 if (ucl)
1846 tipc_link_build_state_msg(ucl, xmitq);
1847 tipc_node_read_unlock(n);
1848 }
1849
1850 /**
1851 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1852 * @net: the applicable net namespace
1853 * @skb: TIPC packet
1854 * @bearer_id: id of bearer message arrived on
1855 *
1856 * Invoked with no locks held.
1857 */
1858 static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
1859 {
1860 int rc;
1861 struct sk_buff_head xmitq;
1862 struct tipc_bclink_entry *be;
1863 struct tipc_link_entry *le;
1864 struct tipc_msg *hdr = buf_msg(skb);
1865 int usr = msg_user(hdr);
1866 u32 dnode = msg_destnode(hdr);
1867 struct tipc_node *n;
1868
1869 __skb_queue_head_init(&xmitq);
1870
1871 /* If NACK for other node, let rcv link for that node peek into it */
1872 if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
1873 n = tipc_node_find(net, dnode);
1874 else
1875 n = tipc_node_find(net, msg_prevnode(hdr));
1876 if (!n) {
1877 kfree_skb(skb);
1878 return;
1879 }
1880 be = &n->bc_entry;
1881 le = &n->links[bearer_id];
1882
1883 rc = tipc_bcast_rcv(net, be->link, skb);
1884
1885 /* Broadcast ACKs are sent on a unicast link */
1886 if (rc & TIPC_LINK_SND_STATE) {
1887 tipc_node_read_lock(n);
1888 tipc_link_build_state_msg(le->link, &xmitq);
1889 tipc_node_read_unlock(n);
1890 }
1891
1892 if (!skb_queue_empty(&xmitq))
1893 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1894
1895 if (!skb_queue_empty(&be->inputq1))
1896 tipc_node_mcast_rcv(n);
1897
1898 /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
1899 if (!skb_queue_empty(&n->bc_entry.namedq))
1900 tipc_named_rcv(net, &n->bc_entry.namedq,
1901 &n->bc_entry.named_rcv_nxt,
1902 &n->bc_entry.named_open);
1903
1904 /* If reassembly or retransmission failure => reset all links to peer */
1905 if (rc & TIPC_LINK_DOWN_EVT)
1906 tipc_node_reset_links(n);
1907
1908 tipc_node_put(n);
1909 }
1910
1911 /**
1912 * tipc_node_check_state - check and if necessary update node state
1913 * @n: target tipc_node
1914 * @skb: TIPC packet
1915 * @bearer_id: identity of bearer delivering the packet
1916 * @xmitq: queue for messages to be xmited on
1917 * Return: true if state and msg are ok, otherwise false
1918 */
1919 static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1920 int bearer_id, struct sk_buff_head *xmitq)
1921 {
1922 struct tipc_msg *hdr = buf_msg(skb);
1923 int usr = msg_user(hdr);
1924 int mtyp = msg_type(hdr);
1925 u16 oseqno = msg_seqno(hdr);
1926 u16 exp_pkts = msg_msgcnt(hdr);
1927 u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
1928 int state = n->state;
1929 struct tipc_link *l, *tnl, *pl = NULL;
1930 struct tipc_media_addr *maddr;
1931 int pb_id;
1932
1933 if (trace_tipc_node_check_state_enabled()) {
1934 trace_tipc_skb_dump(skb, false, "skb for node state check");
1935 trace_tipc_node_check_state(n, true, " ");
1936 }
1937 l = n->links[bearer_id].link;
1938 if (!l)
1939 return false;
1940 rcv_nxt = tipc_link_rcv_nxt(l);
1941
1942
1943 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1944 return true;
1945
1946 /* Find parallel link, if any */
1947 for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1948 if ((pb_id != bearer_id) && n->links[pb_id].link) {
1949 pl = n->links[pb_id].link;
1950 break;
1951 }
1952 }
1953
1954 if (!tipc_link_validate_msg(l, hdr)) {
1955 trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
1956 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
1957 return false;
1958 }
1959
1960 /* Check and update node accessibility if applicable */
1961 if (state == SELF_UP_PEER_COMING) {
1962 if (!tipc_link_is_up(l))
1963 return true;
1964 if (!msg_peer_link_is_up(hdr))
1965 return true;
1966 tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
1967 }
1968
1969 if (state == SELF_DOWN_PEER_LEAVING) {
1970 if (msg_peer_node_is_up(hdr))
1971 return false;
1972 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1973 return true;
1974 }
1975
1976 if (state == SELF_LEAVING_PEER_DOWN)
1977 return false;
1978
1979 /* Ignore duplicate packets */
1980 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1981 return true;
1982
1983 /* Initiate or update failover mode if applicable */
1984 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1985 syncpt = oseqno + exp_pkts - 1;
1986 if (pl && !tipc_link_is_reset(pl)) {
1987 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1988 trace_tipc_node_link_down(n, true,
1989 "node link down <- failover!");
1990 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1991 tipc_link_inputq(l));
1992 }
1993
1994 /* If parallel link was already down, and this happened before
1995 * the tunnel link came up, node failover was never started.
1996 * Ensure that a FAILOVER_MSG is sent to get peer out of
1997 * NODE_FAILINGOVER state, also this node must accept
1998 * TUNNEL_MSGs from peer.
1999 */
2000 if (n->state != NODE_FAILINGOVER)
2001 tipc_node_link_failover(n, pl, l, xmitq);
2002
2003 /* If pkts arrive out of order, use lowest calculated syncpt */
2004 if (less(syncpt, n->sync_point))
2005 n->sync_point = syncpt;
2006 }
2007
2008 /* Open parallel link when tunnel link reaches synch point */
2009 if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
2010 if (!more(rcv_nxt, n->sync_point))
2011 return true;
2012 tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
2013 if (pl)
2014 tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
2015 return true;
2016 }
2017
2018 /* No syncing needed if only one link */
2019 if (!pl || !tipc_link_is_up(pl))
2020 return true;
2021
2022 /* Initiate synch mode if applicable */
2023 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
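/* Peers advertising TIPC_TUNNEL_ENHANCED carry the sync point explicitly;
 * otherwise it is derived from the seqno of the tunneled original message.
 */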
2024 if (n->capabilities & TIPC_TUNNEL_ENHANCED)
2025 syncpt = msg_syncpt(hdr);
2026 else
2027 syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
2028 if (!tipc_link_is_up(l))
2029 __tipc_node_link_up(n, bearer_id, xmitq);
2030 if (n->state == SELF_UP_PEER_UP) {
2031 n->sync_point = syncpt;
2032 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
2033 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
2034 }
2035 }
2036
2037 /* Open tunnel link when parallel link reaches synch point */
2038 if (n->state == NODE_SYNCHING) {
2039 if (tipc_link_is_synching(l)) {
2040 tnl = l;
2041 } else {
2042 tnl = pl;
2043 pl = l;
2044 }
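/* Next sequence number actually delivered to sockets: rcv_nxt minus
 * whatever is still waiting in the parallel link's input queue.
 */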
2045 inputq_len = skb_queue_len(tipc_link_inputq(pl));
2046 dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
2047 if (more(dlv_nxt, n->sync_point)) {
2048 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
2049 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
2050 return true;
2051 }
2052 if (l == pl)
2053 return true;
2054 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
2055 return true;
2056 if (usr == LINK_PROTOCOL)
2057 return true;
2058 return false;
2059 }
2060 return true;
2061 }
2062
2063 /**
2064 * tipc_rcv - process TIPC packets/messages arriving from off-node
2065 * @net: the applicable net namespace
2066 * @skb: TIPC packet
2067 * @b: pointer to bearer message arrived on
2068 *
2069 * Invoked with no locks held. Bearer pointer must point to a valid bearer
2070 * structure (i.e. cannot be NULL), but bearer can be inactive.
2071 */
2072 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
2073 {
2074 struct sk_buff_head xmitq;
2075 struct tipc_link_entry *le;
2076 struct tipc_msg *hdr;
2077 struct tipc_node *n;
2078 int bearer_id = b->identity;
2079 u32 self = tipc_own_addr(net);
2080 int usr, rc = 0;
2081 u16 bc_ack;
2082 #ifdef CONFIG_TIPC_CRYPTO
2083 struct tipc_ehdr *ehdr;
2084
2085 /* Check if message must be decrypted first */
2086 if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
2087 goto rcv;
2088
2089 ehdr = (struct tipc_ehdr *)skb->data;
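/* LINK_CONFIG (discovery) messages identify the peer by its 128-bit
 * node id; all other users carry the peer's 32-bit node address.
 */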
2090 if (likely(ehdr->user != LINK_CONFIG)) {
2091 n = tipc_node_find(net, ntohl(ehdr->addr));
2092 if (unlikely(!n))
2093 goto discard;
2094 } else {
2095 n = tipc_node_find_by_id(net, ehdr->id);
2096 }
2097 tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
2098 if (!skb)
2099 return;
2100
2101 rcv:
2102 #endif
2103 /* Ensure message is well-formed before touching the header */
2104 if (unlikely(!tipc_msg_validate(&skb)))
2105 goto discard;
2106 __skb_queue_head_init(&xmitq);
2107 hdr = buf_msg(skb);
2108 usr = msg_user(hdr);
2109 bc_ack = msg_bcast_ack(hdr);
2110
2111 /* Handle arrival of discovery or broadcast packet */
2112 if (unlikely(msg_non_seq(hdr))) {
2113 if (unlikely(usr == LINK_CONFIG))
2114 return tipc_disc_rcv(net, skb, b);
2115 else
2116 return tipc_node_bc_rcv(net, skb, bearer_id);
2117 }
2118
2119 /* Discard unicast link messages destined for another node */
2120 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
2121 goto discard;
2122
2123 /* Locate neighboring node that sent packet */
2124 n = tipc_node_find(net, msg_prevnode(hdr));
2125 if (unlikely(!n))
2126 goto discard;
2127 le = &n->links[bearer_id];
2128
2129 /* Ensure broadcast reception is in synch with peer's send state */
2130 if (unlikely(usr == LINK_PROTOCOL)) {
2131 if (unlikely(skb_linearize(skb))) {
2132 tipc_node_put(n);
2133 goto discard;
2134 }
2135 hdr = buf_msg(skb);
2136 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
2137 } else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
2138 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
2139 }
2140
2141 /* Receive packet directly if conditions permit */
2142 tipc_node_read_lock(n);
2143 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
2144 spin_lock_bh(&le->lock);
2145 if (le->link) {
2146 rc = tipc_link_rcv(le->link, skb, &xmitq);
2147 skb = NULL;
2148 }
2149 spin_unlock_bh(&le->lock);
2150 }
2151 tipc_node_read_unlock(n);
2152
2153 /* Check/update node state before receiving */
2154 if (unlikely(skb)) {
2155 if (unlikely(skb_linearize(skb)))
2156 goto out_node_put;
2157 tipc_node_write_lock(n);
2158 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
2159 if (le->link) {
2160 rc = tipc_link_rcv(le->link, skb, &xmitq);
2161 skb = NULL;
2162 }
2163 }
2164 tipc_node_write_unlock(n);
2165 }
2166
2167 if (unlikely(rc & TIPC_LINK_UP_EVT))
2168 tipc_node_link_up(n, bearer_id, &xmitq);
2169
2170 if (unlikely(rc & TIPC_LINK_DOWN_EVT))
2171 tipc_node_link_down(n, bearer_id, false);
2172
2173 if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
2174 tipc_named_rcv(net, &n->bc_entry.namedq,
2175 &n->bc_entry.named_rcv_nxt,
2176 &n->bc_entry.named_open);
2177
2178 if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
2179 tipc_node_mcast_rcv(n);
2180
2181 if (!skb_queue_empty(&le->inputq))
2182 tipc_sk_rcv(net, &le->inputq);
2183
2184 if (!skb_queue_empty(&xmitq))
2185 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
2186
2187 out_node_put:
2188 tipc_node_put(n);
2189 discard:
2190 kfree_skb(skb);
2191 }
2192
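/* Propagate an updated bearer property (tolerance or MTU) to every
 * node's link entry on that bearer.
 */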
2193 void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
2194 int prop)
2195 {
2196 struct tipc_net *tn = tipc_net(net);
2197 int bearer_id = b->identity;
2198 struct sk_buff_head xmitq;
2199 struct tipc_link_entry *e;
2200 struct tipc_node *n;
2201
2202 __skb_queue_head_init(&xmitq);
2203
2204 rcu_read_lock();
2205
2206 list_for_each_entry_rcu(n, &tn->node_list, list) {
2207 tipc_node_write_lock(n);
2208 e = &n->links[bearer_id];
2209 if (e->link) {
2210 if (prop == TIPC_NLA_PROP_TOL)
2211 tipc_link_set_tolerance(e->link, b->tolerance,
2212 &xmitq);
2213 else if (prop == TIPC_NLA_PROP_MTU)
2214 tipc_link_set_mtu(e->link, b->mtu);
2215
2216 /* Update MTU for node link entry */
2217 e->mtu = tipc_link_mss(e->link);
2218 }
2219
2220 tipc_node_write_unlock(n);
2221 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
2222 }
2223
2224 rcu_read_unlock();
2225 }
2226
2227 int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
2228 {
2229 struct net *net = sock_net(skb->sk);
2230 struct tipc_net *tn = net_generic(net, tipc_net_id);
2231 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
2232 struct tipc_node *peer, *temp_node;
2233 u8 node_id[NODE_ID_LEN];
2234 u64 *w0 = (u64 *)&node_id[0];
2235 u64 *w1 = (u64 *)&node_id[8];
2236 u32 addr;
2237 int err;
2238
2239 /* We identify the peer by its net */
2240 if (!info->attrs[TIPC_NLA_NET])
2241 return -EINVAL;
2242
2243 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
2244 info->attrs[TIPC_NLA_NET],
2245 tipc_nl_net_policy, info->extack);
2246 if (err)
2247 return err;
2248
2249 /* attrs[TIPC_NLA_NET_NODEID] and attrs[TIPC_NLA_NET_ADDR] are
2250 * mutually exclusive cases
2251 */
2252 if (attrs[TIPC_NLA_NET_ADDR]) {
2253 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
2254 if (!addr)
2255 return -EINVAL;
2256 }
2257
2258 if (attrs[TIPC_NLA_NET_NODEID]) {
2259 if (!attrs[TIPC_NLA_NET_NODEID_W1])
2260 return -EINVAL;
2261 *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
2262 *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
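/* Map the 128-bit node identity to its 32-bit hash address for lookup */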
2263 addr = hash128to32(node_id);
2264 }
2265
2266 if (in_own_node(net, addr))
2267 return -ENOTSUPP;
2268
2269 spin_lock_bh(&tn->node_list_lock);
2270 peer = tipc_node_find(net, addr);
2271 if (!peer) {
2272 spin_unlock_bh(&tn->node_list_lock);
2273 return -ENXIO;
2274 }
2275
2276 tipc_node_write_lock(peer);
2277 if (peer->state != SELF_DOWN_PEER_DOWN &&
2278 peer->state != SELF_DOWN_PEER_LEAVING) {
2279 tipc_node_write_unlock(peer);
2280 err = -EBUSY;
2281 goto err_out;
2282 }
2283
2284 tipc_node_clear_links(peer);
2285 tipc_node_write_unlock(peer);
2286 tipc_node_delete(peer);
2287
2288 /* Calculate cluster capabilities */
2289 tn->capabilities = TIPC_NODE_CAPABILITIES;
2290 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
2291 tn->capabilities &= temp_node->capabilities;
2292 }
2293 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
2294 err = 0;
2295 err_out:
2296 tipc_node_put(peer);
2297 spin_unlock_bh(&tn->node_list_lock);
2298
2299 return err;
2300 }
2301
2302 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
2303 {
2304 int err;
2305 struct net *net = sock_net(skb->sk);
2306 struct tipc_net *tn = net_generic(net, tipc_net_id);
2307 int done = cb->args[0];
2308 int last_addr = cb->args[1];
2309 struct tipc_node *node;
2310 struct tipc_nl_msg msg;
2311
2312 if (done)
2313 return 0;
2314
2315 msg.skb = skb;
2316 msg.portid = NETLINK_CB(cb->skb).portid;
2317 msg.seq = cb->nlh->nlmsg_seq;
2318
2319 rcu_read_lock();
2320 if (last_addr) {
2321 node = tipc_node_find(net, last_addr);
2322 if (!node) {
2323 rcu_read_unlock();
2324 /* We never set seq or call nl_dump_check_consistent(),
2325 * so setting prev_seq here will cause the consistency
2326 * check to fail in the netlink callback handler,
2327 * resulting in the NLMSG_DONE message having the
2328 * NLM_F_DUMP_INTR flag set if the node state changed
2329 * while we released the lock.
2330 */
2331 cb->prev_seq = 1;
2332 return -EPIPE;
2333 }
2334 tipc_node_put(node);
2335 }
2336
2337 list_for_each_entry_rcu(node, &tn->node_list, list) {
2338 if (node->preliminary)
2339 continue;
2340 if (last_addr) {
2341 if (node->addr == last_addr)
2342 last_addr = 0;
2343 else
2344 continue;
2345 }
2346
2347 tipc_node_read_lock(node);
2348 err = __tipc_nl_add_node(&msg, node);
2349 if (err) {
2350 last_addr = node->addr;
2351 tipc_node_read_unlock(node);
2352 goto out;
2353 }
2354
2355 tipc_node_read_unlock(node);
2356 }
2357 done = 1;
2358 out:
2359 cb->args[0] = done;
2360 cb->args[1] = last_addr;
2361 rcu_read_unlock();
2362
2363 return skb->len;
2364 }
2365
2366 /* tipc_node_find_by_name - locate owner node of link by link's name
2367 * @net: the applicable net namespace
2368 * @link_name: pointer to link name string
2369 * @bearer_id: pointer to index in 'node->links' array where the link was found.
2370 *
2371 * Returns pointer to node owning the link, or 0 if no matching link is found.
2372 */
2373 static struct tipc_node *tipc_node_find_by_name(struct net *net,
2374 const char *link_name,
2375 unsigned int *bearer_id)
2376 {
2377 struct tipc_net *tn = net_generic(net, tipc_net_id);
2378 struct tipc_link *l;
2379 struct tipc_node *n;
2380 struct tipc_node *found_node = NULL;
2381 int i;
2382
2383 *bearer_id = 0;
2384 rcu_read_lock();
2385 list_for_each_entry_rcu(n, &tn->node_list, list) {
2386 tipc_node_read_lock(n);
2387 for (i = 0; i < MAX_BEARERS; i++) {
2388 l = n->links[i].link;
2389 if (l && !strcmp(tipc_link_name(l), link_name)) {
2390 *bearer_id = i;
2391 found_node = n;
2392 break;
2393 }
2394 }
2395 tipc_node_read_unlock(n);
2396 if (found_node)
2397 break;
2398 }
2399 rcu_read_unlock();
2400
2401 return found_node;
2402 }
2403
2404 int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
2405 {
2406 int err;
2407 int res = 0;
2408 int bearer_id;
2409 char *name;
2410 struct tipc_link *link;
2411 struct tipc_node *node;
2412 struct sk_buff_head xmitq;
2413 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2414 struct net *net = sock_net(skb->sk);
2415
2416 __skb_queue_head_init(&xmitq);
2417
2418 if (!info->attrs[TIPC_NLA_LINK])
2419 return -EINVAL;
2420
2421 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2422 info->attrs[TIPC_NLA_LINK],
2423 tipc_nl_link_policy, info->extack);
2424 if (err)
2425 return err;
2426
2427 if (!attrs[TIPC_NLA_LINK_NAME])
2428 return -EINVAL;
2429
2430 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2431
2432 if (strcmp(name, tipc_bclink_name) == 0)
2433 return tipc_nl_bc_link_set(net, attrs);
2434
2435 node = tipc_node_find_by_name(net, name, &bearer_id);
2436 if (!node)
2437 return -EINVAL;
2438
2439 tipc_node_read_lock(node);
2440
2441 link = node->links[bearer_id].link;
2442 if (!link) {
2443 res = -EINVAL;
2444 goto out;
2445 }
2446
2447 if (attrs[TIPC_NLA_LINK_PROP]) {
2448 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
2449
2450 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
2451 if (err) {
2452 res = err;
2453 goto out;
2454 }
2455
2456 if (props[TIPC_NLA_PROP_TOL]) {
2457 u32 tol;
2458
2459 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2460 tipc_link_set_tolerance(link, tol, &xmitq);
2461 }
2462 if (props[TIPC_NLA_PROP_PRIO]) {
2463 u32 prio;
2464
2465 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2466 tipc_link_set_prio(link, prio, &xmitq);
2467 }
2468 if (props[TIPC_NLA_PROP_WIN]) {
2469 u32 max_win;
2470
2471 max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2472 tipc_link_set_queue_limits(link,
2473 tipc_link_min_win(link),
2474 max_win);
2475 }
2476 }
2477
2478 out:
2479 tipc_node_read_unlock(node);
2480 tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
2481 NULL);
2482 return res;
2483 }
2484
2485 int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
2486 {
2487 struct net *net = genl_info_net(info);
2488 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2489 struct tipc_nl_msg msg;
2490 char *name;
2491 int err;
2492
2493 msg.portid = info->snd_portid;
2494 msg.seq = info->snd_seq;
2495
2496 if (!info->attrs[TIPC_NLA_LINK])
2497 return -EINVAL;
2498
2499 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2500 info->attrs[TIPC_NLA_LINK],
2501 tipc_nl_link_policy, info->extack);
2502 if (err)
2503 return err;
2504
2505 if (!attrs[TIPC_NLA_LINK_NAME])
2506 return -EINVAL;
2507
2508 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2509
2510 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2511 if (!msg.skb)
2512 return -ENOMEM;
2513
2514 if (strcmp(name, tipc_bclink_name) == 0) {
2515 err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
2516 if (err)
2517 goto err_free;
2518 } else {
2519 int bearer_id;
2520 struct tipc_node *node;
2521 struct tipc_link *link;
2522
2523 node = tipc_node_find_by_name(net, name, &bearer_id);
2524 if (!node) {
2525 err = -EINVAL;
2526 goto err_free;
2527 }
2528
2529 tipc_node_read_lock(node);
2530 link = node->links[bearer_id].link;
2531 if (!link) {
2532 tipc_node_read_unlock(node);
2533 err = -EINVAL;
2534 goto err_free;
2535 }
2536
2537 err = __tipc_nl_add_link(net, &msg, link, 0);
2538 tipc_node_read_unlock(node);
2539 if (err)
2540 goto err_free;
2541 }
2542
2543 return genlmsg_reply(msg.skb, info);
2544
2545 err_free:
2546 nlmsg_free(msg.skb);
2547 return err;
2548 }
2549
2550 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
2551 {
2552 int err;
2553 char *link_name;
2554 unsigned int bearer_id;
2555 struct tipc_link *link;
2556 struct tipc_node *node;
2557 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2558 struct net *net = sock_net(skb->sk);
2559 struct tipc_net *tn = tipc_net(net);
2560 struct tipc_link_entry *le;
2561
2562 if (!info->attrs[TIPC_NLA_LINK])
2563 return -EINVAL;
2564
2565 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2566 info->attrs[TIPC_NLA_LINK],
2567 tipc_nl_link_policy, info->extack);
2568 if (err)
2569 return err;
2570
2571 if (!attrs[TIPC_NLA_LINK_NAME])
2572 return -EINVAL;
2573
2574 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2575
2576 err = -EINVAL;
2577 if (!strcmp(link_name, tipc_bclink_name)) {
2578 err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
2579 if (err)
2580 return err;
2581 return 0;
2582 } else if (strstr(link_name, tipc_bclink_name)) {
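/* Name embeds the broadcast link name: reset stats on the matching
 * per-peer broadcast receive link.
 */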
2583 rcu_read_lock();
2584 list_for_each_entry_rcu(node, &tn->node_list, list) {
2585 tipc_node_read_lock(node);
2586 link = node->bc_entry.link;
2587 if (link && !strcmp(link_name, tipc_link_name(link))) {
2588 err = tipc_bclink_reset_stats(net, link);
2589 tipc_node_read_unlock(node);
2590 break;
2591 }
2592 tipc_node_read_unlock(node);
2593 }
2594 rcu_read_unlock();
2595 return err;
2596 }
2597
2598 node = tipc_node_find_by_name(net, link_name, &bearer_id);
2599 if (!node)
2600 return -EINVAL;
2601
2602 le = &node->links[bearer_id];
2603 tipc_node_read_lock(node);
2604 spin_lock_bh(&le->lock);
2605 link = node->links[bearer_id].link;
2606 if (!link) {
2607 spin_unlock_bh(&le->lock);
2608 tipc_node_read_unlock(node);
2609 return -EINVAL;
2610 }
2611 tipc_link_reset_stats(link);
2612 spin_unlock_bh(&le->lock);
2613 tipc_node_read_unlock(node);
2614 return 0;
2615 }
2616
2617 /* Caller should hold node lock */
2618 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2619 struct tipc_node *node, u32 *prev_link,
2620 bool bc_link)
2621 {
2622 u32 i;
2623 int err;
2624
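/* Resume from the link index where the previous dump pass stopped */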
2625 for (i = *prev_link; i < MAX_BEARERS; i++) {
2626 *prev_link = i;
2627
2628 if (!node->links[i].link)
2629 continue;
2630
2631 err = __tipc_nl_add_link(net, msg,
2632 node->links[i].link, NLM_F_MULTI);
2633 if (err)
2634 return err;
2635 }
2636
2637 if (bc_link) {
2638 *prev_link = i;
2639 err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
2640 if (err)
2641 return err;
2642 }
2643
2644 *prev_link = 0;
2645
2646 return 0;
2647 }
2648
2649 int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
2650 {
2651 struct net *net = sock_net(skb->sk);
2652 struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
2653 struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
2654 struct tipc_net *tn = net_generic(net, tipc_net_id);
2655 struct tipc_node *node;
2656 struct tipc_nl_msg msg;
2657 u32 prev_node = cb->args[0];
2658 u32 prev_link = cb->args[1];
2659 int done = cb->args[2];
2660 bool bc_link = cb->args[3];
2661 int err;
2662
2663 if (done)
2664 return 0;
2665
2666 if (!prev_node) {
2667 /* Check if broadcast-receiver links dumping is needed */
2668 if (attrs && attrs[TIPC_NLA_LINK]) {
2669 err = nla_parse_nested_deprecated(link,
2670 TIPC_NLA_LINK_MAX,
2671 attrs[TIPC_NLA_LINK],
2672 tipc_nl_link_policy,
2673 NULL);
2674 if (unlikely(err))
2675 return err;
2676 if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
2677 return -EINVAL;
2678 bc_link = true;
2679 }
2680 }
2681
2682 msg.skb = skb;
2683 msg.portid = NETLINK_CB(cb->skb).portid;
2684 msg.seq = cb->nlh->nlmsg_seq;
2685
2686 rcu_read_lock();
2687 if (prev_node) {
2688 node = tipc_node_find(net, prev_node);
2689 if (!node) {
2690 /* We never set seq or call nl_dump_check_consistent(),
2691 * so setting prev_seq here will cause the consistency
2692 * check to fail in the netlink callback handler,
2693 * resulting in the last NLMSG_DONE message having
2694 * the NLM_F_DUMP_INTR flag set.
2695 */
2696 cb->prev_seq = 1;
2697 goto out;
2698 }
2699 tipc_node_put(node);
2700
2701 list_for_each_entry_continue_rcu(node, &tn->node_list,
2702 list) {
2703 tipc_node_read_lock(node);
2704 err = __tipc_nl_add_node_links(net, &msg, node,
2705 &prev_link, bc_link);
2706 tipc_node_read_unlock(node);
2707 if (err)
2708 goto out;
2709
2710 prev_node = node->addr;
2711 }
2712 } else {
2713 err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
2714 if (err)
2715 goto out;
2716
2717 list_for_each_entry_rcu(node, &tn->node_list, list) {
2718 tipc_node_read_lock(node);
2719 err = __tipc_nl_add_node_links(net, &msg, node,
2720 &prev_link, bc_link);
2721 tipc_node_read_unlock(node);
2722 if (err)
2723 goto out;
2724
2725 prev_node = node->addr;
2726 }
2727 }
2728 done = 1;
2729 out:
2730 rcu_read_unlock();
2731
2732 cb->args[0] = prev_node;
2733 cb->args[1] = prev_link;
2734 cb->args[2] = done;
2735 cb->args[3] = bc_link;
2736
2737 return skb->len;
2738 }
2739
2740 int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
2741 {
2742 struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
2743 struct net *net = sock_net(skb->sk);
2744 int err;
2745
2746 if (!info->attrs[TIPC_NLA_MON])
2747 return -EINVAL;
2748
2749 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
2750 info->attrs[TIPC_NLA_MON],
2751 tipc_nl_monitor_policy,
2752 info->extack);
2753 if (err)
2754 return err;
2755
2756 if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
2757 u32 val;
2758
2759 val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
2760 err = tipc_nl_monitor_set_threshold(net, val);
2761 if (err)
2762 return err;
2763 }
2764
2765 return 0;
2766 }
2767
2768 static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
2769 {
2770 struct nlattr *attrs;
2771 void *hdr;
2772 u32 val;
2773
2774 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2775 0, TIPC_NL_MON_GET);
2776 if (!hdr)
2777 return -EMSGSIZE;
2778
2779 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
2780 if (!attrs)
2781 goto msg_full;
2782
2783 val = tipc_nl_monitor_get_threshold(net);
2784
2785 if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
2786 goto attr_msg_full;
2787
2788 nla_nest_end(msg->skb, attrs);
2789 genlmsg_end(msg->skb, hdr);
2790
2791 return 0;
2792
2793 attr_msg_full:
2794 nla_nest_cancel(msg->skb, attrs);
2795 msg_full:
2796 genlmsg_cancel(msg->skb, hdr);
2797
2798 return -EMSGSIZE;
2799 }
2800
2801 int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
2802 {
2803 struct net *net = sock_net(skb->sk);
2804 struct tipc_nl_msg msg;
2805 int err;
2806
2807 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2808 if (!msg.skb)
2809 return -ENOMEM;
2810 msg.portid = info->snd_portid;
2811 msg.seq = info->snd_seq;
2812
2813 err = __tipc_nl_add_monitor_prop(net, &msg);
2814 if (err) {
2815 nlmsg_free(msg.skb);
2816 return err;
2817 }
2818
2819 return genlmsg_reply(msg.skb, info);
2820 }
2821
2822 int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2823 {
2824 struct net *net = sock_net(skb->sk);
2825 u32 prev_bearer = cb->args[0];
2826 struct tipc_nl_msg msg;
2827 int bearer_id;
2828 int err;
2829
2830 if (prev_bearer == MAX_BEARERS)
2831 return 0;
2832
2833 msg.skb = skb;
2834 msg.portid = NETLINK_CB(cb->skb).portid;
2835 msg.seq = cb->nlh->nlmsg_seq;
2836
2837 rtnl_lock();
2838 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
2839 err = __tipc_nl_add_monitor(net, &msg, bearer_id);
2840 if (err)
2841 break;
2842 }
2843 rtnl_unlock();
2844 cb->args[0] = bearer_id;
2845
2846 return skb->len;
2847 }
2848
2849 int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
2850 struct netlink_callback *cb)
2851 {
2852 struct net *net = sock_net(skb->sk);
2853 u32 prev_node = cb->args[1];
2854 u32 bearer_id = cb->args[2];
2855 int done = cb->args[0];
2856 struct tipc_nl_msg msg;
2857 int err;
2858
2859 if (!prev_node) {
2860 struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
2861 struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
2862
2863 if (!attrs[TIPC_NLA_MON])
2864 return -EINVAL;
2865
2866 err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
2867 attrs[TIPC_NLA_MON],
2868 tipc_nl_monitor_policy,
2869 NULL);
2870 if (err)
2871 return err;
2872
2873 if (!mon[TIPC_NLA_MON_REF])
2874 return -EINVAL;
2875
2876 bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
2877
2878 if (bearer_id >= MAX_BEARERS)
2879 return -EINVAL;
2880 }
2881
2882 if (done)
2883 return 0;
2884
2885 msg.skb = skb;
2886 msg.portid = NETLINK_CB(cb->skb).portid;
2887 msg.seq = cb->nlh->nlmsg_seq;
2888
2889 rtnl_lock();
2890 err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
2891 if (!err)
2892 done = 1;
2893
2894 rtnl_unlock();
2895 cb->args[0] = done;
2896 cb->args[1] = prev_node;
2897 cb->args[2] = bearer_id;
2898
2899 return skb->len;
2900 }
2901
2902 #ifdef CONFIG_TIPC_CRYPTO
2903 static int tipc_nl_retrieve_key(struct nlattr **attrs,
2904 struct tipc_aead_key **pkey)
2905 {
2906 struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
2907 struct tipc_aead_key *key;
2908
2909 if (!attr)
2910 return -ENODATA;
2911
2912 if (nla_len(attr) < sizeof(*key))
2913 return -EINVAL;
2914 key = (struct tipc_aead_key *)nla_data(attr);
2915 if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
2916 nla_len(attr) < tipc_aead_key_size(key))
2917 return -EINVAL;
2918
2919 *pkey = key;
2920 return 0;
2921 }
2922
2923 static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
2924 {
2925 struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
2926
2927 if (!attr)
2928 return -ENODATA;
2929
2930 if (nla_len(attr) < TIPC_NODEID_LEN)
2931 return -EINVAL;
2932
2933 *node_id = (u8 *)nla_data(attr);
2934 return 0;
2935 }
2936
2937 static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
2938 {
2939 struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];
2940
2941 if (!attr)
2942 return -ENODATA;
2943
2944 *intv = nla_get_u32(attr);
2945 return 0;
2946 }
2947
2948 static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
2949 {
2950 struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
2951 struct net *net = sock_net(skb->sk);
2952 struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
2953 struct tipc_node *n = NULL;
2954 struct tipc_aead_key *ukey;
2955 bool rekeying = true, master_key = false;
2956 u8 *id, *own_id, mode;
2957 u32 intv = 0;
2958 int rc = 0;
2959
2960 if (!info->attrs[TIPC_NLA_NODE])
2961 return -EINVAL;
2962
2963 rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
2964 info->attrs[TIPC_NLA_NODE],
2965 tipc_nl_node_policy, info->extack);
2966 if (rc)
2967 return rc;
2968
2969 own_id = tipc_own_id(net);
2970 if (!own_id) {
2971 GENL_SET_ERR_MSG(info, "not found own node identity (set id?)");
2972 return -EPERM;
2973 }
2974
2975 rc = tipc_nl_retrieve_rekeying(attrs, &intv);
2976 if (rc == -ENODATA)
2977 rekeying = false;
2978
2979 rc = tipc_nl_retrieve_key(attrs, &ukey);
2980 if (rc == -ENODATA && rekeying)
2981 goto rekeying;
2982 else if (rc)
2983 return rc;
2984
2985 rc = tipc_aead_key_validate(ukey, info);
2986 if (rc)
2987 return rc;
2988
2989 rc = tipc_nl_retrieve_nodeid(attrs, &id);
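/* No node id attribute means a cluster-wide key; an explicit id selects
 * a per-node key, applied to the peer's RX crypto unless the id is our own.
 */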
2990 switch (rc) {
2991 case -ENODATA:
2992 mode = CLUSTER_KEY;
2993 master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
2994 break;
2995 case 0:
2996 mode = PER_NODE_KEY;
2997 if (memcmp(id, own_id, NODE_ID_LEN)) {
2998 n = tipc_node_find_by_id(net, id) ?:
2999 tipc_node_create(net, 0, id, 0xffffu, 0, true);
3000 if (unlikely(!n))
3001 return -ENOMEM;
3002 c = n->crypto_rx;
3003 }
3004 break;
3005 default:
3006 return rc;
3007 }
3008
3009 /* Initiate the TX/RX key */
3010 rc = tipc_crypto_key_init(c, ukey, mode, master_key);
3011 if (n)
3012 tipc_node_put(n);
3013
3014 if (unlikely(rc < 0)) {
3015 GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
3016 return rc;
3017 } else if (c == tx) {
3018 /* Distribute TX key but not master one */
3019 if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
3020 GENL_SET_ERR_MSG(info, "failed to replicate new key");
3021 rekeying:
3022 /* Schedule TX rekeying if needed */
3023 tipc_crypto_rekeying_sched(tx, rekeying, intv);
3024 }
3025
3026 return 0;
3027 }
3028
3029 int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
3030 {
3031 int err;
3032
3033 rtnl_lock();
3034 err = __tipc_nl_node_set_key(skb, info);
3035 rtnl_unlock();
3036
3037 return err;
3038 }
3039
3040 static int __tipc_nl_node_flush_key(struct sk_buff *skb,
3041 struct genl_info *info)
3042 {
3043 struct net *net = sock_net(skb->sk);
3044 struct tipc_net *tn = tipc_net(net);
3045 struct tipc_node *n;
3046
3047 tipc_crypto_key_flush(tn->crypto_tx);
3048 rcu_read_lock();
3049 list_for_each_entry_rcu(n, &tn->node_list, list)
3050 tipc_crypto_key_flush(n->crypto_rx);
3051 rcu_read_unlock();
3052
3053 return 0;
3054 }
3055
3056 int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
3057 {
3058 int err;
3059
3060 rtnl_lock();
3061 err = __tipc_nl_node_flush_key(skb, info);
3062 rtnl_unlock();
3063
3064 return err;
3065 }
3066 #endif
3067
3068 /**
3069 * tipc_node_dump - dump TIPC node data
3070 * @n: tipc node to be dumped
3071 * @more: dump more?
3072 * - false: dump only tipc node data
3073 * - true: dump node link data as well
3074 * @buf: buffer returned with the formatted dump data
3075 */
3076 int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
3077 {
3078 int i = 0;
3079 size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
3080
3081 if (!n) {
3082 i += scnprintf(buf, sz, "node data: (null)\n");
3083 return i;
3084 }
3085
3086 i += scnprintf(buf, sz, "node data: %x", n->addr);
3087 i += scnprintf(buf + i, sz - i, " %x", n->state);
3088 i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
3089 i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
3090 i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
3091 i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
3092 i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
3093 i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
3094 i += scnprintf(buf + i, sz - i, " %u", n->working_links);
3095 i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
3096 i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
3097
3098 if (!more)
3099 return i;
3100
3101 i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
3102 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
3103 i += scnprintf(buf + i, sz - i, " media: ");
3104 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
3105 i += scnprintf(buf + i, sz - i, "\n");
3106 i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
3107 i += scnprintf(buf + i, sz - i, " inputq: ");
3108 i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
3109
3110 i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
3111 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
3112 i += scnprintf(buf + i, sz - i, " media: ");
3113 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
3114 i += scnprintf(buf + i, sz - i, "\n");
3115 i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
3116 i += scnprintf(buf + i, sz - i, " inputq: ");
3117 i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
3118
3119 i += scnprintf(buf + i, sz - i, "bclink:\n ");
3120 i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
3121
3122 return i;
3123 }
3124
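/* Detach, in all other namespaces, any peer_net reference that points at
 * the exiting net so it is not used after that namespace goes away.
 */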
3125 void tipc_node_pre_cleanup_net(struct net *exit_net)
3126 {
3127 struct tipc_node *n;
3128 struct tipc_net *tn;
3129 struct net *tmp;
3130
3131 rcu_read_lock();
3132 for_each_net_rcu(tmp) {
3133 if (tmp == exit_net)
3134 continue;
3135 tn = tipc_net(tmp);
3136 if (!tn)
3137 continue;
3138 spin_lock_bh(&tn->node_list_lock);
3139 list_for_each_entry_rcu(n, &tn->node_list, list) {
3140 if (!n->peer_net)
3141 continue;
3142 if (n->peer_net != exit_net)
3143 continue;
3144 tipc_node_write_lock(n);
3145 n->peer_net = NULL;
3146 n->peer_hash_mix = 0;
3147 tipc_node_write_unlock_fast(n);
3148 break;
3149 }
3150 spin_unlock_bh(&tn->node_list_lock);
3151 }
3152 rcu_read_unlock();
3153 }