/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2014, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"

static void node_lost_contact(struct tipc_node *n_ptr);
static void node_established_contact(struct tipc_node *n_ptr);
static void tipc_node_delete(struct tipc_node *node);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
	[TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
	[TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
};

/*
 * A trivial power-of-two bitmask technique is used for speed, since this
 * operation is done for every incoming TIPC packet. The number of hash table
 * entries has been chosen so that no hash chain exceeds 8 nodes and will
 * usually be much smaller (typically only a single node).
 */
static unsigned int tipc_hashfn(u32 addr)
{
	return addr & (NODE_HTABLE_SIZE - 1);
}

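/* Node reference counting: tipc_node_get()/tipc_node_put() bracket every
 * node lookup. When the last reference is dropped, the kref release callback
 * below unlinks the node and frees it after an RCU grace period.
 */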
static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *node = container_of(kref, struct tipc_node, kref);

	tipc_node_delete(node);
}

void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 *
 * Returns the node with its reference count incremented, or NULL if the
 * address lies outside our own cluster or no matching node exists. The
 * caller must release the node with tipc_node_put() when done with it.
 */
struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;

	if (unlikely(!in_own_cluster_exact(net, addr)))
		return NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
				 hash) {
		if (node->addr == addr) {
			tipc_node_get(node);
			rcu_read_unlock();
			return node;
		}
	}
	rcu_read_unlock();
	return NULL;
}

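/**
 * tipc_node_create - create node object if it does not already exist
 *
 * Returns the new or already existing node with its reference count
 * incremented, or NULL if allocation fails. Serialized by
 * tn->node_list_lock; a new node is inserted into the address hash table
 * and into the address-sorted node list.
 */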
struct tipc_node *tipc_node_create(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n_ptr, *temp_node;

	spin_lock_bh(&tn->node_list_lock);
	n_ptr = tipc_node_find(net, addr);
	if (n_ptr)
		goto exit;
	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
	if (!n_ptr) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n_ptr->addr = addr;
	n_ptr->net = net;
	kref_init(&n_ptr->kref);
	spin_lock_init(&n_ptr->lock);
	INIT_HLIST_NODE(&n_ptr->hash);
	INIT_LIST_HEAD(&n_ptr->list);
	INIT_LIST_HEAD(&n_ptr->publ_list);
	INIT_LIST_HEAD(&n_ptr->conn_sks);
	__skb_queue_head_init(&n_ptr->bclink.deferdq);
	hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n_ptr->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
	n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
	n_ptr->signature = INVALID_NODE_SIG;
	tipc_node_get(n_ptr);
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n_ptr;
}

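/* Final teardown, reached via the kref release path: unlink the node from
 * the hash table and the node list and free it after an RCU grace period.
 */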
static void tipc_node_delete(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	kfree_rcu(node, rcu);
}

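/* Release the node list's reference on every known node; each node is
 * deleted via the kref release path once its last reference is dropped.
 */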
void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_put(node);
	spin_unlock_bh(&tn->node_list_lock);
}

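/* Register a connected socket with its peer node so the socket can be
 * notified if contact with that node is lost. Returns 0 on success, or
 * -EHOSTUNREACH if the peer node is unknown or no memory is available.
 */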
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

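/* Remove any connection entries matching @port from the peer node's list of
 * registered socket connections.
 */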
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_unlock(node);
	tipc_node_put(node);
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	struct tipc_link **active = &n_ptr->active_links[0];

	n_ptr->working_links++;
	n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
	n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;

	pr_debug("Established link <%s> on network plane %c\n",
		 l_ptr->name, l_ptr->net_plane);

	if (!active[0]) {
		active[0] = active[1] = l_ptr;
		node_established_contact(n_ptr);
		goto exit;
	}
	if (l_ptr->priority < active[0]->priority) {
		pr_debug("New link <%s> becomes standby\n", l_ptr->name);
		goto exit;
	}
	tipc_link_dup_queue_xmit(active[0], l_ptr);
	if (l_ptr->priority == active[0]->priority) {
		active[0] = l_ptr;
		goto exit;
	}
	pr_debug("Old link <%s> becomes standby\n", active[0]->name);
	if (active[1] != active[0])
		pr_debug("Old link <%s> becomes standby\n", active[1]->name);
	active[0] = active[1] = l_ptr;
exit:
	/* Leave room for changeover header when returning 'mtu' to users: */
	n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
	n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
}

/**
 * node_select_active_links - select active link
 */
static void node_select_active_links(struct tipc_node *n_ptr)
{
	struct tipc_link **active = &n_ptr->active_links[0];
	u32 i;
	u32 highest_prio = 0;

	active[0] = active[1] = NULL;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link *l_ptr = n_ptr->links[i];

		if (!l_ptr || !tipc_link_is_up(l_ptr) ||
		    (l_ptr->priority < highest_prio))
			continue;

		if (l_ptr->priority > highest_prio) {
			highest_prio = l_ptr->priority;
			active[0] = active[1] = l_ptr;
		} else {
			active[1] = l_ptr;
		}
	}
}

/**
 * tipc_node_link_down - handle loss of link
 */
void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link **active;

	n_ptr->working_links--;
	n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;

	if (!tipc_link_is_active(l_ptr)) {
		pr_debug("Lost standby link <%s> on network plane %c\n",
			 l_ptr->name, l_ptr->net_plane);
		return;
	}
	pr_debug("Lost link <%s> on network plane %c\n",
		 l_ptr->name, l_ptr->net_plane);

	active = &n_ptr->active_links[0];
	if (active[0] == l_ptr)
		active[0] = active[1];
	if (active[1] == l_ptr)
		active[1] = active[0];
	if (active[0] == l_ptr)
		node_select_active_links(n_ptr);
	if (tipc_node_is_up(n_ptr))
		tipc_link_failover_send_queue(l_ptr);
	else
		node_lost_contact(n_ptr);

	/* Leave room for changeover header when returning 'mtu' to users: */
	if (active[0]) {
		n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
		n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
		return;
	}
	/* Loopback link went down? No fragmentation needed from now on. */
	if (n_ptr->addr == tn->own_addr) {
		n_ptr->act_mtus[0] = MAX_MSG_SIZE;
		n_ptr->act_mtus[1] = MAX_MSG_SIZE;
	}
}

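/* A node is regarded as "up" as long as at least one of its links is active */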
int tipc_node_active_links(struct tipc_node *n_ptr)
{
	return n_ptr->active_links[0] != NULL;
}

int tipc_node_is_up(struct tipc_node *n_ptr)
{
	return tipc_node_active_links(n_ptr);
}

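/* Add/remove a link to/from the node's per-bearer link table, keeping the
 * link counter in sync.
 */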
void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	n_ptr->links[l_ptr->bearer_id] = l_ptr;
	n_ptr->link_cnt++;
}

void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		if (l_ptr != n_ptr->links[i])
			continue;
		n_ptr->links[i] = NULL;
		n_ptr->link_cnt--;
	}
}

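/* First link to the peer node has come up: flag a "node up" notification
 * and register the node as a broadcast link receiver.
 */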
static void node_established_contact(struct tipc_node *n_ptr)
{
	n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
	n_ptr->bclink.oos_state = 0;
	n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
	tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
}

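/* Last link to the peer node has gone down: flush broadcast link state,
 * abort any link failover in progress, and queue connection abort messages
 * towards all sockets that were connected to the lost node.
 */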
static void node_lost_contact(struct tipc_node *n_ptr)
{
	char addr_string[16];
	struct tipc_sock_conn *conn, *safe;
	struct list_head *conns = &n_ptr->conn_sks;
	struct sk_buff *skb;
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	uint i;

	pr_debug("Lost contact with %s\n",
		 tipc_addr_string_fill(addr_string, n_ptr->addr));

	/* Flush broadcast link info associated with lost node */
	if (n_ptr->bclink.recv_permitted) {
		__skb_queue_purge(&n_ptr->bclink.deferdq);

		if (n_ptr->bclink.reasm_buf) {
			kfree_skb(n_ptr->bclink.reasm_buf);
			n_ptr->bclink.reasm_buf = NULL;
		}

		tipc_bclink_remove_node(n_ptr->net, n_ptr->addr);
		tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);

		n_ptr->bclink.recv_permitted = false;
	}

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->flags &= ~LINK_FAILINGOVER;
		l_ptr->failover_checkpt = 0;
		l_ptr->failover_pkts = 0;
		kfree_skb(l_ptr->failover_skb);
		l_ptr->failover_skb = NULL;
		tipc_link_reset_fragments(l_ptr);
	}

	n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;

	/* Prevent re-contact with node until cleanup is done */
	n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN;

	/* Notify publications from this node */
	n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tn->own_addr,
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb)) {
			skb_queue_tail(n_ptr->inputq, skb);
			n_ptr->action_flags |= TIPC_MSG_EVT;
		}
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: network namespace the node belongs to
 * @bearer_id: id of the bearer the link runs over
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success, -EINVAL if the node, bearer or link is unknown
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	/* Take the node lock before the bearer_id check, so that the common
	 * exit path below never releases a lock that was not taken.
	 */
	tipc_node_lock(node);
	if (bearer_id >= MAX_BEARERS)
		goto exit;

	link = node->links[bearer_id];
	if (link) {
		strncpy(linkname, link->name, len);
		err = 0;
	}
exit:
	tipc_node_unlock(node);
	tipc_node_put(node);
	return err;
}

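/* tipc_node_unlock - release the node lock and execute any actions that were
 * deferred while the lock was held, as indicated by the node's action_flags
 * (socket delivery, name table updates, node/link up/down notifications).
 */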
void tipc_node_unlock(struct tipc_node *node)
{
	struct net *net = node->net;
	u32 addr = 0;
	u32 flags = node->action_flags;
	u32 link_id = 0;
	struct list_head *publ_list;
	struct sk_buff_head *inputq = node->inputq;
	struct sk_buff_head *namedq;

	if (likely(!flags || (flags == TIPC_MSG_EVT))) {
		node->action_flags = 0;
		spin_unlock_bh(&node->lock);
		if (flags == TIPC_MSG_EVT)
			tipc_sk_rcv(net, inputq);
		return;
	}

	addr = node->addr;
	link_id = node->link_id;
	namedq = node->namedq;
	publ_list = &node->publ_list;

	node->action_flags &= ~(TIPC_MSG_EVT |
				TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
				TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
				TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
				TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);

	spin_unlock_bh(&node->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_WAKEUP_BCAST_USERS)
		tipc_bclink_wakeup_users(net);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP)
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, addr);

	if (flags & TIPC_NOTIFY_LINK_DOWN)
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      link_id, addr);

	if (flags & TIPC_MSG_EVT)
		tipc_sk_rcv(net, inputq);

	if (flags & TIPC_NAMED_MSG_EVT)
		tipc_named_rcv(net, namedq);

	if (flags & TIPC_BCAST_MSG_EVT)
		tipc_bclink_input(net);

	if (flags & TIPC_BCAST_RESET)
		tipc_link_reset_all(node);
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (tipc_node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

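/* Netlink dump callback: walk the node list and emit one TIPC_NL_NODE_GET
 * message per node, resuming from the last dumped address (cb->args[1])
 * when the dump is continued in a later call.
 */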
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * here; setting prev_seq to a nonzero value therefore
			 * makes the consistency check in the netlink callback
			 * handler fail, so the NLMSG_DONE message gets the
			 * NLM_F_DUMP_INTR flag set if the node state changed
			 * while the lock was released.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		if (last_addr) {
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_lock(node);
		err = __tipc_nl_add_node(&msg, node);
		if (err) {
			last_addr = node->addr;
			tipc_node_unlock(node);
			goto out;
		}

		tipc_node_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}