/*
 * net/tipc/cluster.c: TIPC cluster management routines
 *
 * Copyright (c) 2003-2005, Ericsson Research Canada
 * Copyright (c) 2005, Wind River Systems
 * Copyright (c) 2005-2006, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "cluster.h"
#include "addr.h"
#include "node_subscr.h"
#include "link.h"
#include "node.h"
#include "net.h"
#include "msg.h"
#include "bearer.h"

void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
		       u32 lower, u32 upper);
struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest);

struct node **local_nodes = NULL;
struct node_map cluster_bcast_nodes = {0,{0,}};
u32 highest_allowed_slave = 0;

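/**
 * cluster_create - create and initialize a cluster descriptor
 *
 * The node table holds standard nodes at indices 1..tipc_max_nodes and,
 * in the node's own cluster only, slave nodes from index LOWEST_SLAVE
 * upwards; entry 0 is never used.
 */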
struct cluster *cluster_create(u32 addr)
{
	struct _zone *z_ptr;
	struct cluster *c_ptr;
	int max_nodes;
	int alloc;

	c_ptr = kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
	if (c_ptr == NULL)
		return NULL;
	memset(c_ptr, 0, sizeof(*c_ptr));

	c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
	if (in_own_cluster(addr))
		max_nodes = LOWEST_SLAVE + tipc_max_slaves;
	else
		max_nodes = tipc_max_nodes + 1;
	alloc = sizeof(void *) * (max_nodes + 1);
	c_ptr->nodes = kmalloc(alloc, GFP_ATOMIC);
	if (c_ptr->nodes == NULL) {
		kfree(c_ptr);
		return NULL;
	}
	memset(c_ptr->nodes, 0, alloc);
	if (in_own_cluster(addr))
		local_nodes = c_ptr->nodes;
	c_ptr->highest_slave = LOWEST_SLAVE - 1;
	c_ptr->highest_node = 0;

	z_ptr = zone_find(tipc_zone(addr));
	if (z_ptr == NULL)
		z_ptr = zone_create(addr);
	if (z_ptr != NULL) {
		zone_attach_cluster(z_ptr, c_ptr);
		c_ptr->owner = z_ptr;
	} else {
		kfree(c_ptr->nodes);	/* don't leak the node table */
		kfree(c_ptr);
		c_ptr = NULL;
	}

	return c_ptr;
}

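/**
 * cluster_delete - destroy cluster and all attached standard and slave nodes
 */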
void cluster_delete(struct cluster *c_ptr)
{
	u32 n_num;

	if (!c_ptr)
		return;
	for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
		node_delete(c_ptr->nodes[n_num]);
	}
	for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
		node_delete(c_ptr->nodes[n_num]);
	}
	kfree(c_ptr->nodes);
	kfree(c_ptr);
}

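/**
 * cluster_next_node - return next node in cluster with active links
 *
 * Searches upwards from the node following 'addr', wrapping around to
 * the start of the node table; returns 0 if no such node is found.
 */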
u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
{
	struct node *n_ptr;
	u32 n_num = tipc_node(addr) + 1;

	if (!c_ptr)
		return addr;
	for (; n_num <= c_ptr->highest_node; n_num++) {
		n_ptr = c_ptr->nodes[n_num];
		if (n_ptr && node_has_active_links(n_ptr))
			return n_ptr->addr;
	}
	for (n_num = 1; n_num < tipc_node(addr); n_num++) {
		n_ptr = c_ptr->nodes[n_num];
		if (n_ptr && node_has_active_links(n_ptr))
			return n_ptr->addr;
	}
	return 0;
}

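/**
 * cluster_attach_node - add newly created node to its cluster's node table
 */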
void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
{
	u32 n_num = tipc_node(n_ptr->addr);
	u32 max_n_num = tipc_max_nodes;

	if (in_own_cluster(n_ptr->addr))
		max_n_num = highest_allowed_slave;
	assert(n_num > 0);
	assert(n_num <= max_n_num);
	assert(c_ptr->nodes[n_num] == NULL);
	c_ptr->nodes[n_num] = n_ptr;
	if (n_num > c_ptr->highest_node)
		c_ptr->highest_node = n_num;
}

/**
 * cluster_select_router - select router to a cluster
 *
 * Uses a deterministic and fair algorithm.
 */

u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
{
	u32 n_num;
	u32 ulim = c_ptr->highest_node;
	u32 mask;
	u32 tstart;

	assert(!in_own_cluster(c_ptr->addr));
	if (!ulim)
		return 0;

	/* Start entry must be random */
	mask = tipc_max_nodes;
	while (mask > ulim)
		mask >>= 1;
	tstart = ref & mask;
	n_num = tstart;

	/* Lookup upwards with wrap-around */
	do {
		if (node_is_up(c_ptr->nodes[n_num]))
			break;
	} while (++n_num <= ulim);
	if (n_num > ulim) {
		n_num = 1;
		do {
			if (node_is_up(c_ptr->nodes[n_num]))
				break;
		} while (++n_num < tstart);
		if (n_num == tstart)
			return 0;
	}
	assert(n_num <= ulim);
	return node_select_router(c_ptr->nodes[n_num], ref);
}

/**
 * cluster_select_node - select destination node within a remote cluster
 *
 * Uses a deterministic and fair algorithm.
 */

struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
{
	u32 n_num;
	u32 mask = tipc_max_nodes;
	u32 start_entry;

	assert(!in_own_cluster(c_ptr->addr));
	if (!c_ptr->highest_node)
		return NULL;

	/* Start entry must be random */
	while (mask > c_ptr->highest_node) {
		mask >>= 1;
	}
	start_entry = (selector & mask) ? selector & mask : 1u;
	assert(start_entry <= c_ptr->highest_node);

	/* Lookup upwards with wrap-around */
	for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
		if (node_has_active_links(c_ptr->nodes[n_num]))
			return c_ptr->nodes[n_num];
	}
	for (n_num = 1; n_num < start_entry; n_num++) {
		if (node_has_active_links(c_ptr->nodes[n_num]))
			return c_ptr->nodes[n_num];
	}
	return NULL;
}

/*
 * Routing table management: See description in node.c
 */

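/**
 * cluster_prepare_routing_msg - create message with zeroed routing table body
 *
 * Returns NULL if no buffer of INT_H_SIZE + 'data_size' bytes can be
 * allocated.
 */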
struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
{
	u32 size = INT_H_SIZE + data_size;
	struct sk_buff *buf = buf_acquire(size);
	struct tipc_msg *msg;

	if (buf) {
		msg = buf_msg(buf);
		memset((char *)msg, 0, size);
		msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest);
	}
	return buf;
}

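/**
 * cluster_bcast_new_route - tell nodes 'lower'..'upper' about route to 'dest'
 */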
void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest,
			     u32 lower, u32 upper)
{
	struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
	struct tipc_msg *msg;

	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, dest);
		msg_set_type(msg, ROUTE_ADDITION);
		cluster_multicast(c_ptr, buf, lower, upper);
	} else {
		warn("Memory squeeze: broadcast of new route failed\n");
	}
}

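/**
 * cluster_bcast_lost_route - tell nodes 'lower'..'upper' that route to 'dest' is gone
 */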
void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest,
			      u32 lower, u32 upper)
{
	struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
	struct tipc_msg *msg;

	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, dest);
		msg_set_type(msg, ROUTE_REMOVAL);
		cluster_multicast(c_ptr, buf, lower, upper);
	} else {
		warn("Memory squeeze: broadcast of lost route failed\n");
	}
}

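/**
 * cluster_send_slave_routes - send routes on own cluster's slave nodes to 'dest'
 *
 * The message body is a byte map; a non-zero octet marks a slave node
 * that currently has active links.
 */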
void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;
	u32 highest = c_ptr->highest_slave;
	u32 n_num;
	int send = 0;

	assert(!is_slave(dest));
	assert(in_own_cluster(dest));
	assert(in_own_cluster(c_ptr->addr));
	if (highest <= LOWEST_SLAVE)
		return;
	buf = cluster_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
					  c_ptr->addr);
	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, c_ptr->addr);
		msg_set_type(msg, SLAVE_ROUTING_TABLE);
		for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
			if (c_ptr->nodes[n_num] &&
			    node_has_active_links(c_ptr->nodes[n_num])) {
				send = 1;
				msg_set_dataoctet(msg, n_num);
			}
		}
		if (send)
			link_send(buf, dest, dest);
		else
			buf_discard(buf);
	} else {
		warn("Memory squeeze: broadcast of slave routes failed\n");
	}
}

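/**
 * cluster_send_ext_routes - send routes on a remote cluster's nodes to 'dest'
 */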
void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;
	u32 highest = c_ptr->highest_node;
	u32 n_num;
	int send = 0;

	if (in_own_cluster(c_ptr->addr))
		return;
	assert(!is_slave(dest));
	assert(in_own_cluster(dest));
	buf = cluster_prepare_routing_msg(highest + 1, c_ptr->addr);
	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, c_ptr->addr);
		msg_set_type(msg, EXT_ROUTING_TABLE);
		for (n_num = 1; n_num <= highest; n_num++) {
			if (c_ptr->nodes[n_num] &&
			    node_has_active_links(c_ptr->nodes[n_num])) {
				send = 1;
				msg_set_dataoctet(msg, n_num);
			}
		}
		if (send)
			link_send(buf, dest, dest);
		else
			buf_discard(buf);
	} else {
		warn("Memory squeeze: broadcast of external route failed\n");
	}
}

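/**
 * cluster_send_local_routes - send routes on own cluster's standard nodes to slave 'dest'
 */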
void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;
	u32 highest = c_ptr->highest_node;
	u32 n_num;
	int send = 0;

	assert(is_slave(dest));
	assert(in_own_cluster(c_ptr->addr));
	buf = cluster_prepare_routing_msg(highest, c_ptr->addr);
	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, c_ptr->addr);
		msg_set_type(msg, LOCAL_ROUTING_TABLE);
		for (n_num = 1; n_num <= highest; n_num++) {
			if (c_ptr->nodes[n_num] &&
			    node_has_active_links(c_ptr->nodes[n_num])) {
				send = 1;
				msg_set_dataoctet(msg, n_num);
			}
		}
		if (send)
			link_send(buf, dest, dest);
		else
			buf_discard(buf);
	} else {
		warn("Memory squeeze: broadcast of local route failed\n");
	}
}

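/**
 * cluster_recv_routing_table - process an incoming routing table message
 *
 * Handles full routing tables (LOCAL/EXT/SLAVE_ROUTING_TABLE) as well as
 * single route additions and removals; the buffer is consumed.
 */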
void cluster_recv_routing_table(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct cluster *c_ptr;
	struct node *n_ptr;
	unchar *node_table;
	u32 table_size;
	u32 router;
	u32 rem_node = msg_remote_node(msg);
	u32 z_num;
	u32 c_num;
	u32 n_num;

	c_ptr = cluster_find(rem_node);
	if (!c_ptr) {
		c_ptr = cluster_create(rem_node);
		if (!c_ptr) {
			buf_discard(buf);
			return;
		}
	}

	node_table = buf->data + msg_hdr_sz(msg);
	table_size = msg_size(msg) - msg_hdr_sz(msg);
	router = msg_prevnode(msg);
	z_num = tipc_zone(rem_node);
	c_num = tipc_cluster(rem_node);

	switch (msg_type(msg)) {
	case LOCAL_ROUTING_TABLE:
		assert(is_slave(tipc_own_addr));
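		/* fall through: a local table is processed like an external one */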
	case EXT_ROUTING_TABLE:
		for (n_num = 1; n_num < table_size; n_num++) {
			if (node_table[n_num]) {
				u32 addr = tipc_addr(z_num, c_num, n_num);
				n_ptr = c_ptr->nodes[n_num];
				if (!n_ptr) {
					n_ptr = node_create(addr);
				}
				if (n_ptr)
					node_add_router(n_ptr, router);
			}
		}
		break;
	case SLAVE_ROUTING_TABLE:
		assert(!is_slave(tipc_own_addr));
		assert(in_own_cluster(c_ptr->addr));
		for (n_num = 1; n_num < table_size; n_num++) {
			if (node_table[n_num]) {
				u32 slave_num = n_num + LOWEST_SLAVE;
				u32 addr = tipc_addr(z_num, c_num, slave_num);
				n_ptr = c_ptr->nodes[slave_num];
				if (!n_ptr) {
					n_ptr = node_create(addr);
				}
				if (n_ptr)
					node_add_router(n_ptr, router);
			}
		}
		break;
	case ROUTE_ADDITION:
		if (!is_slave(tipc_own_addr)) {
			assert(!in_own_cluster(c_ptr->addr)
			       || is_slave(rem_node));
		} else {
			assert(in_own_cluster(c_ptr->addr)
			       && !is_slave(rem_node));
		}
		n_ptr = c_ptr->nodes[tipc_node(rem_node)];
		if (!n_ptr)
			n_ptr = node_create(rem_node);
		if (n_ptr)
			node_add_router(n_ptr, router);
		break;
	case ROUTE_REMOVAL:
		if (!is_slave(tipc_own_addr)) {
			assert(!in_own_cluster(c_ptr->addr)
			       || is_slave(rem_node));
		} else {
			assert(in_own_cluster(c_ptr->addr)
			       && !is_slave(rem_node));
		}
		n_ptr = c_ptr->nodes[tipc_node(rem_node)];
		if (n_ptr)
			node_remove_router(n_ptr, router);
		break;
	default:
		assert(!"Illegal routing manager message received\n");
	}
	buf_discard(buf);
}

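/**
 * cluster_remove_as_router - remove 'router' from router lists of all nodes
 */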
void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
{
	u32 start_entry;
	u32 tstop;
	u32 n_num;

	if (is_slave(router))
		return;	/* Slave nodes cannot be routers */

	if (in_own_cluster(c_ptr->addr)) {
		start_entry = LOWEST_SLAVE;
		tstop = c_ptr->highest_slave;
	} else {
		start_entry = 1;
		tstop = c_ptr->highest_node;
	}

	for (n_num = start_entry; n_num <= tstop; n_num++) {
		if (c_ptr->nodes[n_num]) {
			node_remove_router(c_ptr->nodes[n_num], router);
		}
	}
}

/**
 * cluster_multicast - multicast message to local nodes 'lower'..'upper'
 *
 * Sends a copy of the message to each node in the range that has active
 * links; the original buffer is consumed.
 */

void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
		       u32 lower, u32 upper)
{
	struct sk_buff *buf_copy;
	struct node *n_ptr;
	u32 n_num;
	u32 tstop;

	assert(lower <= upper);
	assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
	       ((lower >= LOWEST_SLAVE) && (lower <= highest_allowed_slave)));
	assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
	       ((upper >= LOWEST_SLAVE) && (upper <= highest_allowed_slave)));
	assert(in_own_cluster(c_ptr->addr));

	tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
	if (tstop > upper)
		tstop = upper;
	for (n_num = lower; n_num <= tstop; n_num++) {
		n_ptr = c_ptr->nodes[n_num];
		if (n_ptr && node_has_active_links(n_ptr)) {
			buf_copy = skb_copy(buf, GFP_ATOMIC);
			if (buf_copy == NULL)
				break;
			msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
			link_send(buf_copy, n_ptr->addr, n_ptr->addr);
		}
	}
	buf_discard(buf);
}

/**
 * cluster_broadcast - broadcast message to all nodes within cluster
 */

void cluster_broadcast(struct sk_buff *buf)
{
	struct sk_buff *buf_copy;
	struct cluster *c_ptr;
	struct node *n_ptr;
	u32 n_num;
	u32 tstart;
	u32 tstop;
	u32 node_type;

	if (tipc_mode == TIPC_NET_MODE) {
		c_ptr = cluster_find(tipc_own_addr);
		assert(in_own_cluster(c_ptr->addr));	/* For now */

		/* Send to standard nodes, then repeat loop sending to slaves */
		tstart = 1;
		tstop = c_ptr->highest_node;
		for (node_type = 1; node_type <= 2; node_type++) {
			for (n_num = tstart; n_num <= tstop; n_num++) {
				n_ptr = c_ptr->nodes[n_num];
				if (n_ptr && node_has_active_links(n_ptr)) {
					buf_copy = skb_copy(buf, GFP_ATOMIC);
					if (buf_copy == NULL)
						goto exit;
					msg_set_destnode(buf_msg(buf_copy),
							 n_ptr->addr);
					link_send(buf_copy, n_ptr->addr,
						  n_ptr->addr);
				}
			}
			tstart = LOWEST_SLAVE;
			tstop = c_ptr->highest_slave;
		}
	}
exit:
	buf_discard(buf);
}

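/**
 * cluster_init - create the node's own cluster descriptor at startup
 */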
int cluster_init(void)
{
	highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
	return cluster_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
}