net/tipc/name_distr.c (mirror_ubuntu-bionic-kernel.git, Ubuntu-4.15.0-96.97)
/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "name_distr.h"

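/* Time (in ms) a deferred name table update from a peer node is kept on the
 * backlog queue before being dropped; tunable via the net.tipc.named_timeout
 * sysctl.
 */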
int sysctl_tipc_named_timeout __read_mostly = 2000;

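/**
 * struct distr_queue_item - deferred name table update kept on the backlog
 * @i: publication info as received from the originating node
 * @dtype: message type, PUBLICATION or WITHDRAWAL
 * @node: network address of the originating node
 * @expires: jiffies value after which the deferred update is dropped
 * @next: link into the per-namespace dist_queue list
 */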
struct distr_queue_item {
	struct distr_item i;
	u32 dtype;
	u32 node;
	unsigned long expires;
	struct list_head next;
};

/**
 * publ_to_item - add publication info to a publication message
 */
static void publ_to_item(struct distr_item *i, struct publication *p)
{
	i->type = htonl(p->type);
	i->lower = htonl(p->lower);
	i->upper = htonl(p->upper);
	i->ref = htonl(p->ref);
	i->key = htonl(p->key);
}

/**
 * named_prepare_buf - allocate & initialize a publication message
 *
 * The buffer returned is of size INT_H_SIZE + payload size
 */
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
					 u32 dest)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	struct tipc_msg *msg;

	if (buf != NULL) {
		msg = buf_msg(buf);
		tipc_msg_init(tn->own_addr, msg, NAME_DISTRIBUTOR, type,
			      INT_H_SIZE, dest);
		msg_set_size(msg, INT_H_SIZE + size);
	}
	return buf;
}

/**
 * tipc_named_publish - tell other nodes about a new publication by this node
 */
struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff *buf;
	struct distr_item *item;

	list_add_tail_rcu(&publ->local_list,
			  &tn->nametbl->publ_list[publ->scope]);

	if (publ->scope == TIPC_NODE_SCOPE)
		return NULL;

	buf = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
	if (!buf) {
		pr_warn("Publication distribution failure\n");
		return NULL;
	}

	item = (struct distr_item *)msg_data(buf_msg(buf));
	publ_to_item(item, publ);
	return buf;
}

/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
 */
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
{
	struct sk_buff *buf;
	struct distr_item *item;

	list_del(&publ->local_list);

	if (publ->scope == TIPC_NODE_SCOPE)
		return NULL;

	buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
	if (!buf) {
		pr_warn("Withdrawal distribution failure\n");
		return NULL;
	}

	item = (struct distr_item *)msg_data(buf_msg(buf));
	publ_to_item(item, publ);
	return buf;
}

/**
 * named_distribute - prepare name info for bulk distribution to another node
 * @list: list of messages (buffers) to be returned from this function
 * @dnode: node to be updated
 * @pls: linked list of publication items to be packed into buffer chain
 */
static void named_distribute(struct net *net, struct sk_buff_head *list,
			     u32 dnode, struct list_head *pls)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct distr_item *item = NULL;
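	/* Largest data size that still holds a whole number of distr_items
	 * within the link MTU towards dnode, header excluded.
	 */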
	u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
		       ITEM_SIZE) * ITEM_SIZE;
	u32 msg_rem = msg_dsz;

	list_for_each_entry(publ, pls, local_list) {
		/* Prepare next buffer: */
		if (!skb) {
			skb = named_prepare_buf(net, PUBLICATION, msg_rem,
						dnode);
			if (!skb) {
				pr_warn("Bulk publication failure\n");
				return;
			}
			msg_set_bc_ack_invalid(buf_msg(skb), true);
			item = (struct distr_item *)msg_data(buf_msg(skb));
		}

		/* Pack publication into message: */
		publ_to_item(item, publ);
		item++;
		msg_rem -= ITEM_SIZE;

		/* Append full buffer to list: */
		if (!msg_rem) {
			__skb_queue_tail(list, skb);
			skb = NULL;
			msg_rem = msg_dsz;
		}
	}
	if (skb) {
		msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem));
		skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
		__skb_queue_tail(list, skb);
	}
}

/**
 * tipc_named_node_up - tell specified node about all publications by this node
 */
void tipc_named_node_up(struct net *net, u32 dnode)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff_head head;

	__skb_queue_head_init(&head);

	rcu_read_lock();
	named_distribute(net, &head, dnode,
			 &tn->nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
	named_distribute(net, &head, dnode,
			 &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
	rcu_read_unlock();

	tipc_node_xmit(net, &head, dnode, 0);
}

/**
 * tipc_publ_purge - remove publication associated with a failed node
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 */
static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct publication *p;

	spin_lock_bh(&tn->nametbl_lock);
	p = tipc_nametbl_remove_publ(net, publ->type, publ->lower,
				     publ->node, publ->ref, publ->key);
	if (p)
		tipc_node_unsubscribe(net, &p->nodesub_list, addr);
	spin_unlock_bh(&tn->nametbl_lock);

	if (p != publ) {
		pr_err("Unable to remove publication from failed node\n"
		       " (type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
		       publ->type, publ->lower, publ->node, publ->ref,
		       publ->key);
	}

	kfree_rcu(p, rcu);
}

/**
 * tipc_dist_queue_purge - remove deferred updates from a node that went down
 */
static void tipc_dist_queue_purge(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct distr_queue_item *e, *tmp;

	spin_lock_bh(&tn->nametbl_lock);
	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
		if (e->node != addr)
			continue;
		list_del(&e->next);
		kfree(e);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

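/**
 * tipc_publ_notify - purge all publications issued by a failed node
 *
 * Walks the failed node's subscription list, removing each of its
 * publications from the local name table, and then discards any deferred
 * updates still queued from that node.
 */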
void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
{
	struct publication *publ, *tmp;

	list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
		tipc_publ_purge(net, publ, addr);
	tipc_dist_queue_purge(net, addr);
}

/**
 * tipc_update_nametbl - try to process a nametable update and notify
 * subscribers
 *
 * tipc_nametbl_lock must be held.
 * Returns true if the update was applied, otherwise false.
 */
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
				u32 node, u32 dtype)
{
	struct publication *publ = NULL;

	if (dtype == PUBLICATION) {
		publ = tipc_nametbl_insert_publ(net, ntohl(i->type),
						ntohl(i->lower),
						ntohl(i->upper),
						TIPC_CLUSTER_SCOPE, node,
						ntohl(i->ref), ntohl(i->key));
		if (publ) {
			tipc_node_subscribe(net, &publ->nodesub_list, node);
			return true;
		}
	} else if (dtype == WITHDRAWAL) {
		publ = tipc_nametbl_remove_publ(net, ntohl(i->type),
						ntohl(i->lower),
						node, ntohl(i->ref),
						ntohl(i->key));
		if (publ) {
			tipc_node_unsubscribe(net, &publ->nodesub_list, node);
			kfree_rcu(publ, rcu);
			return true;
		}
	} else {
		pr_warn("Unrecognized name table message received\n");
	}
	return false;
}

/**
 * tipc_named_add_backlog - add a failed name table update to the backlog
 */
static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
				   u32 type, u32 node)
{
	struct distr_queue_item *e;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	unsigned long now = get_jiffies_64();

	e = kzalloc(sizeof(*e), GFP_ATOMIC);
	if (!e)
		return;
	e->dtype = type;
	e->node = node;
	e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
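	/* struct distr_item is the first member of *e, so this copies the
	 * received item into e->i without touching the fields set above.
	 */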
	memcpy(e, i, sizeof(*i));
	list_add_tail(&e->next, &tn->dist_queue);
}

/**
 * tipc_named_process_backlog - try to process any pending name table updates
 * from the network.
 */
void tipc_named_process_backlog(struct net *net)
{
	struct distr_queue_item *e, *tmp;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	char addr[16];
	unsigned long now = get_jiffies_64();

	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
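		/* Retry entries that have not expired yet: keep them queued
		 * if the update still cannot be applied, free them once it
		 * succeeds. Expired entries are dropped with a warning.
		 */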
		if (time_after(e->expires, now)) {
			if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
				continue;
		} else {
			tipc_addr_string_fill(addr, e->node);
			pr_warn_ratelimited("Dropping name table update (%d) of {%u, %u, %u} from %s key=%u\n",
					    e->dtype, ntohl(e->i.type),
					    ntohl(e->i.lower),
					    ntohl(e->i.upper),
					    addr, ntohl(e->i.key));
		}
		list_del(&e->next);
		kfree(e);
	}
}

/**
 * tipc_named_rcv - process name table update messages sent by another node
 */
void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_msg *msg;
	struct distr_item *item;
	uint count;
	u32 node;
	struct sk_buff *skb;
	int mtype;

	spin_lock_bh(&tn->nametbl_lock);
	for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) {
		skb_linearize(skb);
		msg = buf_msg(skb);
		mtype = msg_type(msg);
		item = (struct distr_item *)msg_data(msg);
		count = msg_data_sz(msg) / ITEM_SIZE;
		node = msg_orignode(msg);
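		/* A single NAME_DISTRIBUTOR message may carry several
		 * distr_items; apply each one, deferring any that fail to
		 * the backlog queue.
		 */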
		while (count--) {
			if (!tipc_update_nametbl(net, item, node, mtype))
				tipc_named_add_backlog(net, item, mtype, node);
			item++;
		}
		kfree_skb(skb);
		tipc_named_process_backlog(net);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_named_reinit - re-initialize local publications
 *
 * This routine is called whenever TIPC networking is enabled.
 * All name table entries published by this node are updated to reflect
 * the node's new network address.
 */
void tipc_named_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct publication *publ;
	int scope;

	spin_lock_bh(&tn->nametbl_lock);

	for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
		list_for_each_entry_rcu(publ, &tn->nametbl->publ_list[scope],
					local_list)
			publ->node = tn->own_addr;

	spin_unlock_bh(&tn->nametbl_lock);
}