]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - net/batman-adv/translation-table.c
802eacef05b85bd42ffd1738dad7095d6d532067
[mirror_ubuntu-jammy-kernel.git] / net / batman-adv / translation-table.c
1 /*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22 #include "main.h"
23 #include "translation-table.h"
24 #include "soft-interface.h"
25 #include "hard-interface.h"
26 #include "hash.h"
27 #include "originator.h"
28
29 static void tt_local_purge(struct work_struct *work);
30 static void _tt_global_del_orig(struct bat_priv *bat_priv,
31 struct tt_global_entry *tt_global_entry,
32 const char *message);
33
34 /* returns 1 if they are the same mac addr */
35 static int compare_ltt(const struct hlist_node *node, const void *data2)
36 {
37 const void *data1 = container_of(node, struct tt_local_entry,
38 hash_entry);
39
40 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
41 }
42
43 /* returns 1 if they are the same mac addr */
44 static int compare_gtt(const struct hlist_node *node, const void *data2)
45 {
46 const void *data1 = container_of(node, struct tt_global_entry,
47 hash_entry);
48
49 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
50 }
51
/* (re-)arm the periodic purge of the local translation table:
 * tt_local_purge() will run on the batman event workqueue in 10 seconds */
static void tt_local_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ);
}
57
58 static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
59 const void *data)
60 {
61 struct hashtable_t *hash = bat_priv->tt_local_hash;
62 struct hlist_head *head;
63 struct hlist_node *node;
64 struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
65 int index;
66
67 if (!hash)
68 return NULL;
69
70 index = choose_orig(data, hash->size);
71 head = &hash->table[index];
72
73 rcu_read_lock();
74 hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
75 if (!compare_eth(tt_local_entry, data))
76 continue;
77
78 tt_local_entry_tmp = tt_local_entry;
79 break;
80 }
81 rcu_read_unlock();
82
83 return tt_local_entry_tmp;
84 }
85
86 static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
87 const void *data)
88 {
89 struct hashtable_t *hash = bat_priv->tt_global_hash;
90 struct hlist_head *head;
91 struct hlist_node *node;
92 struct tt_global_entry *tt_global_entry;
93 struct tt_global_entry *tt_global_entry_tmp = NULL;
94 int index;
95
96 if (!hash)
97 return NULL;
98
99 index = choose_orig(data, hash->size);
100 head = &hash->table[index];
101
102 rcu_read_lock();
103 hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
104 if (!compare_eth(tt_global_entry, data))
105 continue;
106
107 tt_global_entry_tmp = tt_global_entry;
108 break;
109 }
110 rcu_read_unlock();
111
112 return tt_global_entry_tmp;
113 }
114
115 int tt_local_init(struct bat_priv *bat_priv)
116 {
117 if (bat_priv->tt_local_hash)
118 return 1;
119
120 bat_priv->tt_local_hash = hash_new(1024);
121
122 if (!bat_priv->tt_local_hash)
123 return 0;
124
125 atomic_set(&bat_priv->tt_local_changed, 0);
126 tt_local_start_timer(bat_priv);
127
128 return 1;
129 }
130
131 void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
132 {
133 struct bat_priv *bat_priv = netdev_priv(soft_iface);
134 struct tt_local_entry *tt_local_entry;
135 struct tt_global_entry *tt_global_entry;
136 int required_bytes;
137
138 spin_lock_bh(&bat_priv->tt_lhash_lock);
139 tt_local_entry = tt_local_hash_find(bat_priv, addr);
140 spin_unlock_bh(&bat_priv->tt_lhash_lock);
141
142 if (tt_local_entry) {
143 tt_local_entry->last_seen = jiffies;
144 return;
145 }
146
147 /* only announce as many hosts as possible in the batman-packet and
148 space in batman_packet->num_tt That also should give a limit to
149 MAC-flooding. */
150 required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN;
151 required_bytes += BAT_PACKET_LEN;
152
153 if ((required_bytes > ETH_DATA_LEN) ||
154 (atomic_read(&bat_priv->aggregated_ogms) &&
155 required_bytes > MAX_AGGREGATION_BYTES) ||
156 (bat_priv->num_local_tt + 1 > 255)) {
157 bat_dbg(DBG_ROUTES, bat_priv,
158 "Can't add new local tt entry (%pM): "
159 "number of local tt entries exceeds packet size\n",
160 addr);
161 return;
162 }
163
164 bat_dbg(DBG_ROUTES, bat_priv,
165 "Creating new local tt entry: %pM\n", addr);
166
167 tt_local_entry = kmalloc(sizeof(struct tt_local_entry), GFP_ATOMIC);
168 if (!tt_local_entry)
169 return;
170
171 memcpy(tt_local_entry->addr, addr, ETH_ALEN);
172 tt_local_entry->last_seen = jiffies;
173
174 /* the batman interface mac address should never be purged */
175 if (compare_eth(addr, soft_iface->dev_addr))
176 tt_local_entry->never_purge = 1;
177 else
178 tt_local_entry->never_purge = 0;
179
180 spin_lock_bh(&bat_priv->tt_lhash_lock);
181
182 hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
183 tt_local_entry, &tt_local_entry->hash_entry);
184 bat_priv->num_local_tt++;
185 atomic_set(&bat_priv->tt_local_changed, 1);
186
187 spin_unlock_bh(&bat_priv->tt_lhash_lock);
188
189 /* remove address from global hash if present */
190 spin_lock_bh(&bat_priv->tt_ghash_lock);
191
192 tt_global_entry = tt_global_hash_find(bat_priv, addr);
193
194 if (tt_global_entry)
195 _tt_global_del_orig(bat_priv, tt_global_entry,
196 "local tt received");
197
198 spin_unlock_bh(&bat_priv->tt_ghash_lock);
199 }
200
201 int tt_local_fill_buffer(struct bat_priv *bat_priv,
202 unsigned char *buff, int buff_len)
203 {
204 struct hashtable_t *hash = bat_priv->tt_local_hash;
205 struct tt_local_entry *tt_local_entry;
206 struct hlist_node *node;
207 struct hlist_head *head;
208 int i, count = 0;
209
210 spin_lock_bh(&bat_priv->tt_lhash_lock);
211
212 for (i = 0; i < hash->size; i++) {
213 head = &hash->table[i];
214
215 rcu_read_lock();
216 hlist_for_each_entry_rcu(tt_local_entry, node,
217 head, hash_entry) {
218 if (buff_len < (count + 1) * ETH_ALEN)
219 break;
220
221 memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
222 ETH_ALEN);
223
224 count++;
225 }
226 rcu_read_unlock();
227 }
228
229 /* if we did not get all new local tts see you next time ;-) */
230 if (count == bat_priv->num_local_tt)
231 atomic_set(&bat_priv->tt_local_changed, 0);
232
233 spin_unlock_bh(&bat_priv->tt_lhash_lock);
234 return count;
235 }
236
/* debugfs/seq_file handler: print all locally announced tt entries of the
 * mesh interface behind 'seq'. Returns 0 on success, -ENOMEM if the
 * scratch buffer cannot be allocated, or the seq_printf() result when the
 * mesh is disabled or has no active primary interface. */
int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via TT:\n",
		   net_dev->name);

	/* the lock is held across both passes so the entry count cannot
	 * change between sizing and filling the scratch buffer */
	spin_lock_bh(&bat_priv->tt_lhash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 21;
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->tt_lhash_lock);
		ret = -ENOMEM;
		goto out;
	}

	buff[0] = '\0';
	pos = 0;

	/* second pass: format each entry (at most 21 chars + NUL) */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 22, " * %pM\n",
					tt_local_entry->addr);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->tt_lhash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
313
314 static void _tt_local_del(struct hlist_node *node, void *arg)
315 {
316 struct bat_priv *bat_priv = arg;
317 void *data = container_of(node, struct tt_local_entry, hash_entry);
318
319 kfree(data);
320 bat_priv->num_local_tt--;
321 atomic_set(&bat_priv->tt_local_changed, 1);
322 }
323
/* remove 'tt_local_entry' from the local hash and free it; 'message' is
 * logged as the reason. Caller must hold tt_lhash_lock (all callers in
 * this file do). */
static void tt_local_del(struct bat_priv *bat_priv,
			 struct tt_local_entry *tt_local_entry,
			 const char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n",
		tt_local_entry->addr, message);

	hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
		    tt_local_entry->addr);
	_tt_local_del(&tt_local_entry->hash_entry, bat_priv);
}
335
336 void tt_local_remove(struct bat_priv *bat_priv,
337 const uint8_t *addr, const char *message)
338 {
339 struct tt_local_entry *tt_local_entry;
340
341 spin_lock_bh(&bat_priv->tt_lhash_lock);
342
343 tt_local_entry = tt_local_hash_find(bat_priv, addr);
344
345 if (tt_local_entry)
346 tt_local_del(bat_priv, tt_local_entry, message);
347
348 spin_unlock_bh(&bat_priv->tt_lhash_lock);
349 }
350
/* periodic worker (bat_priv->tt_work): delete local tt entries that have
 * not been refreshed for TT_LOCAL_TIMEOUT seconds, then re-arm the timer.
 * Entries flagged never_purge (the soft interface's own mac address) are
 * always kept. */
static void tt_local_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	unsigned long timeout;
	int i;

	spin_lock_bh(&bat_priv->tt_lhash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		/* _safe variant: tt_local_del() unlinks the current node */
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			if (tt_local_entry->never_purge)
				continue;

			timeout = tt_local_entry->last_seen;
			timeout += TT_LOCAL_TIMEOUT * HZ;

			/* not yet expired */
			if (time_before(jiffies, timeout))
				continue;

			tt_local_del(bat_priv, tt_local_entry,
				     "address timed out");
		}
	}

	spin_unlock_bh(&bat_priv->tt_lhash_lock);
	tt_local_start_timer(bat_priv);
}
388
389 void tt_local_free(struct bat_priv *bat_priv)
390 {
391 if (!bat_priv->tt_local_hash)
392 return;
393
394 cancel_delayed_work_sync(&bat_priv->tt_work);
395 hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv);
396 bat_priv->tt_local_hash = NULL;
397 }
398
399 int tt_global_init(struct bat_priv *bat_priv)
400 {
401 if (bat_priv->tt_global_hash)
402 return 1;
403
404 bat_priv->tt_global_hash = hash_new(1024);
405
406 if (!bat_priv->tt_global_hash)
407 return 0;
408
409 return 1;
410 }
411
412 void tt_global_add_orig(struct bat_priv *bat_priv,
413 struct orig_node *orig_node,
414 const unsigned char *tt_buff, int tt_buff_len)
415 {
416 struct tt_global_entry *tt_global_entry;
417 struct tt_local_entry *tt_local_entry;
418 int tt_buff_count = 0;
419 const unsigned char *tt_ptr;
420
421 while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) {
422 spin_lock_bh(&bat_priv->tt_ghash_lock);
423
424 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
425 tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
426
427 if (!tt_global_entry) {
428 spin_unlock_bh(&bat_priv->tt_ghash_lock);
429
430 tt_global_entry =
431 kmalloc(sizeof(struct tt_global_entry),
432 GFP_ATOMIC);
433
434 if (!tt_global_entry)
435 break;
436
437 memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN);
438
439 bat_dbg(DBG_ROUTES, bat_priv,
440 "Creating new global tt entry: "
441 "%pM (via %pM)\n",
442 tt_global_entry->addr, orig_node->orig);
443
444 spin_lock_bh(&bat_priv->tt_ghash_lock);
445 hash_add(bat_priv->tt_global_hash, compare_gtt,
446 choose_orig, tt_global_entry,
447 &tt_global_entry->hash_entry);
448
449 }
450
451 tt_global_entry->orig_node = orig_node;
452 spin_unlock_bh(&bat_priv->tt_ghash_lock);
453
454 /* remove address from local hash if present */
455 spin_lock_bh(&bat_priv->tt_lhash_lock);
456
457 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
458 tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr);
459
460 if (tt_local_entry)
461 tt_local_del(bat_priv, tt_local_entry,
462 "global tt received");
463
464 spin_unlock_bh(&bat_priv->tt_lhash_lock);
465
466 tt_buff_count++;
467 }
468
469 /* initialize, and overwrite if malloc succeeds */
470 orig_node->tt_buff = NULL;
471 orig_node->tt_buff_len = 0;
472
473 if (tt_buff_len > 0) {
474 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
475 if (orig_node->tt_buff) {
476 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
477 orig_node->tt_buff_len = tt_buff_len;
478 }
479 }
480 }
481
/* debugfs/seq_file handler: print all global tt entries (client mac plus
 * announcing originator) of the mesh interface behind 'seq'. Returns 0 on
 * success, -ENOMEM if the scratch buffer cannot be allocated, or the
 * seq_printf() result when the mesh is disabled or has no active primary
 * interface. */
int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
				 "specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);

	/* the lock is held across both passes so the entry count cannot
	 * change between sizing and filling the scratch buffer */
	spin_lock_bh(&bat_priv->tt_ghash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 43;
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->tt_ghash_lock);
		ret = -ENOMEM;
		goto out;
	}
	buff[0] = '\0';
	pos = 0;

	/* second pass: format each entry (at most 43 chars + NUL) */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_global_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 44,
					" * %pM via %pM\n",
					tt_global_entry->addr,
					tt_global_entry->orig_node->orig);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->tt_ghash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
559
/* remove 'tt_global_entry' from the global hash and free it; 'message'
 * is logged as the reason. Caller must hold tt_ghash_lock (all callers
 * in this file do). */
static void _tt_global_del_orig(struct bat_priv *bat_priv,
				struct tt_global_entry *tt_global_entry,
				const char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv,
		"Deleting global tt entry %pM (via %pM): %s\n",
		tt_global_entry->addr, tt_global_entry->orig_node->orig,
		message);

	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
		    tt_global_entry->addr);
	kfree(tt_global_entry);
}
573
/* delete all global tt entries announced by 'orig_node', as recorded in
 * the tt buffer saved on it, then drop that buffer. Entries that have
 * meanwhile been claimed by a different originator are left alone. */
void tt_global_del_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *tt_global_entry;
	int tt_buff_count = 0;
	unsigned char *tt_ptr;

	if (orig_node->tt_buff_len == 0)
		return;

	spin_lock_bh(&bat_priv->tt_ghash_lock);

	/* walk the saved buffer one mac address (ETH_ALEN bytes) at a time */
	while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) {
		tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN);
		tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);

		/* only delete if this originator still owns the entry */
		if ((tt_global_entry) &&
		    (tt_global_entry->orig_node == orig_node))
			_tt_global_del_orig(bat_priv, tt_global_entry,
					    message);

		tt_buff_count++;
	}

	spin_unlock_bh(&bat_priv->tt_ghash_lock);

	orig_node->tt_buff_len = 0;
	kfree(orig_node->tt_buff);
	orig_node->tt_buff = NULL;
}
604
605 static void tt_global_del(struct hlist_node *node, void *arg)
606 {
607 void *data = container_of(node, struct tt_global_entry, hash_entry);
608
609 kfree(data);
610 }
611
612 void tt_global_free(struct bat_priv *bat_priv)
613 {
614 if (!bat_priv->tt_global_hash)
615 return;
616
617 hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL);
618 bat_priv->tt_global_hash = NULL;
619 }
620
/* find the originator that announced the client mac 'addr' in its global
 * translation table. Returns the orig_node with its refcount increased
 * (caller must release the reference) or NULL when the address is unknown
 * or the originator's refcount has already dropped to zero. */
struct orig_node *transtable_search(struct bat_priv *bat_priv,
				    const uint8_t *addr)
{
	struct tt_global_entry *tt_global_entry;
	struct orig_node *orig_node = NULL;

	spin_lock_bh(&bat_priv->tt_ghash_lock);
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	if (!tt_global_entry)
		goto out;

	/* refuse a reference to an orig_node that is being freed */
	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
		goto out;

	orig_node = tt_global_entry->orig_node;

out:
	spin_unlock_bh(&bat_priv->tt_ghash_lock);
	return orig_node;
}