/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "send.h"
#include "hash.h"
#include "originator.h"
#include "routing.h"
#include "bridge_loop_avoidance.h"

#include <linux/crc16.h>

static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
			  struct orig_node *orig_node);
static void tt_purge(struct work_struct *work);
static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);

/* returns 1 if they are the same mac addr */
static int compare_tt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct tt_common_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

static void tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}

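/* Looks up the entry with the given mac address in @hash and, if found,
 * takes a reference via atomic_inc_not_zero() so the entry cannot be freed
 * under the caller. The caller must release it again through the matching
 * *_free_ref() helper once done.
 */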
static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
					    const void *data)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
	uint32_t index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
		if (!compare_eth(tt_common_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
			continue;

		tt_common_entry_tmp = tt_common_entry;
		break;
	}
	rcu_read_unlock();

	return tt_common_entry_tmp;
}

static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
						 const void *data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry = NULL;

	tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
	if (tt_common_entry)
		tt_local_entry = container_of(tt_common_entry,
					      struct tt_local_entry, common);
	return tt_local_entry;
}

static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
						   const void *data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry = NULL;

	tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
	if (tt_common_entry)
		tt_global_entry = container_of(tt_common_entry,
					       struct tt_global_entry, common);
	return tt_global_entry;
}

static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
		kfree_rcu(tt_local_entry, common.rcu);
}

static void tt_global_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;

	tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
				       common);

	kfree(tt_global_entry);
}

static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
		tt_global_del_orig_list(tt_global_entry);
		call_rcu(&tt_global_entry->common.rcu,
			 tt_global_entry_free_rcu);
	}
}

static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
	atomic_dec(&orig_entry->orig_node->tt_size);
	orig_node_free_ref(orig_entry->orig_node);
	kfree(orig_entry);
}

static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
{
	call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
}

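/* Queues a tt_change_node recording the addition or deletion of a local
 * client. The queued changes are drained into the next OGM through
 * tt_changes_fill_buff(); resetting tt_ogm_append_cnt makes sure the fresh
 * change set really gets (re-)appended.
 */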
static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
			   uint8_t flags)
{
	struct tt_change_node *tt_change_node;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);

	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	/* track the change in the OGM interval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
	atomic_inc(&bat_priv->tt_local_changes);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}

int tt_len(int changes_num)
{
	return changes_num * sizeof(struct tt_change);
}

static int tt_local_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_local_hash)
		return 0;

	bat_priv->tt_local_hash = hash_new(1024);

	if (!bat_priv->tt_local_hash)
		return -ENOMEM;

	return 0;
}

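/* Adds the mac address of a new client to the local translation table or,
 * if an entry already exists, simply refreshes its last_seen timestamp.
 * A new entry starts with a refcount of 2: one reference held by the hash
 * table, one for the scope of this function. If the address was until now
 * announced by other originators, each of them is sent a roaming
 * advertisement.
 */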
void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
		  int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	int hash_added;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		/* possibly unset the TT_CLIENT_PENDING flag */
		tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		(uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
	tt_local_entry->common.flags = NO_FLAGS;
	if (batadv_is_wifi_iface(ifindex))
		tt_local_entry->common.flags |= TT_CLIENT_WIFI;
	atomic_set(&tt_local_entry->common.refcount, 2);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;

	/* The local entry has to be marked as NEW to avoid sending it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check)
	 */
	tt_local_entry->common.flags |= TT_CLIENT_NEW;

	hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
			      &tt_local_entry->common,
			      &tt_local_entry->common.hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		tt_local_entry_free_ref(tt_local_entry);
		goto out;
	}

	tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

	/* remove address from global hash if present */
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	/* Check whether it is a roaming client */
	if (tt_global_entry) {
		/* These nodes are probably going to update their tt tables */
		head = &tt_global_entry->orig_list;
		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
			orig_entry->orig_node->tt_poss_change = true;

			send_roam_adv(bat_priv, tt_global_entry->common.addr,
				      orig_entry->orig_node);
		}
		rcu_read_unlock();
		/* The global entry has to be marked as ROAMING and
		 * has to be kept for consistency purposes
		 */
		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	}
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

static void tt_realloc_packet_buff(unsigned char **packet_buff,
				   int *packet_buff_len, int min_packet_len,
				   int new_packet_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_packet_len, GFP_ATOMIC);

	/* keep the old buffer if kmalloc fails */
	if (new_buff) {
		memcpy(new_buff, *packet_buff, min_packet_len);
		kfree(*packet_buff);
		*packet_buff = new_buff;
		*packet_buff_len = new_packet_len;
	}
}

static void tt_prepare_packet_buff(struct bat_priv *bat_priv,
				   unsigned char **packet_buff,
				   int *packet_buff_len, int min_packet_len)
{
	struct hard_iface *primary_if;
	int req_len;

	primary_if = primary_if_get_selected(bat_priv);

	req_len = min_packet_len;
	req_len += tt_len(atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented
	 */
	if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
		req_len = min_packet_len;

	tt_realloc_packet_buff(packet_buff, packet_buff_len,
			       min_packet_len, req_len);

	if (primary_if)
		hardif_free_ref(primary_if);
}

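/* Copies the pending local TT changes into the packet buffer: the first
 * min_packet_len bytes are left untouched for the caller's own header and
 * the tt_change records are placed right behind them. Entries that do not
 * fit are dropped here; the recipients recover them later through a TT
 * request.
 */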
static int tt_changes_fill_buff(struct bat_priv *bat_priv,
				unsigned char **packet_buff,
				int *packet_buff_len, int min_packet_len)
{
	struct tt_change_node *entry, *safe;
	int count = 0, tot_changes = 0, new_len;
	unsigned char *tt_buff;

	tt_prepare_packet_buff(bat_priv, packet_buff,
			       packet_buff_len, min_packet_len);

	new_len = *packet_buff_len - min_packet_len;
	tt_buff = *packet_buff + min_packet_len;

	if (new_len > 0)
		tot_changes = new_len / tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(tt_buff + tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for a possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* check whether this new OGM has no changes due to size problems */
	if (new_len > 0) {
		/* if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, tt_buff, new_len);
			bat_priv->tt_buff_len = new_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return count;
}

int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NOPURGE ? 'P' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags, const char *message)
{
	tt_local_event(bat_priv, tt_local_entry->common.addr,
		       tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but
	 * has to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency check)
	 */
	tt_local_entry->common.flags |= TT_CLIENT_PENDING;

	bat_dbg(DBG_TT, bat_priv,
		"Local tt entry (%pM) pending to be removed: %s\n",
		tt_local_entry->common.addr, message);
}

void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
		     const char *message, bool roaming)
{
	struct tt_local_entry *tt_local_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;

	tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}

static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
				continue;

			if (!has_timed_out(tt_local_entry->last_seen,
					   TT_LOCAL_TIMEOUT))
				continue;

			tt_local_set_pending(bat_priv, tt_local_entry,
					     TT_CLIENT_DEL, "timed out");
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_local_hash = NULL;
}

static int tt_global_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_global_hash)
		return 0;

	bat_priv->tt_global_hash = hash_new(1024);

	if (!bat_priv->tt_global_hash)
		return -ENOMEM;

	return 0;
}

static void tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}

/* find out if an orig_node is already in the list of a tt_global_entry.
 * returns true if found, false otherwise
 */
static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
				     const struct orig_node *orig_node)
{
	struct tt_orig_list_entry *tmp_orig_entry;
	const struct hlist_head *head;
	struct hlist_node *node;
	bool found = false;

	rcu_read_lock();
	head = &entry->orig_list;
	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
		if (tmp_orig_entry->orig_node == orig_node) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
				     struct orig_node *orig_node,
				     int ttvn)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
	if (!orig_entry)
		return;

	INIT_HLIST_NODE(&orig_entry->list);
	atomic_inc(&orig_node->refcount);
	atomic_inc(&orig_node->tt_size);
	orig_entry->orig_node = orig_node;
	orig_entry->ttvn = ttvn;

	spin_lock_bh(&tt_global_entry->list_lock);
	hlist_add_head_rcu(&orig_entry->list,
			   &tt_global_entry->orig_list);
	spin_unlock_bh(&tt_global_entry->list_lock);
}

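/* Adds a global entry for tt_addr as announced by orig_node, or attaches
 * orig_node to an already existing entry. An entry still flagged
 * TT_CLIENT_ROAM only carries the stale originator from the roaming phase,
 * so its originator list is flushed before the new originator is added.
 */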
/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
		  bool wifi)
{
	struct tt_global_entry *tt_global_entry = NULL;
	int ret = 0;
	int hash_added;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry = kzalloc(sizeof(*tt_global_entry),
					  GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);

		tt_global_entry->common.flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
		atomic_set(&tt_global_entry->common.refcount, 2);

		INIT_HLIST_HEAD(&tt_global_entry->orig_list);
		spin_lock_init(&tt_global_entry->list_lock);

		hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
				      choose_orig, &tt_global_entry->common,
				      &tt_global_entry->common.hash_entry);

		if (unlikely(hash_added != 0)) {
			/* remove the reference for the hash */
			tt_global_entry_free_ref(tt_global_entry);
			goto out_remove;
		}

		tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
	} else {
		/* there is already a global entry, use this one. */

		/* If there is the TT_CLIENT_ROAM flag set, there is only one
		 * originator left in the list and we previously received a
		 * delete + roaming change for this originator.
		 *
		 * We should first delete the old originator before adding the
		 * new one.
		 */
		if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
			tt_global_del_orig_list(tt_global_entry);
			tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
			tt_global_entry->roam_at = 0;
		}

		if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
			tt_global_add_orig_entry(tt_global_entry, orig_node,
						 ttvn);
	}

	if (wifi)
		tt_global_entry->common.flags |= TT_CLIENT_WIFI;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new global tt entry: %pM (via %pM)\n",
		tt_global_entry->common.addr, orig_node->orig);

out_remove:
	/* remove address from local hash if present */
	tt_local_remove(bat_priv, tt_global_entry->common.addr,
			"global tt received", roaming);
	ret = 1;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}

/* print all orig nodes that announce the address for this global entry.
 * it is assumed that the caller holds rcu_read_lock();
 */
static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
				  struct seq_file *seq)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	struct tt_common_entry *tt_common_entry;
	uint16_t flags;
	uint8_t last_ttvn;

	tt_common_entry = &tt_global_entry->common;

	head = &tt_global_entry->orig_list;

	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		flags = tt_common_entry->flags;
		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
		seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
			   tt_global_entry->common.addr, orig_entry->ttvn,
			   orig_entry->orig_node->orig, last_ttvn,
			   (flags & TT_CLIENT_ROAM ? 'R' : '.'),
			   (flags & TT_CLIENT_WIFI ? 'W' : '.'));
	}
}

int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, " %-13s %s %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			tt_global_print_entry(tt_global_entry, seq);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

/* deletes the orig list of a tt_global_entry */
static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		hlist_del_rcu(node);
		tt_orig_list_entry_free_ref(orig_entry);
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}

static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
				     struct tt_global_entry *tt_global_entry,
				     struct orig_node *orig_node,
				     const char *message)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		if (orig_entry->orig_node == orig_node) {
			bat_dbg(DBG_TT, bat_priv,
				"Deleting %pM from global tt entry %pM: %s\n",
				orig_node->orig, tt_global_entry->common.addr,
				message);
			hlist_del_rcu(node);
			tt_orig_list_entry_free_ref(orig_entry);
		}
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}

static void tt_global_del_struct(struct bat_priv *bat_priv,
				 struct tt_global_entry *tt_global_entry,
				 const char *message)
{
	bat_dbg(DBG_TT, bat_priv,
		"Deleting global tt entry %pM: %s\n",
		tt_global_entry->common.addr, message);

	hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
		    tt_global_entry->common.addr);
	tt_global_entry_free_ref(tt_global_entry);
}

/* If the client is to be deleted, we check if it is the last originator
 * entry within the tt_global entry. If yes, we set the TT_CLIENT_ROAM flag
 * and the timer, otherwise we simply remove the originator scheduled for
 * deletion.
 */
static void tt_global_del_roaming(struct bat_priv *bat_priv,
				  struct tt_global_entry *tt_global_entry,
				  struct orig_node *orig_node,
				  const char *message)
{
	bool last_entry = true;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;

	/* no local entry exists, case 1:
	 * Check if this is the last one or if other entries exist.
	 */
	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		if (orig_entry->orig_node != orig_node) {
			last_entry = false;
			break;
		}
	}
	rcu_read_unlock();

	if (last_entry) {
		/* it's the last one, mark it for roaming. */
		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	} else
		/* there is another entry, we can simply delete this
		 * one and can still use the other one.
		 */
		tt_global_del_orig_entry(bat_priv, tt_global_entry,
					 orig_node, message);
}

static void tt_global_del(struct bat_priv *bat_priv,
			  struct orig_node *orig_node,
			  const unsigned char *addr,
			  const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;
	struct tt_local_entry *tt_local_entry = NULL;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (!roaming) {
		tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
					 message);

		if (hlist_empty(&tt_global_entry->orig_list))
			tt_global_del_struct(bat_priv, tt_global_entry,
					     message);

		goto out;
	}

	/* if we are deleting a global entry due to a roam
	 * event, there are two possibilities:
	 * 1) the client roamed from node A to node B => if there
	 *    is only one originator left for this client, we mark
	 *    it with TT_CLIENT_ROAM, we start a timer and we
	 *    wait for node B to claim it. In case of timeout
	 *    the entry is purged.
	 *
	 *    If there are other originators left, we directly delete
	 *    the originator.
	 * 2) the client roamed to us => we can directly delete
	 *    the global entry, since it is useless now.
	 */
	tt_local_entry = tt_local_hash_find(bat_priv,
					    tt_global_entry->common.addr);
	if (tt_local_entry) {
		/* local entry exists, case 2: client roamed to us. */
		tt_global_del_orig_list(tt_global_entry);
		tt_global_del_struct(bat_priv, tt_global_entry, message);
	} else
		/* no local entry exists, case 1: check for roaming */
		tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
				      message);

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}

void tt_global_del_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *tt_global_entry;
	struct tt_common_entry *tt_common_entry;
	uint32_t i;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, safe,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);

			tt_global_del_orig_entry(bat_priv, tt_global_entry,
						 orig_node, message);

			if (hlist_empty(&tt_global_entry->orig_list)) {
				bat_dbg(DBG_TT, bat_priv,
					"Deleting global tt entry %pM: %s\n",
					tt_global_entry->common.addr,
					message);
				hlist_del_rcu(node);
				tt_global_entry_free_ref(tt_global_entry);
			}
		}
		spin_unlock_bh(list_lock);
	}
	atomic_set(&orig_node->tt_size, 0);
	orig_node->tt_initialised = false;
}

static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
				continue;
			if (!has_timed_out(tt_global_entry->roam_at,
					   TT_CLIENT_ROAM_TIMEOUT))
				continue;

			bat_dbg(DBG_TT, bat_priv,
				"Deleting global tt entry (%pM): Roaming timeout\n",
				tt_global_entry->common.addr);

			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_global_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_global_hash)
		return;

	hash = bat_priv->tt_global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_global_hash = NULL;
}

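/* AP isolation: when the ap_isolation switch is on and both the local
 * source and the global destination client carry TT_CLIENT_WIFI, traffic
 * between the two is suppressed (see the check in transtable_search()).
 */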
static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
			    struct tt_global_entry *tt_global_entry)
{
	bool ret = false;

	if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
	    tt_global_entry->common.flags & TT_CLIENT_WIFI)
		ret = true;

	return ret;
}

struct orig_node *transtable_search(struct bat_priv *bat_priv,
				    const uint8_t *src, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	int best_tq;

	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation
	 */
	if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	best_tq = 0;

	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		router = orig_node_get_router(orig_entry->orig_node);
		if (!router)
			continue;

		if (router->tq_avg > best_tq) {
			orig_node = orig_entry->orig_node;
			best_tq = router->tq_avg;
		}
		neigh_node_free_ref(router);
	}
	/* found anything? */
	if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;
	rcu_read_unlock();
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}

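/* Both CRC helpers below use the same order-independent scheme: every
 * entry contributes the CRC16 of its 6 address bytes and the per-entry
 * values are XORed together, e.g.
 *
 *	total_one = 0;
 *	for (j = 0; j < ETH_ALEN; j++)
 *		total_one = crc16_byte(total_one, addr[j]);
 *	total ^= total_one;
 *
 * so two tables holding the same set of clients yield the same checksum
 * regardless of which hash buckets the entries ended up in.
 */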
/* Calculates the checksum of the global table entries announced by a given
 * orig_node (i.e. that node's local table)
 */
static uint16_t tt_global_crc(struct bat_priv *bat_priv,
			      struct orig_node *orig_node)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			/* Roaming clients are in the global table for
			 * consistency only. They don't have to be
			 * taken into account while computing the
			 * global crc
			 */
			if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
				continue;

			/* find out if this global entry is announced by this
			 * originator
			 */
			if (!tt_global_entry_has_orig(tt_global_entry,
						      orig_node))
				continue;

			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one,
					tt_global_entry->common.addr[j]);
			total ^= total_one;
		}
		rcu_read_unlock();
	}

	return total;
}

/* Calculates the checksum of the local table */
static uint16_t batadv_tt_local_crc(struct bat_priv *bat_priv)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			/* not yet committed clients must not be taken into
			 * account while computing the CRC
			 */
			if (tt_common_entry->flags & TT_CLIENT_NEW)
				continue;
			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one,
						       tt_common_entry->addr[j]);
			total ^= total_one;
		}
		rcu_read_unlock();
	}

	return total;
}

static void tt_req_list_free(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

static void tt_save_orig_buffer(struct bat_priv *bat_priv,
				struct orig_node *orig_node,
				const unsigned char *tt_buff,
				uint8_t tt_num_changes)
{
	uint16_t tt_buff_len = tt_len(tt_num_changes);

	/* Replace the old buffer only if I received something in the
	 * last OGM (the OGM could carry no changes)
	 */
	spin_lock_bh(&orig_node->tt_buff_lock);
	if (tt_buff_len > 0) {
		kfree(orig_node->tt_buff);
		orig_node->tt_buff_len = 0;
		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
		if (orig_node->tt_buff) {
			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
			orig_node->tt_buff_len = tt_buff_len;
		}
	}
	spin_unlock_bh(&orig_node->tt_buff_lock);
}

static void tt_req_purge(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
			list_del(&node->list);
			kfree(node);
		}
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

/* returns the pointer to the new tt_req_node struct if no request
 * has already been issued for this orig_node, NULL otherwise
 */
static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
					   struct orig_node *orig_node)
{
	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
		if (compare_eth(tt_req_node_tmp, orig_node) &&
		    !has_timed_out(tt_req_node_tmp->issued_at,
				   TT_REQUEST_TIMEOUT))
			goto unlock;
	}

	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
	if (!tt_req_node)
		goto unlock;

	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
	tt_req_node->issued_at = jiffies;

	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
unlock:
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
	return tt_req_node;
}

/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_common_entry *tt_common_entry = entry_ptr;

	if (tt_common_entry->flags & TT_CLIENT_NEW)
		return 0;
	return 1;
}

static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_common_entry *tt_common_entry = entry_ptr;
	const struct tt_global_entry *tt_global_entry;
	const struct orig_node *orig_node = data_ptr;

	if (tt_common_entry->flags & TT_CLIENT_ROAM)
		return 0;

	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
				       common);

	return tt_global_entry_has_orig(tt_global_entry, orig_node);
}

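/* Builds a TT_RESPONSE skb laid out as
 *
 *	[ tt_query_packet | tt_change 0 | tt_change 1 | ... ]
 *
 * tt_len is clamped to what fits into the primary interface MTU, so a
 * single unfragmented packet carries as many entries as possible; valid_cb
 * decides for each entry whether it may be copied out.
 */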
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	uint32_t i;

	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
							tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_common_entry->addr,
			       ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied
	 */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}

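/* Sends a TT_REQUEST to dst_orig_node carrying the ttvn and CRC we
 * currently hold for it. The receiver answers either with the diff kept in
 * its tt_buff or, if full_table is set (or its buffer is gone), with the
 * complete table. new_tt_req_node() guarantees at most one outstanding
 * request per originator.
 */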
static int send_tt_request(struct bat_priv *bat_priv,
			   struct orig_node *dst_orig_node,
			   uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet
	 */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->header.packet_type = BAT_TT_QUERY;
	tt_request->header.version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->header.ttl = TTL;
	tt_request->ttvn = ttvn;
	tt_request->tt_data = htons(tt_crc);
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_REQUEST to %pM via %pM [%c]\n",
		dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_TX);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}

static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
		tt_request->src, tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, therefore
	 * I'll send only one packet with as many TT entries as fit
	 */
	if (!full_table) {
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BAT_TT_QUERY;
	tt_response->header.version = COMPAT_VERSION;
	tt_response->header.ttl = TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;
}

static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
		tt_request->src, tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table
	 */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, therefore
	 * I'll send only one packet with as many TT entries as fit
	 */
	if (!full_table) {
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BAT_TT_QUERY;
	tt_response->header.version = COMPAT_VERSION;
	tt_response->header.ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}

1672 bool send_tt_response(struct bat_priv *bat_priv,
1673 struct tt_query_packet *tt_request)
1674 {
1675 if (is_my_mac(tt_request->dst)) {
1676 /* don't answer backbone gws! */
1677 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1678 return true;
1679
1680 return send_my_tt_response(bat_priv, tt_request);
1681 } else {
1682 return send_other_tt_response(bat_priv, tt_request);
1683 }
1684 }
1685
1686 static void _tt_update_changes(struct bat_priv *bat_priv,
1687 struct orig_node *orig_node,
1688 struct tt_change *tt_change,
1689 uint16_t tt_num_changes, uint8_t ttvn)
1690 {
1691 int i;
1692
1693 for (i = 0; i < tt_num_changes; i++) {
1694 if ((tt_change + i)->flags & TT_CLIENT_DEL)
1695 tt_global_del(bat_priv, orig_node,
1696 (tt_change + i)->addr,
1697 "tt removed by changes",
1698 (tt_change + i)->flags & TT_CLIENT_ROAM);
1699 else
1700 if (!tt_global_add(bat_priv, orig_node,
1701 (tt_change + i)->addr, ttvn, false,
1702 (tt_change + i)->flags &
1703 TT_CLIENT_WIFI))
1704 /* In case of problem while storing a
1705 * global_entry, we stop the updating
1706 * procedure without committing the
1707 * ttvn change. This will avoid to send
1708 * corrupted data on tt_request
1709 */
1710 return;
1711 }
1712 orig_node->tt_initialised = true;
1713 }
1714
1715 static void tt_fill_gtable(struct bat_priv *bat_priv,
1716 struct tt_query_packet *tt_response)
1717 {
1718 struct orig_node *orig_node = NULL;
1719
1720 orig_node = orig_hash_find(bat_priv, tt_response->src);
1721 if (!orig_node)
1722 goto out;
1723
1724 /* Purge the old table first.. */
1725 tt_global_del_orig(bat_priv, orig_node, "Received full table");
1726
1727 _tt_update_changes(bat_priv, orig_node,
1728 (struct tt_change *)(tt_response + 1),
1729 ntohs(tt_response->tt_data), tt_response->ttvn);
1730
1731 spin_lock_bh(&orig_node->tt_buff_lock);
1732 kfree(orig_node->tt_buff);
1733 orig_node->tt_buff_len = 0;
1734 orig_node->tt_buff = NULL;
1735 spin_unlock_bh(&orig_node->tt_buff_lock);
1736
1737 atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1738
1739 out:
1740 if (orig_node)
1741 orig_node_free_ref(orig_node);
1742 }
1743
1744 static void tt_update_changes(struct bat_priv *bat_priv,
1745 struct orig_node *orig_node,
1746 uint16_t tt_num_changes, uint8_t ttvn,
1747 struct tt_change *tt_change)
1748 {
1749 _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
1750 ttvn);
1751
1752 tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
1753 tt_num_changes);
1754 atomic_set(&orig_node->last_ttvn, ttvn);
1755 }
1756
1757 bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
1758 {
1759 struct tt_local_entry *tt_local_entry = NULL;
1760 bool ret = false;
1761
1762 tt_local_entry = tt_local_hash_find(bat_priv, addr);
1763 if (!tt_local_entry)
1764 goto out;
1765 /* Check if the client has been logically deleted (but is kept for
1766 * consistency purpose) */
1767 if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
1768 goto out;
1769 ret = true;
1770 out:
1771 if (tt_local_entry)
1772 tt_local_entry_free_ref(tt_local_entry);
1773 return ret;
1774 }
1775
1776 void handle_tt_response(struct bat_priv *bat_priv,
1777 struct tt_query_packet *tt_response)
1778 {
1779 struct tt_req_node *node, *safe;
1780 struct orig_node *orig_node = NULL;
1781
1782 bat_dbg(DBG_TT, bat_priv,
1783 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1784 tt_response->src, tt_response->ttvn,
1785 ntohs(tt_response->tt_data),
1786 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1787
1788 /* we should have never asked a backbone gw */
1789 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
1790 goto out;
1791
1792 orig_node = orig_hash_find(bat_priv, tt_response->src);
1793 if (!orig_node)
1794 goto out;
1795
1796 if (tt_response->flags & TT_FULL_TABLE)
1797 tt_fill_gtable(bat_priv, tt_response);
1798 else
1799 tt_update_changes(bat_priv, orig_node,
1800 ntohs(tt_response->tt_data),
1801 tt_response->ttvn,
1802 (struct tt_change *)(tt_response + 1));
1803
1804 /* Delete the tt_req_node from pending tt_requests list */
1805 spin_lock_bh(&bat_priv->tt_req_list_lock);
1806 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1807 if (!compare_eth(node->addr, tt_response->src))
1808 continue;
1809 list_del(&node->list);
1810 kfree(node);
1811 }
1812 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1813
1814 /* Recalculate the CRC for this orig_node and store it */
1815 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
1816 /* Roaming phase is over: tables are in sync again. I can
1817 * unset the flag */
1818 orig_node->tt_poss_change = false;
1819 out:
1820 if (orig_node)
1821 orig_node_free_ref(orig_node);
1822 }
1823
1824 int tt_init(struct bat_priv *bat_priv)
1825 {
1826 int ret;
1827
1828 ret = tt_local_init(bat_priv);
1829 if (ret < 0)
1830 return ret;
1831
1832 ret = tt_global_init(bat_priv);
1833 if (ret < 0)
1834 return ret;
1835
1836 tt_start_timer(bat_priv);
1837
1838 return 1;
1839 }
1840
1841 static void tt_roam_list_free(struct bat_priv *bat_priv)
1842 {
1843 struct tt_roam_node *node, *safe;
1844
1845 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1846
1847 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1848 list_del(&node->list);
1849 kfree(node);
1850 }
1851
1852 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1853 }
1854
1855 static void tt_roam_purge(struct bat_priv *bat_priv)
1856 {
1857 struct tt_roam_node *node, *safe;
1858
1859 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1860 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1861 if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
1862 continue;
1863
1864 list_del(&node->list);
1865 kfree(node);
1866 }
1867 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1868 }
1869
/* This function checks whether the client has already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* A ROAMING_ADV for an already tracked client is only allowed as
	 * long as its per-client counter has not been exhausted yet */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
			continue;

		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}

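/* Note that tt_check_roam_count() above seeds new nodes with
 * ROAMING_MAX_COUNT - 1 because the advertisement that creates the node
 * already counts as the first one: e.g. if ROAMING_MAX_COUNT were 5, a
 * client could trigger five ROAMING_ADV packets within ROAMING_MAX_TIME
 * before being silenced. */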
static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
			  struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	int ret = 1;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
	roam_adv_packet->header.version = COMPAT_VERSION;
	roam_adv_packet->header.ttl = TTL;
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		orig_node->orig, client, neigh_node->addr);

	batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_TX);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (ret)
		kfree_skb(skb);
	return;
}

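/* Periodic worker: drop timed-out local entries, roamed-away global
 * entries and stale request/roaming bookkeeping, then re-arm the timer
 * for the next interval. */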
static void tt_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);

	tt_local_purge(bat_priv);
	tt_global_roam_purge(bat_priv);
	tt_req_purge(bat_priv);
	tt_roam_purge(bat_priv);

	tt_start_timer(bat_priv);
}

void tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}

/* Enable or disable the specified flags for all the entries in the given
 * hash table and return the number of entries that were actually
 * modified */
static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
			     bool enable)
{
	uint32_t i;
	uint16_t changed_num = 0;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry;

	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (enable) {
				if ((tt_common_entry->flags & flags) == flags)
					continue;
				tt_common_entry->flags |= flags;
			} else {
				if (!(tt_common_entry->flags & flags))
					continue;
				tt_common_entry->flags &= ~flags;
			}
			changed_num++;
		}
		rcu_read_unlock();
	}
out:
	return changed_num;
}

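/* The two skip tests in tt_set_flags() are deliberately asymmetric:
 * when enabling, an entry is left untouched only if all requested bits
 * are already set ((tt_common_entry->flags & flags) == flags); when
 * disabling, it is skipped as soon as none of them is set. E.g. with
 * flags == 0x0003, an entry carrying only 0x0001 counts as modified in
 * both directions. */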
/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
				continue;

			bat_dbg(DBG_TT, bat_priv,
				"Deleting local tt entry (%pM): pending\n",
				tt_common_entry->addr);

			atomic_dec(&bat_priv->num_local_tt);
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

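/* Fold all pending local changes into a new table version: clear
 * TT_CLIENT_NEW on freshly added entries, purge the entries pending
 * deletion, recompute the local CRC and bump the ttvn. Returns -ENOENT
 * when there is nothing to commit, otherwise whatever
 * tt_changes_fill_buff() reports for the serialised diff. */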
static int tt_commit_changes(struct bat_priv *bat_priv,
			     unsigned char **packet_buff, int *packet_buff_len,
			     int packet_min_len)
{
	uint16_t changed_num = 0;

	if (atomic_read(&bat_priv->tt_local_changes) < 1)
		return -ENOENT;

	changed_num = tt_set_flags(bat_priv->tt_local_hash,
				   TT_CLIENT_NEW, false);

	/* all reset entries have to be counted as local entries */
	atomic_add(changed_num, &bat_priv->num_local_tt);
	tt_local_purge_pending_clients(bat_priv);
	bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->ttvn);
	bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
		(uint8_t)atomic_read(&bat_priv->ttvn));
	bat_priv->tt_poss_change = false;

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	return tt_changes_fill_buff(bat_priv, packet_buff,
				    packet_buff_len, packet_min_len);
}

/* this function may only be called when hard_iface == primary_if */
int batadv_tt_append_diff(struct bat_priv *bat_priv,
			  unsigned char **packet_buff, int *packet_buff_len,
			  int packet_min_len)
{
	int tt_num_changes;

	/* if at least one change happened */
	tt_num_changes = tt_commit_changes(bat_priv, packet_buff,
					   packet_buff_len, packet_min_len);

	/* if the changes have been sent often enough */
	if ((tt_num_changes < 0) &&
	    (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
		tt_realloc_packet_buff(packet_buff, packet_buff_len,
				       packet_min_len, packet_min_len);
		tt_num_changes = 0;
	}

	return tt_num_changes;
}

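/* A negative tt_num_changes from tt_commit_changes() means no new local
 * change was pending: the previously committed diff then keeps being
 * appended to outgoing OGMs until tt_ogm_append_cnt (reset to
 * TT_OGM_APPEND_MAX at commit time) reaches zero, at which point the
 * buffer is shrunk back to the bare packet_min_len. */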
bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	bool ret = true;

	if (!atomic_read(&bat_priv->ap_isolation))
		return false;

	tt_local_entry = tt_local_hash_find(bat_priv, dst);
	if (!tt_local_entry)
		goto out;

	tt_global_entry = tt_global_hash_find(bat_priv, src);
	if (!tt_global_entry)
		goto out;

	if (_is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	ret = false;

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

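/* Note that ret starts out true in is_ap_isolated() above: with
 * ap_isolation enabled a src/dst pair is reported as isolated not only
 * when _is_ap_isolated() says so, but also whenever either endpoint is
 * missing from the translation tables, i.e. the check fails closed. */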
void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
		    const unsigned char *tt_buff, uint8_t tt_num_changes,
		    uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* don't care about a backbone gateway's updates. */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
		return;

	/* orig table not initialised AND first diff is in the OGM OR the
	 * ttvn increased by one -> we can apply the attached changes */
	if ((!orig_node->tt_initialised && ttvn == 1) ||
	    ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent TT_OGM_APPEND_MAX times.
		 * In this case send a tt request */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data */
		if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
		    orig_node->tt_crc != tt_crc) {
request_table:
			bat_dbg(DBG_TT, bat_priv,
				"TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
				orig_node->orig, ttvn, orig_ttvn, tt_crc,
				orig_node->tt_crc, tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
			return;
		}
	}
}

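/* Summary of the decision taken by tt_update_orig() above:
 *   ttvn advanced by exactly one, diff attached -> apply the diff, then
 *     verify the recomputed CRC against the advertised one;
 *   ttvn advanced by one, but no diff attached  -> request the missing
 *     changes (full_table = false);
 *   any other ttvn or CRC mismatch              -> request the complete
 *     table (full_table stays true). */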
/* returns true if we know that the client has moved from its old
 * originator to another one. Its entry is still kept for consistency
 * purposes */
bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct tt_global_entry *tt_global_entry;
	bool ret = false;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
	tt_global_entry_free_ref(tt_global_entry);
out:
	return ret;
}