]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - net/batman-adv/translation-table.c
batman-adv: Prefix main enum with BATADV_
[mirror_ubuntu-zesty-kernel.git] / net / batman-adv / translation-table.c
1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20 #include "main.h"
21 #include "translation-table.h"
22 #include "soft-interface.h"
23 #include "hard-interface.h"
24 #include "send.h"
25 #include "hash.h"
26 #include "originator.h"
27 #include "routing.h"
28 #include "bridge_loop_avoidance.h"
29
30 #include <linux/crc16.h>
31
32 static void batadv_send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
33 struct orig_node *orig_node);
34 static void batadv_tt_purge(struct work_struct *work);
35 static void
36 batadv_tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
37
38 /* returns 1 if they are the same mac addr */
39 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
40 {
41 const void *data1 = container_of(node, struct tt_common_entry,
42 hash_entry);
43
44 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
45 }
46
/* (re)arm the periodic translation table purge work; it fires in 5 seconds
 * on the batman-adv event workqueue
 */
static void batadv_tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}
53
/* look up data (a mac address) in the given translation table hash.
 * Returns the matching tt_common_entry with its refcount increased
 * (the caller must drop that reference) or NULL if nothing was found.
 */
static struct tt_common_entry *batadv_tt_hash_find(struct hashtable_t *hash,
						   const void *data)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
	uint32_t index;

	if (!hash)
		return NULL;

	/* select the bucket this address hashes into */
	index = batadv_choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
		if (!batadv_compare_eth(tt_common_entry, data))
			continue;

		/* skip entries whose last reference is already being
		 * dropped concurrently
		 */
		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
			continue;

		tt_common_entry_tmp = tt_common_entry;
		break;
	}
	rcu_read_unlock();

	return tt_common_entry_tmp;
}
83
84 static struct tt_local_entry *
85 batadv_tt_local_hash_find(struct bat_priv *bat_priv, const void *data)
86 {
87 struct tt_common_entry *tt_common_entry;
88 struct tt_local_entry *tt_local_entry = NULL;
89
90 tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
91 if (tt_common_entry)
92 tt_local_entry = container_of(tt_common_entry,
93 struct tt_local_entry, common);
94 return tt_local_entry;
95 }
96
97 static struct tt_global_entry *
98 batadv_tt_global_hash_find(struct bat_priv *bat_priv, const void *data)
99 {
100 struct tt_common_entry *tt_common_entry;
101 struct tt_global_entry *tt_global_entry = NULL;
102
103 tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
104 if (tt_common_entry)
105 tt_global_entry = container_of(tt_common_entry,
106 struct tt_global_entry, common);
107 return tt_global_entry;
108
109 }
110
/* drop a reference to a local entry; once the last reference is gone the
 * entry is freed after an RCU grace period
 */
static void
batadv_tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
		kfree_rcu(tt_local_entry, common.rcu);
}
117
118 static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
119 {
120 struct tt_common_entry *tt_common_entry;
121 struct tt_global_entry *tt_global_entry;
122
123 tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
124 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
125 common);
126
127 kfree(tt_global_entry);
128 }
129
/* drop a reference to a global entry; when the last reference is gone the
 * attached originator list is purged and the entry itself is freed after
 * an RCU grace period
 */
static void
batadv_tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
		batadv_tt_global_del_orig_list(tt_global_entry);
		call_rcu(&tt_global_entry->common.rcu,
			 batadv_tt_global_entry_free_rcu);
	}
}
139
/* RCU callback: release the orig_node reference held by the list entry and
 * free the entry itself
 */
static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
	batadv_orig_node_free_ref(orig_entry->orig_node);
	kfree(orig_entry);
}
148
/* release an originator list entry: the originator's tt counter is adjusted
 * right away while the actual freeing is deferred to an RCU callback
 */
static void
batadv_tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
{
	/* to avoid race conditions, immediately decrease the tt counter */
	atomic_dec(&orig_entry->orig_node->tt_size);
	call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
}
156
157 static void batadv_tt_local_event(struct bat_priv *bat_priv,
158 const uint8_t *addr, uint8_t flags)
159 {
160 struct tt_change_node *tt_change_node, *entry, *safe;
161 bool event_removed = false;
162 bool del_op_requested, del_op_entry;
163
164 tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
165
166 if (!tt_change_node)
167 return;
168
169 tt_change_node->change.flags = flags;
170 memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
171
172 del_op_requested = flags & BATADV_TT_CLIENT_DEL;
173
174 /* check for ADD+DEL or DEL+ADD events */
175 spin_lock_bh(&bat_priv->tt_changes_list_lock);
176 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
177 list) {
178 if (!batadv_compare_eth(entry->change.addr, addr))
179 continue;
180
181 /* DEL+ADD in the same orig interval have no effect and can be
182 * removed to avoid silly behaviour on the receiver side. The
183 * other way around (ADD+DEL) can happen in case of roaming of
184 * a client still in the NEW state. Roaming of NEW clients is
185 * now possible due to automatically recognition of "temporary"
186 * clients
187 */
188 del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
189 if (!del_op_requested && del_op_entry)
190 goto del;
191 if (del_op_requested && !del_op_entry)
192 goto del;
193 continue;
194 del:
195 list_del(&entry->list);
196 kfree(entry);
197 event_removed = true;
198 goto unlock;
199 }
200
201 /* track the change in the OGMinterval list */
202 list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
203
204 unlock:
205 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
206
207 if (event_removed)
208 atomic_dec(&bat_priv->tt_local_changes);
209 else
210 atomic_inc(&bat_priv->tt_local_changes);
211 }
212
213 int batadv_tt_len(int changes_num)
214 {
215 return changes_num * sizeof(struct tt_change);
216 }
217
218 static int batadv_tt_local_init(struct bat_priv *bat_priv)
219 {
220 if (bat_priv->tt_local_hash)
221 return 0;
222
223 bat_priv->tt_local_hash = batadv_hash_new(1024);
224
225 if (!bat_priv->tt_local_hash)
226 return -ENOMEM;
227
228 return 0;
229 }
230
/* add a client to the local translation table, or refresh its timestamp if
 * it is already known. If the address is also present in the global table
 * the client is considered to have roamed to us: the originators that
 * announced it are told via roaming advertisements and the global entry is
 * flagged ROAM so it can be purged later.
 */
void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
			 int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	int hash_added;

	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);

	/* client already known: just refresh it and bail out */
	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		/* possibly unset the BATADV_TT_CLIENT_PENDING flag */
		tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		   (uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
	tt_local_entry->common.flags = BATADV_NO_FLAGS;
	if (batadv_is_wifi_iface(ifindex))
		tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
	/* one reference for the hash table, one for this function */
	atomic_set(&tt_local_entry->common.refcount, 2);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (batadv_compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE;

	/* The local entry has to be marked as NEW to avoid to send it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check)
	 */
	tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;

	hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
				     batadv_choose_orig,
				     &tt_local_entry->common,
				     &tt_local_entry->common.hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		batadv_tt_local_entry_free_ref(tt_local_entry);
		goto out;
	}

	batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

	/* remove address from global hash if present */
	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);

	/* Check whether it is a roaming! */
	if (tt_global_entry) {
		/* These node are probably going to update their tt table */
		head = &tt_global_entry->orig_list;
		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
			orig_entry->orig_node->tt_poss_change = true;

			batadv_send_roam_adv(bat_priv,
					     tt_global_entry->common.addr,
					     orig_entry->orig_node);
		}
		rcu_read_unlock();
		/* The global entry has to be marked as ROAMING and
		 * has to be kept for consistency purpose
		 */
		tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	}
out:
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
}
317
318 static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
319 int *packet_buff_len,
320 int min_packet_len,
321 int new_packet_len)
322 {
323 unsigned char *new_buff;
324
325 new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
326
327 /* keep old buffer if kmalloc should fail */
328 if (new_buff) {
329 memcpy(new_buff, *packet_buff, min_packet_len);
330 kfree(*packet_buff);
331 *packet_buff = new_buff;
332 *packet_buff_len = new_packet_len;
333 }
334 }
335
/* resize *packet_buff so it can hold min_packet_len bytes plus all pending
 * local tt changes. If the result would exceed the primary interface MTU
 * (or no primary interface exists) the buffer is shrunk back to
 * min_packet_len and the changes will be fetched via a tt request instead.
 */
static void batadv_tt_prepare_packet_buff(struct bat_priv *bat_priv,
					  unsigned char **packet_buff,
					  int *packet_buff_len,
					  int min_packet_len)
{
	struct hard_iface *primary_if;
	int req_len;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	req_len = min_packet_len;
	req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented
	 */
	if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
		req_len = min_packet_len;

	batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
				      min_packet_len, req_len);

	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}
361
/* copy as many pending local tt changes as fit into *packet_buff (after
 * the first min_packet_len bytes), drain the change list and keep a copy
 * of the serialized changes in bat_priv->tt_buff for later tt requests.
 * Returns the number of changes copied into the buffer.
 */
static int batadv_tt_changes_fill_buff(struct bat_priv *bat_priv,
				       unsigned char **packet_buff,
				       int *packet_buff_len,
				       int min_packet_len)
{
	struct tt_change_node *entry, *safe;
	int count = 0, tot_changes = 0, new_len;
	unsigned char *tt_buff;

	batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
				      packet_buff_len, min_packet_len);

	/* room actually granted for tt changes */
	new_len = *packet_buff_len - min_packet_len;
	tt_buff = *packet_buff + min_packet_len;

	if (new_len > 0)
		tot_changes = new_len / batadv_tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	/* serialize up to tot_changes entries; surplus entries are dropped */
	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(tt_buff + batadv_tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* check whether this new OGM has no changes due to size problems */
	if (new_len > 0) {
		/* if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, tt_buff, new_len);
			bat_priv->tt_buff_len = new_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return count;
}
415
/* debugfs seq_file handler: dump the local translation table, one line per
 * client with its flag characters (R/P/N/X/W)
 */
int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
473
/* mark a local client as "pending to be removed" (flags typically carry
 * BATADV_TT_CLIENT_DEL and possibly BATADV_TT_CLIENT_ROAM) and queue the
 * corresponding tt change event. message is used for debug logging only.
 */
static void batadv_tt_local_set_pending(struct bat_priv *bat_priv,
					struct tt_local_entry *tt_local_entry,
					uint16_t flags, const char *message)
{
	batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
			      tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but has
	 * to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency check)
	 */
	tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Local tt entry (%pM) pending to be removed: %s\n",
		   tt_local_entry->common.addr, message);
}
491
492 void batadv_tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
493 const char *message, bool roaming)
494 {
495 struct tt_local_entry *tt_local_entry = NULL;
496 uint16_t flags;
497
498 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
499 if (!tt_local_entry)
500 goto out;
501
502 flags = BATADV_TT_CLIENT_DEL;
503 if (roaming)
504 flags |= BATADV_TT_CLIENT_ROAM;
505
506 batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
507 out:
508 if (tt_local_entry)
509 batadv_tt_local_entry_free_ref(tt_local_entry);
510 }
511
/* walk one hash bucket and mark every timed out local client as pending
 * for deletion. NOPURGE clients and clients already marked PENDING are
 * skipped. Caller must hold the bucket's list lock.
 */
static void batadv_tt_local_purge_list(struct bat_priv *bat_priv,
				       struct hlist_head *head)
{
	struct tt_local_entry *tt_local_entry;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node, *node_tmp;

	hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
				  hash_entry) {
		tt_local_entry = container_of(tt_common_entry,
					      struct tt_local_entry, common);
		if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
			continue;

		/* entry already marked for deletion */
		if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
			continue;

		if (!batadv_has_timed_out(tt_local_entry->last_seen,
					  BATADV_TT_LOCAL_TIMEOUT))
			continue;

		batadv_tt_local_set_pending(bat_priv, tt_local_entry,
					    BATADV_TT_CLIENT_DEL, "timed out");
	}
}
538
539 static void batadv_tt_local_purge(struct bat_priv *bat_priv)
540 {
541 struct hashtable_t *hash = bat_priv->tt_local_hash;
542 struct hlist_head *head;
543 spinlock_t *list_lock; /* protects write access to the hash lists */
544 uint32_t i;
545
546 for (i = 0; i < hash->size; i++) {
547 head = &hash->table[i];
548 list_lock = &hash->list_locks[i];
549
550 spin_lock_bh(list_lock);
551 batadv_tt_local_purge_list(bat_priv, head);
552 spin_unlock_bh(list_lock);
553 }
554
555 }
556
/* tear down the local translation table: unlink every entry, drop the hash
 * reference on each, destroy the hash itself and clear the pointer
 */
static void batadv_tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			/* drop the reference held by the hash table */
			batadv_tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);

	bat_priv->tt_local_hash = NULL;
}
592
593 static int batadv_tt_global_init(struct bat_priv *bat_priv)
594 {
595 if (bat_priv->tt_global_hash)
596 return 0;
597
598 bat_priv->tt_global_hash = batadv_hash_new(1024);
599
600 if (!bat_priv->tt_global_hash)
601 return -ENOMEM;
602
603 return 0;
604 }
605
/* drop all queued local tt change events and reset the pending counter */
static void batadv_tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}
621
/* find out if an orig_node is already in the list of a tt_global_entry.
 * returns true if found, false otherwise
 */
static bool batadv_tt_global_entry_has_orig(const struct tt_global_entry *entry,
					    const struct orig_node *orig_node)
{
	struct tt_orig_list_entry *tmp_orig_entry;
	const struct hlist_head *head;
	struct hlist_node *node;
	bool found = false;

	rcu_read_lock();
	head = &entry->orig_list;
	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
		if (tmp_orig_entry->orig_node == orig_node) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
644
/* attach orig_node (announcing the client at version ttvn) to a global
 * entry's originator list. Takes a reference on orig_node and bumps its
 * tt_size; silently does nothing on allocation failure.
 */
static void
batadv_tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
				struct orig_node *orig_node, int ttvn)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
	if (!orig_entry)
		return;

	INIT_HLIST_NODE(&orig_entry->list);
	/* hold a reference for the list entry; released in the RCU callback */
	atomic_inc(&orig_node->refcount);
	atomic_inc(&orig_node->tt_size);
	orig_entry->orig_node = orig_node;
	orig_entry->ttvn = ttvn;

	spin_lock_bh(&tt_global_entry->list_lock);
	hlist_add_head_rcu(&orig_entry->list,
			   &tt_global_entry->orig_list);
	spin_unlock_bh(&tt_global_entry->list_lock);
}
666
/* caller must hold orig_node refcount.
 *
 * Add a client to the global translation table (creating the entry if it
 * does not exist yet) and attach orig_node as one of its announcers. Any
 * matching local entry is scheduled for removal since the client is now
 * served by another originator. Returns 1 on success, 0 on allocation
 * failure.
 */
int batadv_tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
			 const unsigned char *tt_addr, uint8_t flags,
			 uint8_t ttvn)
{
	struct tt_global_entry *tt_global_entry = NULL;
	int ret = 0;
	int hash_added;
	struct tt_common_entry *common;

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		common = &tt_global_entry->common;
		memcpy(common->addr, tt_addr, ETH_ALEN);

		common->flags = flags;
		tt_global_entry->roam_at = 0;
		/* one reference for the hash, one for this function */
		atomic_set(&common->refcount, 2);

		INIT_HLIST_HEAD(&tt_global_entry->orig_list);
		spin_lock_init(&tt_global_entry->list_lock);

		hash_added = batadv_hash_add(bat_priv->tt_global_hash,
					     batadv_compare_tt,
					     batadv_choose_orig, common,
					     &common->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* remove the reference for the hash */
			batadv_tt_global_entry_free_ref(tt_global_entry);
			goto out_remove;
		}

		batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
						ttvn);
	} else {
		/* there is already a global entry, use this one. */

		/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
		 * one originator left in the list and we previously received a
		 * delete + roaming change for this originator.
		 *
		 * We should first delete the old originator before adding the
		 * new one.
		 */
		if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) {
			batadv_tt_global_del_orig_list(tt_global_entry);
			tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
			tt_global_entry->roam_at = 0;
		}

		if (!batadv_tt_global_entry_has_orig(tt_global_entry,
						     orig_node))
			batadv_tt_global_add_orig_entry(tt_global_entry,
							orig_node, ttvn);
	}

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Creating new global tt entry: %pM (via %pM)\n",
		   tt_global_entry->common.addr, orig_node->orig);

out_remove:
	/* remove address from local hash if present */
	batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
			       "global tt received",
			       flags & BATADV_TT_CLIENT_ROAM);
	ret = 1;
out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	return ret;
}
744
/* print all orig nodes who announce the address for this global entry.
 * it is assumed that the caller holds rcu_read_lock();
 */
static void
batadv_tt_global_print_entry(struct tt_global_entry *tt_global_entry,
			     struct seq_file *seq)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	struct tt_common_entry *tt_common_entry;
	uint16_t flags;
	uint8_t last_ttvn;

	tt_common_entry = &tt_global_entry->common;

	head = &tt_global_entry->orig_list;

	/* one line per announcing originator: client ttvn vs. the
	 * originator's current ttvn, plus the R/W flag characters
	 */
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		flags = tt_common_entry->flags;
		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
		seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
			   tt_global_entry->common.addr, orig_entry->ttvn,
			   orig_entry->orig_node->orig, last_ttvn,
			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
	}
}
773
/* debugfs seq_file handler: dump the global translation table, listing for
 * each client every originator announcing it
 */
int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, "       %-13s %s       %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			batadv_tt_global_print_entry(tt_global_entry, seq);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
826
/* deletes the orig list of a tt_global_entry, releasing every originator
 * list entry (and with it the orig_node reference each one holds)
 */
static void
batadv_tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		hlist_del_rcu(node);
		batadv_tt_orig_list_entry_free_ref(orig_entry);
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}
844
/* remove one specific originator from a global entry's announcer list
 * (no-op if orig_node is not in the list). message is for debug logging.
 */
static void
batadv_tt_global_del_orig_entry(struct bat_priv *bat_priv,
				struct tt_global_entry *tt_global_entry,
				struct orig_node *orig_node,
				const char *message)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		if (orig_entry->orig_node == orig_node) {
			batadv_dbg(BATADV_DBG_TT, bat_priv,
				   "Deleting %pM from global tt entry %pM: %s\n",
				   orig_node->orig,
				   tt_global_entry->common.addr, message);
			hlist_del_rcu(node);
			batadv_tt_orig_list_entry_free_ref(orig_entry);
		}
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}
869
/* unlink a global entry from the global hash and drop the reference the
 * hash table held on it
 */
static void batadv_tt_global_del_struct(struct bat_priv *bat_priv,
					struct tt_global_entry *tt_global_entry,
					const char *message)
{
	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Deleting global tt entry %pM: %s\n",
		   tt_global_entry->common.addr, message);

	batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
			   batadv_choose_orig, tt_global_entry->common.addr);
	batadv_tt_global_entry_free_ref(tt_global_entry);
}
883
/* If the client is to be deleted, we check if it is the last originator entry
 * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
 * timer, otherwise we simply remove the originator scheduled for deletion.
 */
static void
batadv_tt_global_del_roaming(struct bat_priv *bat_priv,
			     struct tt_global_entry *tt_global_entry,
			     struct orig_node *orig_node, const char *message)
{
	bool last_entry = true;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;

	/* no local entry exists, case 1:
	 * Check if this is the last one or if other entries exist.
	 */

	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		/* any entry for a different originator means orig_node is
		 * not the only announcer left
		 */
		if (orig_entry->orig_node != orig_node) {
			last_entry = false;
			break;
		}
	}
	rcu_read_unlock();

	if (last_entry) {
		/* its the last one, mark for roaming. */
		tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	} else
		/* there is another entry, we can simply delete this
		 * one and can still use the other one.
		 */
		batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
						orig_node, message);
}
923
924
925
/* delete a client (announced by orig_node) from the global table. Plain
 * deletions drop the originator and, if it was the last announcer, the
 * whole entry; roaming deletions follow the two cases documented inline.
 */
static void batadv_tt_global_del(struct bat_priv *bat_priv,
				 struct orig_node *orig_node,
				 const unsigned char *addr,
				 const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;
	struct tt_local_entry *local_entry = NULL;

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (!roaming) {
		batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
						orig_node, message);

		if (hlist_empty(&tt_global_entry->orig_list))
			batadv_tt_global_del_struct(bat_priv, tt_global_entry,
						    message);

		goto out;
	}

	/* if we are deleting a global entry due to a roam
	 * event, there are two possibilities:
	 * 1) the client roamed from node A to node B => if there
	 *    is only one originator left for this client, we mark
	 *    it with BATADV_TT_CLIENT_ROAM, we start a timer and we
	 *    wait for node B to claim it. In case of timeout
	 *    the entry is purged.
	 *
	 *    If there are other originators left, we directly delete
	 *    the originator.
	 * 2) the client roamed to us => we can directly delete
	 *    the global entry, since it is useless now.
	 */
	local_entry = batadv_tt_local_hash_find(bat_priv,
						tt_global_entry->common.addr);
	if (local_entry) {
		/* local entry exists, case 2: client roamed to us. */
		batadv_tt_global_del_orig_list(tt_global_entry);
		batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
	} else
		/* no local entry exists, case 1: check for roaming */
		batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
					     orig_node, message);

out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	if (local_entry)
		batadv_tt_local_entry_free_ref(local_entry);
}
980
/* remove orig_node from every global entry's announcer list; entries left
 * with no announcers are deleted entirely. Finally flag the originator's
 * tt state as uninitialised so a fresh full table will be requested.
 */
void batadv_tt_global_del_orig(struct bat_priv *bat_priv,
			       struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *global_entry;
	struct tt_common_entry *tt_common_entry;
	uint32_t i;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, safe,
					  head, hash_entry) {
			global_entry = container_of(tt_common_entry,
						    struct tt_global_entry,
						    common);

			batadv_tt_global_del_orig_entry(bat_priv, global_entry,
							orig_node, message);

			/* last announcer gone: drop the whole entry */
			if (hlist_empty(&global_entry->orig_list)) {
				batadv_dbg(BATADV_DBG_TT, bat_priv,
					   "Deleting global tt entry %pM: %s\n",
					   global_entry->common.addr, message);
				hlist_del_rcu(node);
				batadv_tt_global_entry_free_ref(global_entry);
			}
		}
		spin_unlock_bh(list_lock);
	}
	orig_node->tt_initialised = false;
}
1021
/* walk one hash bucket and drop global entries whose ROAM flag has been set
 * for longer than the roaming timeout (the roamed-to node never claimed
 * them). Caller must hold the bucket's list lock.
 */
static void batadv_tt_global_roam_purge_list(struct bat_priv *bat_priv,
					     struct hlist_head *head)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;

	hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
				  hash_entry) {
		tt_global_entry = container_of(tt_common_entry,
					       struct tt_global_entry, common);
		if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
			continue;
		if (!batadv_has_timed_out(tt_global_entry->roam_at,
					  BATADV_TT_CLIENT_ROAM_TIMEOUT))
			continue;

		batadv_dbg(BATADV_DBG_TT, bat_priv,
			   "Deleting global tt entry (%pM): Roaming timeout\n",
			   tt_global_entry->common.addr);

		hlist_del_rcu(node);
		batadv_tt_global_entry_free_ref(tt_global_entry);
	}
}
1047
1048 static void batadv_tt_global_roam_purge(struct bat_priv *bat_priv)
1049 {
1050 struct hashtable_t *hash = bat_priv->tt_global_hash;
1051 struct hlist_head *head;
1052 spinlock_t *list_lock; /* protects write access to the hash lists */
1053 uint32_t i;
1054
1055 for (i = 0; i < hash->size; i++) {
1056 head = &hash->table[i];
1057 list_lock = &hash->list_locks[i];
1058
1059 spin_lock_bh(list_lock);
1060 batadv_tt_global_roam_purge_list(bat_priv, head);
1061 spin_unlock_bh(list_lock);
1062 }
1063
1064 }
1065
/* Tear down the whole global translation table (interface shutdown).
 *
 * Every entry is unlinked under its bucket lock and its reference dropped;
 * finally the hash structure itself is destroyed.
 */
static void batadv_tt_global_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	/* nothing to do if the table was never allocated */
	if (!bat_priv->tt_global_hash)
		return;

	hash = bat_priv->tt_global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			batadv_tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);

	bat_priv->tt_global_hash = NULL;
}
1101
1102 static bool _batadv_is_ap_isolated(struct tt_local_entry *tt_local_entry,
1103 struct tt_global_entry *tt_global_entry)
1104 {
1105 bool ret = false;
1106
1107 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
1108 tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
1109 ret = true;
1110
1111 return ret;
1112 }
1113
/* Look up the originator announcing the global client @addr.
 *
 * @src: MAC of the local client sourcing the traffic (may be NULL); when AP
 *	 isolation is enabled it is used to veto wifi-to-wifi forwarding
 * @addr: MAC of the destination client to resolve
 *
 * Among all originators announcing @addr, the one whose router has the best
 * TQ wins. Returns the orig_node with an incremented refcount (caller must
 * release it) or NULL if the client is unknown or isolated.
 */
struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv,
					   const uint8_t *src,
					   const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	int best_tq;

	/* with AP isolation on we need the local entry to check WIFI flags */
	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation
	 */
	if (tt_local_entry &&
	    _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	best_tq = 0;

	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	/* pick the announcing originator whose router has the best TQ */
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		router = batadv_orig_node_get_router(orig_entry->orig_node);
		if (!router)
			continue;

		if (router->tq_avg > best_tq) {
			orig_node = orig_entry->orig_node;
			best_tq = router->tq_avg;
		}
		batadv_neigh_node_free_ref(router);
	}
	/* found anything? */
	if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;
	rcu_read_unlock();
out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}
1171
1172 /* Calculates the checksum of the local table of a given orig_node */
1173 static uint16_t batadv_tt_global_crc(struct bat_priv *bat_priv,
1174 struct orig_node *orig_node)
1175 {
1176 uint16_t total = 0, total_one;
1177 struct hashtable_t *hash = bat_priv->tt_global_hash;
1178 struct tt_common_entry *tt_common;
1179 struct tt_global_entry *tt_global_entry;
1180 struct hlist_node *node;
1181 struct hlist_head *head;
1182 uint32_t i;
1183 int j;
1184
1185 for (i = 0; i < hash->size; i++) {
1186 head = &hash->table[i];
1187
1188 rcu_read_lock();
1189 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
1190 tt_global_entry = container_of(tt_common,
1191 struct tt_global_entry,
1192 common);
1193 /* Roaming clients are in the global table for
1194 * consistency only. They don't have to be
1195 * taken into account while computing the
1196 * global crc
1197 */
1198 if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
1199 continue;
1200
1201 /* find out if this global entry is announced by this
1202 * originator
1203 */
1204 if (!batadv_tt_global_entry_has_orig(tt_global_entry,
1205 orig_node))
1206 continue;
1207
1208 total_one = 0;
1209 for (j = 0; j < ETH_ALEN; j++)
1210 total_one = crc16_byte(total_one,
1211 tt_common->addr[j]);
1212 total ^= total_one;
1213 }
1214 rcu_read_unlock();
1215 }
1216
1217 return total;
1218 }
1219
1220 /* Calculates the checksum of the local table */
1221 static uint16_t batadv_tt_local_crc(struct bat_priv *bat_priv)
1222 {
1223 uint16_t total = 0, total_one;
1224 struct hashtable_t *hash = bat_priv->tt_local_hash;
1225 struct tt_common_entry *tt_common;
1226 struct hlist_node *node;
1227 struct hlist_head *head;
1228 uint32_t i;
1229 int j;
1230
1231 for (i = 0; i < hash->size; i++) {
1232 head = &hash->table[i];
1233
1234 rcu_read_lock();
1235 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
1236 /* not yet committed clients have not to be taken into
1237 * account while computing the CRC
1238 */
1239 if (tt_common->flags & BATADV_TT_CLIENT_NEW)
1240 continue;
1241 total_one = 0;
1242 for (j = 0; j < ETH_ALEN; j++)
1243 total_one = crc16_byte(total_one,
1244 tt_common->addr[j]);
1245 total ^= total_one;
1246 }
1247 rcu_read_unlock();
1248 }
1249
1250 return total;
1251 }
1252
1253 static void batadv_tt_req_list_free(struct bat_priv *bat_priv)
1254 {
1255 struct tt_req_node *node, *safe;
1256
1257 spin_lock_bh(&bat_priv->tt_req_list_lock);
1258
1259 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1260 list_del(&node->list);
1261 kfree(node);
1262 }
1263
1264 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1265 }
1266
1267 static void batadv_tt_save_orig_buffer(struct bat_priv *bat_priv,
1268 struct orig_node *orig_node,
1269 const unsigned char *tt_buff,
1270 uint8_t tt_num_changes)
1271 {
1272 uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
1273
1274 /* Replace the old buffer only if I received something in the
1275 * last OGM (the OGM could carry no changes)
1276 */
1277 spin_lock_bh(&orig_node->tt_buff_lock);
1278 if (tt_buff_len > 0) {
1279 kfree(orig_node->tt_buff);
1280 orig_node->tt_buff_len = 0;
1281 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
1282 if (orig_node->tt_buff) {
1283 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
1284 orig_node->tt_buff_len = tt_buff_len;
1285 }
1286 }
1287 spin_unlock_bh(&orig_node->tt_buff_lock);
1288 }
1289
1290 static void batadv_tt_req_purge(struct bat_priv *bat_priv)
1291 {
1292 struct tt_req_node *node, *safe;
1293
1294 spin_lock_bh(&bat_priv->tt_req_list_lock);
1295 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1296 if (batadv_has_timed_out(node->issued_at,
1297 BATADV_TT_REQUEST_TIMEOUT)) {
1298 list_del(&node->list);
1299 kfree(node);
1300 }
1301 }
1302 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1303 }
1304
1305 /* returns the pointer to the new tt_req_node struct if no request
1306 * has already been issued for this orig_node, NULL otherwise
1307 */
1308 static struct tt_req_node *batadv_new_tt_req_node(struct bat_priv *bat_priv,
1309 struct orig_node *orig_node)
1310 {
1311 struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1312
1313 spin_lock_bh(&bat_priv->tt_req_list_lock);
1314 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
1315 if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
1316 !batadv_has_timed_out(tt_req_node_tmp->issued_at,
1317 BATADV_TT_REQUEST_TIMEOUT))
1318 goto unlock;
1319 }
1320
1321 tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
1322 if (!tt_req_node)
1323 goto unlock;
1324
1325 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1326 tt_req_node->issued_at = jiffies;
1327
1328 list_add(&tt_req_node->list, &bat_priv->tt_req_list);
1329 unlock:
1330 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1331 return tt_req_node;
1332 }
1333
1334 /* data_ptr is useless here, but has to be kept to respect the prototype */
1335 static int batadv_tt_local_valid_entry(const void *entry_ptr,
1336 const void *data_ptr)
1337 {
1338 const struct tt_common_entry *tt_common_entry = entry_ptr;
1339
1340 if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
1341 return 0;
1342 return 1;
1343 }
1344
1345 static int batadv_tt_global_valid(const void *entry_ptr,
1346 const void *data_ptr)
1347 {
1348 const struct tt_common_entry *tt_common_entry = entry_ptr;
1349 const struct tt_global_entry *tt_global_entry;
1350 const struct orig_node *orig_node = data_ptr;
1351
1352 if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM)
1353 return 0;
1354
1355 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
1356 common);
1357
1358 return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
1359 }
1360
/* Build a TT_RESPONSE skb from the entries of @hash.
 *
 * @tt_len: requested payload length in bytes (clamped to the MTU below)
 * @ttvn: translation table version number to advertise
 * @valid_cb: optional per-entry filter; rejected entries are skipped
 * @cb_data: opaque argument handed to @valid_cb
 *
 * Returns the allocated skb (remaining header fields are filled in by the
 * caller) or NULL on allocation failure.
 */
static struct sk_buff *
batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
			      struct hashtable_t *hash,
			      struct hard_iface *primary_if,
			      int (*valid_cb)(const void *, const void *),
			      void *cb_data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	uint32_t i;

	/* clamp the payload to the MTU, keeping whole tt_change records */
	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						     tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_common_entry->addr,
			       ETH_ALEN);
			tt_change->flags = BATADV_NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied
	 */
	/* NOTE(review): if valid_cb rejects entries, tt_count < tt_tot while
	 * the skb was sized for tt_tot records; the unused tail is never
	 * written — confirm receivers only read tt_data entries
	 */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}
1426
/* Issue a TT_REQUEST to @dst_orig_node asking for table version @ttvn.
 *
 * @tt_crc: checksum we expect the answer to match
 * @full_table: ask for the whole table instead of the incremental diff
 *
 * A request is only sent if none is already pending for this originator.
 * Returns 0 on success, 1 on failure; on failure both the skb and the
 * pending-request marker are cleaned up again.
 */
static int batadv_send_tt_request(struct bat_priv *bat_priv,
				  struct orig_node *dst_orig_node,
				  uint8_t ttvn, uint16_t tt_crc,
				  bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet
	 */
	tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->header.packet_type = BATADV_TT_QUERY;
	tt_request->header.version = BATADV_COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->header.ttl = BATADV_TTL;
	tt_request->ttvn = ttvn;
	tt_request->tt_data = htons(tt_crc);
	tt_request->flags = BATADV_TT_REQUEST;

	if (full_table)
		tt_request->flags |= BATADV_TT_FULL_TABLE;

	neigh_node = batadv_orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Sending TT_REQUEST to %pM via %pM [%c]\n",
		   dst_orig_node->orig, neigh_node->addr,
		   (full_table ? 'F' : '.'));

	batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	/* on failure, withdraw the pending-request marker again so a new
	 * request towards this originator is not blocked
	 */
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}
1500
/* Answer a TT_REQUEST whose destination is another node (proxy reply).
 *
 * This node serves its mirror of the real destination's translation table,
 * either from the cached OGM change buffer (incremental) or by rebuilding
 * the full table view — but only if the requested ttvn and crc match what
 * we have. Returns true if a response was sent, false otherwise.
 */
static bool batadv_send_other_tt_response(struct bat_priv *bat_priv,
					  struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
		   tt_request->src, tt_request->ttvn, tt_request->dst,
		   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & BATADV_TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can
	 */
	if (!full_table) {
		/* incremental answer: replay the cached OGM change buffer */
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		/* full answer: rebuild the table view of the destination */
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = batadv_tt_response_fill_table(tt_len, ttvn,
						    bat_priv->tt_global_hash,
						    primary_if,
						    batadv_tt_global_valid,
						    req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BATADV_TT_QUERY;
	tt_response->header.version = BATADV_COMPAT_VERSION;
	tt_response->header.ttl = BATADV_TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = BATADV_TT_RESPONSE;

	if (full_table)
		tt_response->flags |= BATADV_TT_FULL_TABLE;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		   res_dst_orig_node->orig, neigh_node->addr,
		   req_dst_orig_node->orig, req_ttvn);

	batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		batadv_orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		batadv_orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;

}
/* Answer a TT_REQUEST addressed to this node itself.
 *
 * Serves either the cached local change buffer (requester only one ttvn
 * behind) or the full local table. Always returns true because the request
 * was for us and must not be re-routed, regardless of send success.
 */
static bool batadv_send_my_tt_response(struct bat_priv *bat_priv,
				       struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
		   tt_request->src, tt_request->ttvn,
		   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));


	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = batadv_orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table
	 */
	if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can
	 */
	if (!full_table) {
		/* incremental answer: replay the cached local change buffer */
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		/* full answer: dump the whole committed local table */
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = batadv_tt_response_fill_table(tt_len, ttvn,
						    bat_priv->tt_local_hash,
						    primary_if,
						    batadv_tt_local_valid_entry,
						    NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BATADV_TT_QUERY;
	tt_response->header.version = BATADV_COMPAT_VERSION;
	tt_response->header.ttl = BATADV_TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = BATADV_TT_RESPONSE;

	if (full_table)
		tt_response->flags |= BATADV_TT_FULL_TABLE;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Sending TT_RESPONSE to %pM via %pM [%c]\n",
		   orig_node->orig, neigh_node->addr,
		   (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));

	batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}
1749
1750 bool batadv_send_tt_response(struct bat_priv *bat_priv,
1751 struct tt_query_packet *tt_request)
1752 {
1753 if (batadv_is_my_mac(tt_request->dst)) {
1754 /* don't answer backbone gws! */
1755 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1756 return true;
1757
1758 return batadv_send_my_tt_response(bat_priv, tt_request);
1759 } else {
1760 return batadv_send_other_tt_response(bat_priv, tt_request);
1761 }
1762 }
1763
1764 static void _batadv_tt_update_changes(struct bat_priv *bat_priv,
1765 struct orig_node *orig_node,
1766 struct tt_change *tt_change,
1767 uint16_t tt_num_changes, uint8_t ttvn)
1768 {
1769 int i;
1770 int roams;
1771
1772 for (i = 0; i < tt_num_changes; i++) {
1773 if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) {
1774 roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
1775 batadv_tt_global_del(bat_priv, orig_node,
1776 (tt_change + i)->addr,
1777 "tt removed by changes",
1778 roams);
1779 } else {
1780 if (!batadv_tt_global_add(bat_priv, orig_node,
1781 (tt_change + i)->addr,
1782 (tt_change + i)->flags, ttvn))
1783 /* In case of problem while storing a
1784 * global_entry, we stop the updating
1785 * procedure without committing the
1786 * ttvn change. This will avoid to send
1787 * corrupted data on tt_request
1788 */
1789 return;
1790 }
1791 }
1792 orig_node->tt_initialised = true;
1793 }
1794
1795 static void batadv_tt_fill_gtable(struct bat_priv *bat_priv,
1796 struct tt_query_packet *tt_response)
1797 {
1798 struct orig_node *orig_node = NULL;
1799
1800 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1801 if (!orig_node)
1802 goto out;
1803
1804 /* Purge the old table first.. */
1805 batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
1806
1807 _batadv_tt_update_changes(bat_priv, orig_node,
1808 (struct tt_change *)(tt_response + 1),
1809 ntohs(tt_response->tt_data),
1810 tt_response->ttvn);
1811
1812 spin_lock_bh(&orig_node->tt_buff_lock);
1813 kfree(orig_node->tt_buff);
1814 orig_node->tt_buff_len = 0;
1815 orig_node->tt_buff = NULL;
1816 spin_unlock_bh(&orig_node->tt_buff_lock);
1817
1818 atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1819
1820 out:
1821 if (orig_node)
1822 batadv_orig_node_free_ref(orig_node);
1823 }
1824
/* Apply an incremental set of @tt_num_changes TT changes received from
 * @orig_node, cache the raw change buffer so it can be replayed for other
 * requesters, then commit the new table version number.
 */
static void batadv_tt_update_changes(struct bat_priv *bat_priv,
				     struct orig_node *orig_node,
				     uint16_t tt_num_changes, uint8_t ttvn,
				     struct tt_change *tt_change)
{
	_batadv_tt_update_changes(bat_priv, orig_node, tt_change,
				  tt_num_changes, ttvn);

	batadv_tt_save_orig_buffer(bat_priv, orig_node,
				   (unsigned char *)tt_change, tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}
1837
1838 bool batadv_is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
1839 {
1840 struct tt_local_entry *tt_local_entry = NULL;
1841 bool ret = false;
1842
1843 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
1844 if (!tt_local_entry)
1845 goto out;
1846 /* Check if the client has been logically deleted (but is kept for
1847 * consistency purpose)
1848 */
1849 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
1850 goto out;
1851 ret = true;
1852 out:
1853 if (tt_local_entry)
1854 batadv_tt_local_entry_free_ref(tt_local_entry);
1855 return ret;
1856 }
1857
/* Process an incoming TT_RESPONSE: apply the carried table (full or
 * incremental), drop the matching pending request and refresh the CRC we
 * keep for the sender.
 */
void batadv_handle_tt_response(struct bat_priv *bat_priv,
			       struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
		   tt_response->src, tt_response->ttvn,
		   ntohs(tt_response->tt_data),
		   (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));

	/* we should have never asked a backbone gw */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
		goto out;

	orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & BATADV_TT_FULL_TABLE)
		batadv_tt_fill_gtable(bat_priv, tt_response);
	else
		batadv_tt_update_changes(bat_priv, orig_node,
					 ntohs(tt_response->tt_data),
					 tt_response->ttvn,
					 (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!batadv_compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag
	 */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
}
1906
/* Initialise both translation tables and arm the periodic purge timer.
 * Returns 1 on success or the failing sub-initialiser's negative error.
 */
int batadv_tt_init(struct bat_priv *bat_priv)
{
	int ret = batadv_tt_local_init(bat_priv);

	if (ret < 0)
		return ret;

	ret = batadv_tt_global_init(bat_priv);
	if (ret < 0)
		return ret;

	batadv_tt_start_timer(bat_priv);

	return 1;
}
1923
1924 static void batadv_tt_roam_list_free(struct bat_priv *bat_priv)
1925 {
1926 struct tt_roam_node *node, *safe;
1927
1928 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1929
1930 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1931 list_del(&node->list);
1932 kfree(node);
1933 }
1934
1935 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1936 }
1937
1938 static void batadv_tt_roam_purge(struct bat_priv *bat_priv)
1939 {
1940 struct tt_roam_node *node, *safe;
1941
1942 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1943 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1944 if (!batadv_has_timed_out(node->first_time,
1945 BATADV_ROAMING_MAX_TIME))
1946 continue;
1947
1948 list_del(&node->list);
1949 kfree(node);
1950 }
1951 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1952 }
1953
/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise
 */
static bool batadv_tt_check_roam_count(struct bat_priv *bat_priv,
				       uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* check whether the client is already tracked as roaming and, if
	 * so, whether it still has a roaming "credit" left in the current
	 * time window
	 */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!batadv_compare_eth(tt_roam_node->addr, client))
			continue;

		/* an expired window doesn't count; a fresh record will be
		 * created below (the stale one is reaped by the purge worker)
		 */
		if (batadv_has_timed_out(tt_roam_node->first_time,
					 BATADV_ROAMING_MAX_TIME))
			continue;

		if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	if (!ret) {
		/* first roaming event in this window: start a new counter */
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter,
			   BATADV_ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}
2003
/* Send a ROAMING_ADV to @orig_node announcing that @client roamed to us.
 *
 * The advisory is rate-limited per client by batadv_tt_check_roam_count()
 * so a flapping client cannot flood the mesh with roaming events.
 */
static void batadv_send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
				 struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	int ret = 1;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times
	 */
	if (!batadv_tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
	roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
	roam_adv_packet->header.ttl = BATADV_TTL;
	/* the primary_if reference is only needed for the source address
	 * and is released again right after the copy
	 */
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	batadv_hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = batadv_orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		   orig_node->orig, client, neigh_node->addr);

	batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	/* any failure path (ret != 0) frees the unsent skb here */
	if (ret)
		kfree_skb(skb);
	return;
}
2059
2060 static void batadv_tt_purge(struct work_struct *work)
2061 {
2062 struct delayed_work *delayed_work =
2063 container_of(work, struct delayed_work, work);
2064 struct bat_priv *bat_priv =
2065 container_of(delayed_work, struct bat_priv, tt_work);
2066
2067 batadv_tt_local_purge(bat_priv);
2068 batadv_tt_global_roam_purge(bat_priv);
2069 batadv_tt_req_purge(bat_priv);
2070 batadv_tt_roam_purge(bat_priv);
2071
2072 batadv_tt_start_timer(bat_priv);
2073 }
2074
/* Release every TT resource owned by @bat_priv (interface teardown).
 *
 * The purge worker is cancelled synchronously first so it can neither
 * rearm itself nor touch the structures being freed below.
 */
void batadv_tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	batadv_tt_local_table_free(bat_priv);
	batadv_tt_global_table_free(bat_priv);
	batadv_tt_req_list_free(bat_priv);
	batadv_tt_changes_list_free(bat_priv);
	batadv_tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}
2087
2088 /* This function will enable or disable the specified flags for all the entries
2089 * in the given hash table and returns the number of modified entries
2090 */
2091 static uint16_t batadv_tt_set_flags(struct hashtable_t *hash, uint16_t flags,
2092 bool enable)
2093 {
2094 uint32_t i;
2095 uint16_t changed_num = 0;
2096 struct hlist_head *head;
2097 struct hlist_node *node;
2098 struct tt_common_entry *tt_common_entry;
2099
2100 if (!hash)
2101 goto out;
2102
2103 for (i = 0; i < hash->size; i++) {
2104 head = &hash->table[i];
2105
2106 rcu_read_lock();
2107 hlist_for_each_entry_rcu(tt_common_entry, node,
2108 head, hash_entry) {
2109 if (enable) {
2110 if ((tt_common_entry->flags & flags) == flags)
2111 continue;
2112 tt_common_entry->flags |= flags;
2113 } else {
2114 if (!(tt_common_entry->flags & flags))
2115 continue;
2116 tt_common_entry->flags &= ~flags;
2117 }
2118 changed_num++;
2119 }
2120 rcu_read_unlock();
2121 }
2122 out:
2123 return changed_num;
2124 }
2125
2126 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
2127 static void batadv_tt_local_purge_pending_clients(struct bat_priv *bat_priv)
2128 {
2129 struct hashtable_t *hash = bat_priv->tt_local_hash;
2130 struct tt_common_entry *tt_common;
2131 struct tt_local_entry *tt_local_entry;
2132 struct hlist_node *node, *node_tmp;
2133 struct hlist_head *head;
2134 spinlock_t *list_lock; /* protects write access to the hash lists */
2135 uint32_t i;
2136
2137 if (!hash)
2138 return;
2139
2140 for (i = 0; i < hash->size; i++) {
2141 head = &hash->table[i];
2142 list_lock = &hash->list_locks[i];
2143
2144 spin_lock_bh(list_lock);
2145 hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
2146 hash_entry) {
2147 if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
2148 continue;
2149
2150 batadv_dbg(BATADV_DBG_TT, bat_priv,
2151 "Deleting local tt entry (%pM): pending\n",
2152 tt_common->addr);
2153
2154 atomic_dec(&bat_priv->num_local_tt);
2155 hlist_del_rcu(node);
2156 tt_local_entry = container_of(tt_common,
2157 struct tt_local_entry,
2158 common);
2159 batadv_tt_local_entry_free_ref(tt_local_entry);
2160 }
2161 spin_unlock_bh(list_lock);
2162 }
2163
2164 }
2165
2166 static int batadv_tt_commit_changes(struct bat_priv *bat_priv,
2167 unsigned char **packet_buff,
2168 int *packet_buff_len, int packet_min_len)
2169 {
2170 uint16_t changed_num = 0;
2171
2172 if (atomic_read(&bat_priv->tt_local_changes) < 1)
2173 return -ENOENT;
2174
2175 changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
2176 BATADV_TT_CLIENT_NEW, false);
2177
2178 /* all reset entries have to be counted as local entries */
2179 atomic_add(changed_num, &bat_priv->num_local_tt);
2180 batadv_tt_local_purge_pending_clients(bat_priv);
2181 bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
2182
2183 /* Increment the TTVN only once per OGM interval */
2184 atomic_inc(&bat_priv->ttvn);
2185 batadv_dbg(BATADV_DBG_TT, bat_priv,
2186 "Local changes committed, updating to ttvn %u\n",
2187 (uint8_t)atomic_read(&bat_priv->ttvn));
2188 bat_priv->tt_poss_change = false;
2189
2190 /* reset the sending counter */
2191 atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
2192
2193 return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
2194 packet_buff_len, packet_min_len);
2195 }
2196
2197 /* when calling this function (hard_iface == primary_if) has to be true */
2198 int batadv_tt_append_diff(struct bat_priv *bat_priv,
2199 unsigned char **packet_buff, int *packet_buff_len,
2200 int packet_min_len)
2201 {
2202 int tt_num_changes;
2203
2204 /* if at least one change happened */
2205 tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
2206 packet_buff_len,
2207 packet_min_len);
2208
2209 /* if the changes have been sent often enough */
2210 if ((tt_num_changes < 0) &&
2211 (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
2212 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
2213 packet_min_len, packet_min_len);
2214 tt_num_changes = 0;
2215 }
2216
2217 return tt_num_changes;
2218 }
2219
2220 bool batadv_is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src,
2221 uint8_t *dst)
2222 {
2223 struct tt_local_entry *tt_local_entry = NULL;
2224 struct tt_global_entry *tt_global_entry = NULL;
2225 bool ret = false;
2226
2227 if (!atomic_read(&bat_priv->ap_isolation))
2228 goto out;
2229
2230 tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
2231 if (!tt_local_entry)
2232 goto out;
2233
2234 tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
2235 if (!tt_global_entry)
2236 goto out;
2237
2238 if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
2239 goto out;
2240
2241 ret = true;
2242
2243 out:
2244 if (tt_global_entry)
2245 batadv_tt_global_entry_free_ref(tt_global_entry);
2246 if (tt_local_entry)
2247 batadv_tt_local_entry_free_ref(tt_local_entry);
2248 return ret;
2249 }
2250
/* Process the translation-table metadata (diff buffer, ttvn, crc)
 * received with an OGM from orig_node: apply the attached changes when
 * they follow on directly from our last known state, otherwise fall back
 * to requesting a full/partial table from the originator.
 * NOTE(review): the goto below jumps from the "apply changes" branch into
 * the request_table label nested inside the else branch — keep that in
 * mind when modifying the control flow.
 */
void batadv_tt_update_orig(struct bat_priv *bat_priv,
			   struct orig_node *orig_node,
			   const unsigned char *tt_buff, uint8_t tt_num_changes,
			   uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* don't care about a backbone gateways updates. */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
		return;

	/* orig table not initialised AND first diff is in the OGM OR the ttvn
	 * increased by one -> we can apply the attached changes
	 */
	if ((!orig_node->tt_initialised && ttvn == 1) ||
	    ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent BATADV_TT_OGM_APPEND_MAX
		 * times.
		 * In this case send a tt request
		 */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
					 ttvn, (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table
		 */
		orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency
		 */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag
		 */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data
		 */
		if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
		    orig_node->tt_crc != tt_crc) {
request_table:
			batadv_dbg(BATADV_DBG_TT, bat_priv,
				   "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
				   orig_node->orig, ttvn, orig_ttvn, tt_crc,
				   orig_node->tt_crc, tt_num_changes);
			batadv_send_tt_request(bat_priv, orig_node, ttvn,
					       tt_crc, full_table);
			return;
		}
	}
}
2320
2321 /* returns true whether we know that the client has moved from its old
2322 * originator to another one. This entry is kept is still kept for consistency
2323 * purposes
2324 */
2325 bool batadv_tt_global_client_is_roaming(struct bat_priv *bat_priv,
2326 uint8_t *addr)
2327 {
2328 struct tt_global_entry *tt_global_entry;
2329 bool ret = false;
2330
2331 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
2332 if (!tt_global_entry)
2333 goto out;
2334
2335 ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
2336 batadv_tt_global_entry_free_ref(tt_global_entry);
2337 out:
2338 return ret;
2339 }