1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 #include "translation-table.h"
22 #include "soft-interface.h"
23 #include "hard-interface.h"
26 #include "originator.h"
28 #include "bridge_loop_avoidance.h"
30 #include <linux/crc16.h>
32 static void batadv_send_roam_adv(struct bat_priv
*bat_priv
, uint8_t *client
,
33 struct orig_node
*orig_node
);
34 static void batadv_tt_purge(struct work_struct
*work
);
36 batadv_tt_global_del_orig_list(struct tt_global_entry
*tt_global_entry
);
38 /* returns 1 if they are the same mac addr */
39 static int batadv_compare_tt(const struct hlist_node
*node
, const void *data2
)
41 const void *data1
= container_of(node
, struct tt_common_entry
,
44 return (memcmp(data1
, data2
, ETH_ALEN
) == 0 ? 1 : 0);
47 static void batadv_tt_start_timer(struct bat_priv
*bat_priv
)
49 INIT_DELAYED_WORK(&bat_priv
->tt_work
, batadv_tt_purge
);
50 queue_delayed_work(batadv_event_workqueue
, &bat_priv
->tt_work
,
51 msecs_to_jiffies(5000));
54 static struct tt_common_entry
*batadv_tt_hash_find(struct hashtable_t
*hash
,
57 struct hlist_head
*head
;
58 struct hlist_node
*node
;
59 struct tt_common_entry
*tt_common_entry
, *tt_common_entry_tmp
= NULL
;
65 index
= batadv_choose_orig(data
, hash
->size
);
66 head
= &hash
->table
[index
];
69 hlist_for_each_entry_rcu(tt_common_entry
, node
, head
, hash_entry
) {
70 if (!batadv_compare_eth(tt_common_entry
, data
))
73 if (!atomic_inc_not_zero(&tt_common_entry
->refcount
))
76 tt_common_entry_tmp
= tt_common_entry
;
81 return tt_common_entry_tmp
;
84 static struct tt_local_entry
*
85 batadv_tt_local_hash_find(struct bat_priv
*bat_priv
, const void *data
)
87 struct tt_common_entry
*tt_common_entry
;
88 struct tt_local_entry
*tt_local_entry
= NULL
;
90 tt_common_entry
= batadv_tt_hash_find(bat_priv
->tt_local_hash
, data
);
92 tt_local_entry
= container_of(tt_common_entry
,
93 struct tt_local_entry
, common
);
94 return tt_local_entry
;
97 static struct tt_global_entry
*
98 batadv_tt_global_hash_find(struct bat_priv
*bat_priv
, const void *data
)
100 struct tt_common_entry
*tt_common_entry
;
101 struct tt_global_entry
*tt_global_entry
= NULL
;
103 tt_common_entry
= batadv_tt_hash_find(bat_priv
->tt_global_hash
, data
);
105 tt_global_entry
= container_of(tt_common_entry
,
106 struct tt_global_entry
, common
);
107 return tt_global_entry
;
112 batadv_tt_local_entry_free_ref(struct tt_local_entry
*tt_local_entry
)
114 if (atomic_dec_and_test(&tt_local_entry
->common
.refcount
))
115 kfree_rcu(tt_local_entry
, common
.rcu
);
118 static void batadv_tt_global_entry_free_rcu(struct rcu_head
*rcu
)
120 struct tt_common_entry
*tt_common_entry
;
121 struct tt_global_entry
*tt_global_entry
;
123 tt_common_entry
= container_of(rcu
, struct tt_common_entry
, rcu
);
124 tt_global_entry
= container_of(tt_common_entry
, struct tt_global_entry
,
127 kfree(tt_global_entry
);
131 batadv_tt_global_entry_free_ref(struct tt_global_entry
*tt_global_entry
)
133 if (atomic_dec_and_test(&tt_global_entry
->common
.refcount
)) {
134 batadv_tt_global_del_orig_list(tt_global_entry
);
135 call_rcu(&tt_global_entry
->common
.rcu
,
136 batadv_tt_global_entry_free_rcu
);
140 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head
*rcu
)
142 struct tt_orig_list_entry
*orig_entry
;
144 orig_entry
= container_of(rcu
, struct tt_orig_list_entry
, rcu
);
145 batadv_orig_node_free_ref(orig_entry
->orig_node
);
150 batadv_tt_orig_list_entry_free_ref(struct tt_orig_list_entry
*orig_entry
)
152 /* to avoid race conditions, immediately decrease the tt counter */
153 atomic_dec(&orig_entry
->orig_node
->tt_size
);
154 call_rcu(&orig_entry
->rcu
, batadv_tt_orig_list_entry_free_rcu
);
157 static void batadv_tt_local_event(struct bat_priv
*bat_priv
,
158 const uint8_t *addr
, uint8_t flags
)
160 struct tt_change_node
*tt_change_node
, *entry
, *safe
;
161 bool event_removed
= false;
162 bool del_op_requested
, del_op_entry
;
164 tt_change_node
= kmalloc(sizeof(*tt_change_node
), GFP_ATOMIC
);
169 tt_change_node
->change
.flags
= flags
;
170 memcpy(tt_change_node
->change
.addr
, addr
, ETH_ALEN
);
172 del_op_requested
= flags
& BATADV_TT_CLIENT_DEL
;
174 /* check for ADD+DEL or DEL+ADD events */
175 spin_lock_bh(&bat_priv
->tt_changes_list_lock
);
176 list_for_each_entry_safe(entry
, safe
, &bat_priv
->tt_changes_list
,
178 if (!batadv_compare_eth(entry
->change
.addr
, addr
))
181 /* DEL+ADD in the same orig interval have no effect and can be
182 * removed to avoid silly behaviour on the receiver side. The
183 * other way around (ADD+DEL) can happen in case of roaming of
184 * a client still in the NEW state. Roaming of NEW clients is
185 * now possible due to automatically recognition of "temporary"
188 del_op_entry
= entry
->change
.flags
& BATADV_TT_CLIENT_DEL
;
189 if (!del_op_requested
&& del_op_entry
)
191 if (del_op_requested
&& !del_op_entry
)
195 list_del(&entry
->list
);
197 event_removed
= true;
201 /* track the change in the OGMinterval list */
202 list_add_tail(&tt_change_node
->list
, &bat_priv
->tt_changes_list
);
205 spin_unlock_bh(&bat_priv
->tt_changes_list_lock
);
208 atomic_dec(&bat_priv
->tt_local_changes
);
210 atomic_inc(&bat_priv
->tt_local_changes
);
213 int batadv_tt_len(int changes_num
)
215 return changes_num
* sizeof(struct tt_change
);
218 static int batadv_tt_local_init(struct bat_priv
*bat_priv
)
220 if (bat_priv
->tt_local_hash
)
223 bat_priv
->tt_local_hash
= batadv_hash_new(1024);
225 if (!bat_priv
->tt_local_hash
)
231 void batadv_tt_local_add(struct net_device
*soft_iface
, const uint8_t *addr
,
234 struct bat_priv
*bat_priv
= netdev_priv(soft_iface
);
235 struct tt_local_entry
*tt_local_entry
= NULL
;
236 struct tt_global_entry
*tt_global_entry
= NULL
;
237 struct hlist_head
*head
;
238 struct hlist_node
*node
;
239 struct tt_orig_list_entry
*orig_entry
;
242 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, addr
);
244 if (tt_local_entry
) {
245 tt_local_entry
->last_seen
= jiffies
;
246 /* possibly unset the BATADV_TT_CLIENT_PENDING flag */
247 tt_local_entry
->common
.flags
&= ~BATADV_TT_CLIENT_PENDING
;
251 tt_local_entry
= kmalloc(sizeof(*tt_local_entry
), GFP_ATOMIC
);
255 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
256 "Creating new local tt entry: %pM (ttvn: %d)\n", addr
,
257 (uint8_t)atomic_read(&bat_priv
->ttvn
));
259 memcpy(tt_local_entry
->common
.addr
, addr
, ETH_ALEN
);
260 tt_local_entry
->common
.flags
= BATADV_NO_FLAGS
;
261 if (batadv_is_wifi_iface(ifindex
))
262 tt_local_entry
->common
.flags
|= BATADV_TT_CLIENT_WIFI
;
263 atomic_set(&tt_local_entry
->common
.refcount
, 2);
264 tt_local_entry
->last_seen
= jiffies
;
266 /* the batman interface mac address should never be purged */
267 if (batadv_compare_eth(addr
, soft_iface
->dev_addr
))
268 tt_local_entry
->common
.flags
|= BATADV_TT_CLIENT_NOPURGE
;
270 /* The local entry has to be marked as NEW to avoid to send it in
271 * a full table response going out before the next ttvn increment
272 * (consistency check)
274 tt_local_entry
->common
.flags
|= BATADV_TT_CLIENT_NEW
;
276 hash_added
= batadv_hash_add(bat_priv
->tt_local_hash
, batadv_compare_tt
,
278 &tt_local_entry
->common
,
279 &tt_local_entry
->common
.hash_entry
);
281 if (unlikely(hash_added
!= 0)) {
282 /* remove the reference for the hash */
283 batadv_tt_local_entry_free_ref(tt_local_entry
);
287 batadv_tt_local_event(bat_priv
, addr
, tt_local_entry
->common
.flags
);
289 /* remove address from global hash if present */
290 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
292 /* Check whether it is a roaming! */
293 if (tt_global_entry
) {
294 /* These node are probably going to update their tt table */
295 head
= &tt_global_entry
->orig_list
;
297 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
298 orig_entry
->orig_node
->tt_poss_change
= true;
300 batadv_send_roam_adv(bat_priv
,
301 tt_global_entry
->common
.addr
,
302 orig_entry
->orig_node
);
305 /* The global entry has to be marked as ROAMING and
306 * has to be kept for consistency purpose
308 tt_global_entry
->common
.flags
|= BATADV_TT_CLIENT_ROAM
;
309 tt_global_entry
->roam_at
= jiffies
;
313 batadv_tt_local_entry_free_ref(tt_local_entry
);
315 batadv_tt_global_entry_free_ref(tt_global_entry
);
318 static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff
,
319 int *packet_buff_len
,
323 unsigned char *new_buff
;
325 new_buff
= kmalloc(new_packet_len
, GFP_ATOMIC
);
327 /* keep old buffer if kmalloc should fail */
329 memcpy(new_buff
, *packet_buff
, min_packet_len
);
331 *packet_buff
= new_buff
;
332 *packet_buff_len
= new_packet_len
;
336 static void batadv_tt_prepare_packet_buff(struct bat_priv
*bat_priv
,
337 unsigned char **packet_buff
,
338 int *packet_buff_len
,
341 struct hard_iface
*primary_if
;
344 primary_if
= batadv_primary_if_get_selected(bat_priv
);
346 req_len
= min_packet_len
;
347 req_len
+= batadv_tt_len(atomic_read(&bat_priv
->tt_local_changes
));
349 /* if we have too many changes for one packet don't send any
350 * and wait for the tt table request which will be fragmented
352 if ((!primary_if
) || (req_len
> primary_if
->soft_iface
->mtu
))
353 req_len
= min_packet_len
;
355 batadv_tt_realloc_packet_buff(packet_buff
, packet_buff_len
,
356 min_packet_len
, req_len
);
359 batadv_hardif_free_ref(primary_if
);
362 static int batadv_tt_changes_fill_buff(struct bat_priv
*bat_priv
,
363 unsigned char **packet_buff
,
364 int *packet_buff_len
,
367 struct tt_change_node
*entry
, *safe
;
368 int count
= 0, tot_changes
= 0, new_len
;
369 unsigned char *tt_buff
;
371 batadv_tt_prepare_packet_buff(bat_priv
, packet_buff
,
372 packet_buff_len
, min_packet_len
);
374 new_len
= *packet_buff_len
- min_packet_len
;
375 tt_buff
= *packet_buff
+ min_packet_len
;
378 tot_changes
= new_len
/ batadv_tt_len(1);
380 spin_lock_bh(&bat_priv
->tt_changes_list_lock
);
381 atomic_set(&bat_priv
->tt_local_changes
, 0);
383 list_for_each_entry_safe(entry
, safe
, &bat_priv
->tt_changes_list
,
385 if (count
< tot_changes
) {
386 memcpy(tt_buff
+ batadv_tt_len(count
),
387 &entry
->change
, sizeof(struct tt_change
));
390 list_del(&entry
->list
);
393 spin_unlock_bh(&bat_priv
->tt_changes_list_lock
);
395 /* Keep the buffer for possible tt_request */
396 spin_lock_bh(&bat_priv
->tt_buff_lock
);
397 kfree(bat_priv
->tt_buff
);
398 bat_priv
->tt_buff_len
= 0;
399 bat_priv
->tt_buff
= NULL
;
400 /* check whether this new OGM has no changes due to size problems */
402 /* if kmalloc() fails we will reply with the full table
403 * instead of providing the diff
405 bat_priv
->tt_buff
= kmalloc(new_len
, GFP_ATOMIC
);
406 if (bat_priv
->tt_buff
) {
407 memcpy(bat_priv
->tt_buff
, tt_buff
, new_len
);
408 bat_priv
->tt_buff_len
= new_len
;
411 spin_unlock_bh(&bat_priv
->tt_buff_lock
);
416 int batadv_tt_local_seq_print_text(struct seq_file
*seq
, void *offset
)
418 struct net_device
*net_dev
= (struct net_device
*)seq
->private;
419 struct bat_priv
*bat_priv
= netdev_priv(net_dev
);
420 struct hashtable_t
*hash
= bat_priv
->tt_local_hash
;
421 struct tt_common_entry
*tt_common_entry
;
422 struct hard_iface
*primary_if
;
423 struct hlist_node
*node
;
424 struct hlist_head
*head
;
428 primary_if
= batadv_primary_if_get_selected(bat_priv
);
430 ret
= seq_printf(seq
,
431 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
436 if (primary_if
->if_status
!= BATADV_IF_ACTIVE
) {
437 ret
= seq_printf(seq
,
438 "BATMAN mesh %s disabled - primary interface not active\n",
444 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
445 net_dev
->name
, (uint8_t)atomic_read(&bat_priv
->ttvn
));
447 for (i
= 0; i
< hash
->size
; i
++) {
448 head
= &hash
->table
[i
];
451 hlist_for_each_entry_rcu(tt_common_entry
, node
,
453 seq_printf(seq
, " * %pM [%c%c%c%c%c]\n",
454 tt_common_entry
->addr
,
455 (tt_common_entry
->flags
&
456 BATADV_TT_CLIENT_ROAM
? 'R' : '.'),
457 (tt_common_entry
->flags
&
458 BATADV_TT_CLIENT_NOPURGE
? 'P' : '.'),
459 (tt_common_entry
->flags
&
460 BATADV_TT_CLIENT_NEW
? 'N' : '.'),
461 (tt_common_entry
->flags
&
462 BATADV_TT_CLIENT_PENDING
? 'X' : '.'),
463 (tt_common_entry
->flags
&
464 BATADV_TT_CLIENT_WIFI
? 'W' : '.'));
470 batadv_hardif_free_ref(primary_if
);
474 static void batadv_tt_local_set_pending(struct bat_priv
*bat_priv
,
475 struct tt_local_entry
*tt_local_entry
,
476 uint16_t flags
, const char *message
)
478 batadv_tt_local_event(bat_priv
, tt_local_entry
->common
.addr
,
479 tt_local_entry
->common
.flags
| flags
);
481 /* The local client has to be marked as "pending to be removed" but has
482 * to be kept in the table in order to send it in a full table
483 * response issued before the net ttvn increment (consistency check)
485 tt_local_entry
->common
.flags
|= BATADV_TT_CLIENT_PENDING
;
487 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
488 "Local tt entry (%pM) pending to be removed: %s\n",
489 tt_local_entry
->common
.addr
, message
);
492 void batadv_tt_local_remove(struct bat_priv
*bat_priv
, const uint8_t *addr
,
493 const char *message
, bool roaming
)
495 struct tt_local_entry
*tt_local_entry
= NULL
;
498 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, addr
);
502 flags
= BATADV_TT_CLIENT_DEL
;
504 flags
|= BATADV_TT_CLIENT_ROAM
;
506 batadv_tt_local_set_pending(bat_priv
, tt_local_entry
, flags
, message
);
509 batadv_tt_local_entry_free_ref(tt_local_entry
);
512 static void batadv_tt_local_purge_list(struct bat_priv
*bat_priv
,
513 struct hlist_head
*head
)
515 struct tt_local_entry
*tt_local_entry
;
516 struct tt_common_entry
*tt_common_entry
;
517 struct hlist_node
*node
, *node_tmp
;
519 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
, head
,
521 tt_local_entry
= container_of(tt_common_entry
,
522 struct tt_local_entry
, common
);
523 if (tt_local_entry
->common
.flags
& BATADV_TT_CLIENT_NOPURGE
)
526 /* entry already marked for deletion */
527 if (tt_local_entry
->common
.flags
& BATADV_TT_CLIENT_PENDING
)
530 if (!batadv_has_timed_out(tt_local_entry
->last_seen
,
531 BATADV_TT_LOCAL_TIMEOUT
))
534 batadv_tt_local_set_pending(bat_priv
, tt_local_entry
,
535 BATADV_TT_CLIENT_DEL
, "timed out");
539 static void batadv_tt_local_purge(struct bat_priv
*bat_priv
)
541 struct hashtable_t
*hash
= bat_priv
->tt_local_hash
;
542 struct hlist_head
*head
;
543 spinlock_t
*list_lock
; /* protects write access to the hash lists */
546 for (i
= 0; i
< hash
->size
; i
++) {
547 head
= &hash
->table
[i
];
548 list_lock
= &hash
->list_locks
[i
];
550 spin_lock_bh(list_lock
);
551 batadv_tt_local_purge_list(bat_priv
, head
);
552 spin_unlock_bh(list_lock
);
557 static void batadv_tt_local_table_free(struct bat_priv
*bat_priv
)
559 struct hashtable_t
*hash
;
560 spinlock_t
*list_lock
; /* protects write access to the hash lists */
561 struct tt_common_entry
*tt_common_entry
;
562 struct tt_local_entry
*tt_local_entry
;
563 struct hlist_node
*node
, *node_tmp
;
564 struct hlist_head
*head
;
567 if (!bat_priv
->tt_local_hash
)
570 hash
= bat_priv
->tt_local_hash
;
572 for (i
= 0; i
< hash
->size
; i
++) {
573 head
= &hash
->table
[i
];
574 list_lock
= &hash
->list_locks
[i
];
576 spin_lock_bh(list_lock
);
577 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
,
580 tt_local_entry
= container_of(tt_common_entry
,
581 struct tt_local_entry
,
583 batadv_tt_local_entry_free_ref(tt_local_entry
);
585 spin_unlock_bh(list_lock
);
588 batadv_hash_destroy(hash
);
590 bat_priv
->tt_local_hash
= NULL
;
593 static int batadv_tt_global_init(struct bat_priv
*bat_priv
)
595 if (bat_priv
->tt_global_hash
)
598 bat_priv
->tt_global_hash
= batadv_hash_new(1024);
600 if (!bat_priv
->tt_global_hash
)
606 static void batadv_tt_changes_list_free(struct bat_priv
*bat_priv
)
608 struct tt_change_node
*entry
, *safe
;
610 spin_lock_bh(&bat_priv
->tt_changes_list_lock
);
612 list_for_each_entry_safe(entry
, safe
, &bat_priv
->tt_changes_list
,
614 list_del(&entry
->list
);
618 atomic_set(&bat_priv
->tt_local_changes
, 0);
619 spin_unlock_bh(&bat_priv
->tt_changes_list_lock
);
622 /* find out if an orig_node is already in the list of a tt_global_entry.
623 * returns 1 if found, 0 otherwise
625 static bool batadv_tt_global_entry_has_orig(const struct tt_global_entry
*entry
,
626 const struct orig_node
*orig_node
)
628 struct tt_orig_list_entry
*tmp_orig_entry
;
629 const struct hlist_head
*head
;
630 struct hlist_node
*node
;
634 head
= &entry
->orig_list
;
635 hlist_for_each_entry_rcu(tmp_orig_entry
, node
, head
, list
) {
636 if (tmp_orig_entry
->orig_node
== orig_node
) {
646 batadv_tt_global_add_orig_entry(struct tt_global_entry
*tt_global_entry
,
647 struct orig_node
*orig_node
, int ttvn
)
649 struct tt_orig_list_entry
*orig_entry
;
651 orig_entry
= kzalloc(sizeof(*orig_entry
), GFP_ATOMIC
);
655 INIT_HLIST_NODE(&orig_entry
->list
);
656 atomic_inc(&orig_node
->refcount
);
657 atomic_inc(&orig_node
->tt_size
);
658 orig_entry
->orig_node
= orig_node
;
659 orig_entry
->ttvn
= ttvn
;
661 spin_lock_bh(&tt_global_entry
->list_lock
);
662 hlist_add_head_rcu(&orig_entry
->list
,
663 &tt_global_entry
->orig_list
);
664 spin_unlock_bh(&tt_global_entry
->list_lock
);
667 /* caller must hold orig_node refcount */
668 int batadv_tt_global_add(struct bat_priv
*bat_priv
, struct orig_node
*orig_node
,
669 const unsigned char *tt_addr
, uint8_t flags
,
672 struct tt_global_entry
*tt_global_entry
= NULL
;
675 struct tt_common_entry
*common
;
677 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, tt_addr
);
679 if (!tt_global_entry
) {
680 tt_global_entry
= kzalloc(sizeof(*tt_global_entry
), GFP_ATOMIC
);
681 if (!tt_global_entry
)
684 common
= &tt_global_entry
->common
;
685 memcpy(common
->addr
, tt_addr
, ETH_ALEN
);
687 common
->flags
= flags
;
688 tt_global_entry
->roam_at
= 0;
689 atomic_set(&common
->refcount
, 2);
691 INIT_HLIST_HEAD(&tt_global_entry
->orig_list
);
692 spin_lock_init(&tt_global_entry
->list_lock
);
694 hash_added
= batadv_hash_add(bat_priv
->tt_global_hash
,
696 batadv_choose_orig
, common
,
697 &common
->hash_entry
);
699 if (unlikely(hash_added
!= 0)) {
700 /* remove the reference for the hash */
701 batadv_tt_global_entry_free_ref(tt_global_entry
);
705 batadv_tt_global_add_orig_entry(tt_global_entry
, orig_node
,
708 /* there is already a global entry, use this one. */
710 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
711 * one originator left in the list and we previously received a
712 * delete + roaming change for this originator.
714 * We should first delete the old originator before adding the
717 if (tt_global_entry
->common
.flags
& BATADV_TT_CLIENT_ROAM
) {
718 batadv_tt_global_del_orig_list(tt_global_entry
);
719 tt_global_entry
->common
.flags
&= ~BATADV_TT_CLIENT_ROAM
;
720 tt_global_entry
->roam_at
= 0;
723 if (!batadv_tt_global_entry_has_orig(tt_global_entry
,
725 batadv_tt_global_add_orig_entry(tt_global_entry
,
729 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
730 "Creating new global tt entry: %pM (via %pM)\n",
731 tt_global_entry
->common
.addr
, orig_node
->orig
);
734 /* remove address from local hash if present */
735 batadv_tt_local_remove(bat_priv
, tt_global_entry
->common
.addr
,
736 "global tt received",
737 flags
& BATADV_TT_CLIENT_ROAM
);
741 batadv_tt_global_entry_free_ref(tt_global_entry
);
745 /* print all orig nodes who announce the address for this global entry.
746 * it is assumed that the caller holds rcu_read_lock();
749 batadv_tt_global_print_entry(struct tt_global_entry
*tt_global_entry
,
750 struct seq_file
*seq
)
752 struct hlist_head
*head
;
753 struct hlist_node
*node
;
754 struct tt_orig_list_entry
*orig_entry
;
755 struct tt_common_entry
*tt_common_entry
;
759 tt_common_entry
= &tt_global_entry
->common
;
761 head
= &tt_global_entry
->orig_list
;
763 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
764 flags
= tt_common_entry
->flags
;
765 last_ttvn
= atomic_read(&orig_entry
->orig_node
->last_ttvn
);
766 seq_printf(seq
, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
767 tt_global_entry
->common
.addr
, orig_entry
->ttvn
,
768 orig_entry
->orig_node
->orig
, last_ttvn
,
769 (flags
& BATADV_TT_CLIENT_ROAM
? 'R' : '.'),
770 (flags
& BATADV_TT_CLIENT_WIFI
? 'W' : '.'));
774 int batadv_tt_global_seq_print_text(struct seq_file
*seq
, void *offset
)
776 struct net_device
*net_dev
= (struct net_device
*)seq
->private;
777 struct bat_priv
*bat_priv
= netdev_priv(net_dev
);
778 struct hashtable_t
*hash
= bat_priv
->tt_global_hash
;
779 struct tt_common_entry
*tt_common_entry
;
780 struct tt_global_entry
*tt_global_entry
;
781 struct hard_iface
*primary_if
;
782 struct hlist_node
*node
;
783 struct hlist_head
*head
;
787 primary_if
= batadv_primary_if_get_selected(bat_priv
);
789 ret
= seq_printf(seq
,
790 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
795 if (primary_if
->if_status
!= BATADV_IF_ACTIVE
) {
796 ret
= seq_printf(seq
,
797 "BATMAN mesh %s disabled - primary interface not active\n",
803 "Globally announced TT entries received via the mesh %s\n",
805 seq_printf(seq
, " %-13s %s %-15s %s %s\n",
806 "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
808 for (i
= 0; i
< hash
->size
; i
++) {
809 head
= &hash
->table
[i
];
812 hlist_for_each_entry_rcu(tt_common_entry
, node
,
814 tt_global_entry
= container_of(tt_common_entry
,
815 struct tt_global_entry
,
817 batadv_tt_global_print_entry(tt_global_entry
, seq
);
823 batadv_hardif_free_ref(primary_if
);
827 /* deletes the orig list of a tt_global_entry */
829 batadv_tt_global_del_orig_list(struct tt_global_entry
*tt_global_entry
)
831 struct hlist_head
*head
;
832 struct hlist_node
*node
, *safe
;
833 struct tt_orig_list_entry
*orig_entry
;
835 spin_lock_bh(&tt_global_entry
->list_lock
);
836 head
= &tt_global_entry
->orig_list
;
837 hlist_for_each_entry_safe(orig_entry
, node
, safe
, head
, list
) {
839 batadv_tt_orig_list_entry_free_ref(orig_entry
);
841 spin_unlock_bh(&tt_global_entry
->list_lock
);
846 batadv_tt_global_del_orig_entry(struct bat_priv
*bat_priv
,
847 struct tt_global_entry
*tt_global_entry
,
848 struct orig_node
*orig_node
,
851 struct hlist_head
*head
;
852 struct hlist_node
*node
, *safe
;
853 struct tt_orig_list_entry
*orig_entry
;
855 spin_lock_bh(&tt_global_entry
->list_lock
);
856 head
= &tt_global_entry
->orig_list
;
857 hlist_for_each_entry_safe(orig_entry
, node
, safe
, head
, list
) {
858 if (orig_entry
->orig_node
== orig_node
) {
859 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
860 "Deleting %pM from global tt entry %pM: %s\n",
862 tt_global_entry
->common
.addr
, message
);
864 batadv_tt_orig_list_entry_free_ref(orig_entry
);
867 spin_unlock_bh(&tt_global_entry
->list_lock
);
870 static void batadv_tt_global_del_struct(struct bat_priv
*bat_priv
,
871 struct tt_global_entry
*tt_global_entry
,
874 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
875 "Deleting global tt entry %pM: %s\n",
876 tt_global_entry
->common
.addr
, message
);
878 batadv_hash_remove(bat_priv
->tt_global_hash
, batadv_compare_tt
,
879 batadv_choose_orig
, tt_global_entry
->common
.addr
);
880 batadv_tt_global_entry_free_ref(tt_global_entry
);
884 /* If the client is to be deleted, we check if it is the last origantor entry
885 * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
886 * timer, otherwise we simply remove the originator scheduled for deletion.
889 batadv_tt_global_del_roaming(struct bat_priv
*bat_priv
,
890 struct tt_global_entry
*tt_global_entry
,
891 struct orig_node
*orig_node
, const char *message
)
893 bool last_entry
= true;
894 struct hlist_head
*head
;
895 struct hlist_node
*node
;
896 struct tt_orig_list_entry
*orig_entry
;
898 /* no local entry exists, case 1:
899 * Check if this is the last one or if other entries exist.
903 head
= &tt_global_entry
->orig_list
;
904 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
905 if (orig_entry
->orig_node
!= orig_node
) {
913 /* its the last one, mark for roaming. */
914 tt_global_entry
->common
.flags
|= BATADV_TT_CLIENT_ROAM
;
915 tt_global_entry
->roam_at
= jiffies
;
917 /* there is another entry, we can simply delete this
918 * one and can still use the other one.
920 batadv_tt_global_del_orig_entry(bat_priv
, tt_global_entry
,
926 static void batadv_tt_global_del(struct bat_priv
*bat_priv
,
927 struct orig_node
*orig_node
,
928 const unsigned char *addr
,
929 const char *message
, bool roaming
)
931 struct tt_global_entry
*tt_global_entry
= NULL
;
932 struct tt_local_entry
*local_entry
= NULL
;
934 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
935 if (!tt_global_entry
)
939 batadv_tt_global_del_orig_entry(bat_priv
, tt_global_entry
,
942 if (hlist_empty(&tt_global_entry
->orig_list
))
943 batadv_tt_global_del_struct(bat_priv
, tt_global_entry
,
949 /* if we are deleting a global entry due to a roam
950 * event, there are two possibilities:
951 * 1) the client roamed from node A to node B => if there
952 * is only one originator left for this client, we mark
953 * it with BATADV_TT_CLIENT_ROAM, we start a timer and we
954 * wait for node B to claim it. In case of timeout
955 * the entry is purged.
957 * If there are other originators left, we directly delete
959 * 2) the client roamed to us => we can directly delete
960 * the global entry, since it is useless now.
962 local_entry
= batadv_tt_local_hash_find(bat_priv
,
963 tt_global_entry
->common
.addr
);
965 /* local entry exists, case 2: client roamed to us. */
966 batadv_tt_global_del_orig_list(tt_global_entry
);
967 batadv_tt_global_del_struct(bat_priv
, tt_global_entry
, message
);
969 /* no local entry exists, case 1: check for roaming */
970 batadv_tt_global_del_roaming(bat_priv
, tt_global_entry
,
976 batadv_tt_global_entry_free_ref(tt_global_entry
);
978 batadv_tt_local_entry_free_ref(local_entry
);
981 void batadv_tt_global_del_orig(struct bat_priv
*bat_priv
,
982 struct orig_node
*orig_node
, const char *message
)
984 struct tt_global_entry
*global_entry
;
985 struct tt_common_entry
*tt_common_entry
;
987 struct hashtable_t
*hash
= bat_priv
->tt_global_hash
;
988 struct hlist_node
*node
, *safe
;
989 struct hlist_head
*head
;
990 spinlock_t
*list_lock
; /* protects write access to the hash lists */
995 for (i
= 0; i
< hash
->size
; i
++) {
996 head
= &hash
->table
[i
];
997 list_lock
= &hash
->list_locks
[i
];
999 spin_lock_bh(list_lock
);
1000 hlist_for_each_entry_safe(tt_common_entry
, node
, safe
,
1002 global_entry
= container_of(tt_common_entry
,
1003 struct tt_global_entry
,
1006 batadv_tt_global_del_orig_entry(bat_priv
, global_entry
,
1007 orig_node
, message
);
1009 if (hlist_empty(&global_entry
->orig_list
)) {
1010 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1011 "Deleting global tt entry %pM: %s\n",
1012 global_entry
->common
.addr
, message
);
1013 hlist_del_rcu(node
);
1014 batadv_tt_global_entry_free_ref(global_entry
);
1017 spin_unlock_bh(list_lock
);
1019 orig_node
->tt_initialised
= false;
1022 static void batadv_tt_global_roam_purge_list(struct bat_priv
*bat_priv
,
1023 struct hlist_head
*head
)
1025 struct tt_common_entry
*tt_common_entry
;
1026 struct tt_global_entry
*tt_global_entry
;
1027 struct hlist_node
*node
, *node_tmp
;
1029 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
, head
,
1031 tt_global_entry
= container_of(tt_common_entry
,
1032 struct tt_global_entry
, common
);
1033 if (!(tt_global_entry
->common
.flags
& BATADV_TT_CLIENT_ROAM
))
1035 if (!batadv_has_timed_out(tt_global_entry
->roam_at
,
1036 BATADV_TT_CLIENT_ROAM_TIMEOUT
))
1039 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1040 "Deleting global tt entry (%pM): Roaming timeout\n",
1041 tt_global_entry
->common
.addr
);
1043 hlist_del_rcu(node
);
1044 batadv_tt_global_entry_free_ref(tt_global_entry
);
1048 static void batadv_tt_global_roam_purge(struct bat_priv
*bat_priv
)
1050 struct hashtable_t
*hash
= bat_priv
->tt_global_hash
;
1051 struct hlist_head
*head
;
1052 spinlock_t
*list_lock
; /* protects write access to the hash lists */
1055 for (i
= 0; i
< hash
->size
; i
++) {
1056 head
= &hash
->table
[i
];
1057 list_lock
= &hash
->list_locks
[i
];
1059 spin_lock_bh(list_lock
);
1060 batadv_tt_global_roam_purge_list(bat_priv
, head
);
1061 spin_unlock_bh(list_lock
);
1066 static void batadv_tt_global_table_free(struct bat_priv
*bat_priv
)
1068 struct hashtable_t
*hash
;
1069 spinlock_t
*list_lock
; /* protects write access to the hash lists */
1070 struct tt_common_entry
*tt_common_entry
;
1071 struct tt_global_entry
*tt_global_entry
;
1072 struct hlist_node
*node
, *node_tmp
;
1073 struct hlist_head
*head
;
1076 if (!bat_priv
->tt_global_hash
)
1079 hash
= bat_priv
->tt_global_hash
;
1081 for (i
= 0; i
< hash
->size
; i
++) {
1082 head
= &hash
->table
[i
];
1083 list_lock
= &hash
->list_locks
[i
];
1085 spin_lock_bh(list_lock
);
1086 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
,
1088 hlist_del_rcu(node
);
1089 tt_global_entry
= container_of(tt_common_entry
,
1090 struct tt_global_entry
,
1092 batadv_tt_global_entry_free_ref(tt_global_entry
);
1094 spin_unlock_bh(list_lock
);
1097 batadv_hash_destroy(hash
);
1099 bat_priv
->tt_global_hash
= NULL
;
1102 static bool _batadv_is_ap_isolated(struct tt_local_entry
*tt_local_entry
,
1103 struct tt_global_entry
*tt_global_entry
)
1107 if (tt_local_entry
->common
.flags
& BATADV_TT_CLIENT_WIFI
&&
1108 tt_global_entry
->common
.flags
& BATADV_TT_CLIENT_WIFI
)
1114 struct orig_node
*batadv_transtable_search(struct bat_priv
*bat_priv
,
1116 const uint8_t *addr
)
1118 struct tt_local_entry
*tt_local_entry
= NULL
;
1119 struct tt_global_entry
*tt_global_entry
= NULL
;
1120 struct orig_node
*orig_node
= NULL
;
1121 struct neigh_node
*router
= NULL
;
1122 struct hlist_head
*head
;
1123 struct hlist_node
*node
;
1124 struct tt_orig_list_entry
*orig_entry
;
1127 if (src
&& atomic_read(&bat_priv
->ap_isolation
)) {
1128 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, src
);
1129 if (!tt_local_entry
)
1133 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
1134 if (!tt_global_entry
)
1137 /* check whether the clients should not communicate due to AP
1140 if (tt_local_entry
&&
1141 _batadv_is_ap_isolated(tt_local_entry
, tt_global_entry
))
1147 head
= &tt_global_entry
->orig_list
;
1148 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
1149 router
= batadv_orig_node_get_router(orig_entry
->orig_node
);
1153 if (router
->tq_avg
> best_tq
) {
1154 orig_node
= orig_entry
->orig_node
;
1155 best_tq
= router
->tq_avg
;
1157 batadv_neigh_node_free_ref(router
);
1159 /* found anything? */
1160 if (orig_node
&& !atomic_inc_not_zero(&orig_node
->refcount
))
1164 if (tt_global_entry
)
1165 batadv_tt_global_entry_free_ref(tt_global_entry
);
1167 batadv_tt_local_entry_free_ref(tt_local_entry
);
1172 /* Calculates the checksum of the local table of a given orig_node */
1173 static uint16_t batadv_tt_global_crc(struct bat_priv
*bat_priv
,
1174 struct orig_node
*orig_node
)
1176 uint16_t total
= 0, total_one
;
1177 struct hashtable_t
*hash
= bat_priv
->tt_global_hash
;
1178 struct tt_common_entry
*tt_common
;
1179 struct tt_global_entry
*tt_global_entry
;
1180 struct hlist_node
*node
;
1181 struct hlist_head
*head
;
1185 for (i
= 0; i
< hash
->size
; i
++) {
1186 head
= &hash
->table
[i
];
1189 hlist_for_each_entry_rcu(tt_common
, node
, head
, hash_entry
) {
1190 tt_global_entry
= container_of(tt_common
,
1191 struct tt_global_entry
,
1193 /* Roaming clients are in the global table for
1194 * consistency only. They don't have to be
1195 * taken into account while computing the
1198 if (tt_common
->flags
& BATADV_TT_CLIENT_ROAM
)
1201 /* find out if this global entry is announced by this
1204 if (!batadv_tt_global_entry_has_orig(tt_global_entry
,
1209 for (j
= 0; j
< ETH_ALEN
; j
++)
1210 total_one
= crc16_byte(total_one
,
1211 tt_common
->addr
[j
]);
1220 /* Calculates the checksum of the local table */
1221 static uint16_t batadv_tt_local_crc(struct bat_priv
*bat_priv
)
1223 uint16_t total
= 0, total_one
;
1224 struct hashtable_t
*hash
= bat_priv
->tt_local_hash
;
1225 struct tt_common_entry
*tt_common
;
1226 struct hlist_node
*node
;
1227 struct hlist_head
*head
;
1231 for (i
= 0; i
< hash
->size
; i
++) {
1232 head
= &hash
->table
[i
];
1235 hlist_for_each_entry_rcu(tt_common
, node
, head
, hash_entry
) {
1236 /* not yet committed clients have not to be taken into
1237 * account while computing the CRC
1239 if (tt_common
->flags
& BATADV_TT_CLIENT_NEW
)
1242 for (j
= 0; j
< ETH_ALEN
; j
++)
1243 total_one
= crc16_byte(total_one
,
1244 tt_common
->addr
[j
]);
1253 static void batadv_tt_req_list_free(struct bat_priv
*bat_priv
)
1255 struct tt_req_node
*node
, *safe
;
1257 spin_lock_bh(&bat_priv
->tt_req_list_lock
);
1259 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt_req_list
, list
) {
1260 list_del(&node
->list
);
1264 spin_unlock_bh(&bat_priv
->tt_req_list_lock
);
1267 static void batadv_tt_save_orig_buffer(struct bat_priv
*bat_priv
,
1268 struct orig_node
*orig_node
,
1269 const unsigned char *tt_buff
,
1270 uint8_t tt_num_changes
)
1272 uint16_t tt_buff_len
= batadv_tt_len(tt_num_changes
);
1274 /* Replace the old buffer only if I received something in the
1275 * last OGM (the OGM could carry no changes)
1277 spin_lock_bh(&orig_node
->tt_buff_lock
);
1278 if (tt_buff_len
> 0) {
1279 kfree(orig_node
->tt_buff
);
1280 orig_node
->tt_buff_len
= 0;
1281 orig_node
->tt_buff
= kmalloc(tt_buff_len
, GFP_ATOMIC
);
1282 if (orig_node
->tt_buff
) {
1283 memcpy(orig_node
->tt_buff
, tt_buff
, tt_buff_len
);
1284 orig_node
->tt_buff_len
= tt_buff_len
;
1287 spin_unlock_bh(&orig_node
->tt_buff_lock
);
1290 static void batadv_tt_req_purge(struct bat_priv
*bat_priv
)
1292 struct tt_req_node
*node
, *safe
;
1294 spin_lock_bh(&bat_priv
->tt_req_list_lock
);
1295 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt_req_list
, list
) {
1296 if (batadv_has_timed_out(node
->issued_at
,
1297 BATADV_TT_REQUEST_TIMEOUT
)) {
1298 list_del(&node
->list
);
1302 spin_unlock_bh(&bat_priv
->tt_req_list_lock
);
1305 /* returns the pointer to the new tt_req_node struct if no request
1306 * has already been issued for this orig_node, NULL otherwise
1308 static struct tt_req_node
*batadv_new_tt_req_node(struct bat_priv
*bat_priv
,
1309 struct orig_node
*orig_node
)
1311 struct tt_req_node
*tt_req_node_tmp
, *tt_req_node
= NULL
;
1313 spin_lock_bh(&bat_priv
->tt_req_list_lock
);
1314 list_for_each_entry(tt_req_node_tmp
, &bat_priv
->tt_req_list
, list
) {
1315 if (batadv_compare_eth(tt_req_node_tmp
, orig_node
) &&
1316 !batadv_has_timed_out(tt_req_node_tmp
->issued_at
,
1317 BATADV_TT_REQUEST_TIMEOUT
))
1321 tt_req_node
= kmalloc(sizeof(*tt_req_node
), GFP_ATOMIC
);
1325 memcpy(tt_req_node
->addr
, orig_node
->orig
, ETH_ALEN
);
1326 tt_req_node
->issued_at
= jiffies
;
1328 list_add(&tt_req_node
->list
, &bat_priv
->tt_req_list
);
1330 spin_unlock_bh(&bat_priv
->tt_req_list_lock
);
1334 /* data_ptr is useless here, but has to be kept to respect the prototype */
1335 static int batadv_tt_local_valid_entry(const void *entry_ptr
,
1336 const void *data_ptr
)
1338 const struct tt_common_entry
*tt_common_entry
= entry_ptr
;
1340 if (tt_common_entry
->flags
& BATADV_TT_CLIENT_NEW
)
1345 static int batadv_tt_global_valid(const void *entry_ptr
,
1346 const void *data_ptr
)
1348 const struct tt_common_entry
*tt_common_entry
= entry_ptr
;
1349 const struct tt_global_entry
*tt_global_entry
;
1350 const struct orig_node
*orig_node
= data_ptr
;
1352 if (tt_common_entry
->flags
& BATADV_TT_CLIENT_ROAM
)
1355 tt_global_entry
= container_of(tt_common_entry
, struct tt_global_entry
,
1358 return batadv_tt_global_entry_has_orig(tt_global_entry
, orig_node
);
1361 static struct sk_buff
*
1362 batadv_tt_response_fill_table(uint16_t tt_len
, uint8_t ttvn
,
1363 struct hashtable_t
*hash
,
1364 struct hard_iface
*primary_if
,
1365 int (*valid_cb
)(const void *, const void *),
1368 struct tt_common_entry
*tt_common_entry
;
1369 struct tt_query_packet
*tt_response
;
1370 struct tt_change
*tt_change
;
1371 struct hlist_node
*node
;
1372 struct hlist_head
*head
;
1373 struct sk_buff
*skb
= NULL
;
1374 uint16_t tt_tot
, tt_count
;
1375 ssize_t tt_query_size
= sizeof(struct tt_query_packet
);
1378 if (tt_query_size
+ tt_len
> primary_if
->soft_iface
->mtu
) {
1379 tt_len
= primary_if
->soft_iface
->mtu
- tt_query_size
;
1380 tt_len
-= tt_len
% sizeof(struct tt_change
);
1382 tt_tot
= tt_len
/ sizeof(struct tt_change
);
1384 skb
= dev_alloc_skb(tt_query_size
+ tt_len
+ ETH_HLEN
);
1388 skb_reserve(skb
, ETH_HLEN
);
1389 tt_response
= (struct tt_query_packet
*)skb_put(skb
,
1390 tt_query_size
+ tt_len
);
1391 tt_response
->ttvn
= ttvn
;
1393 tt_change
= (struct tt_change
*)(skb
->data
+ tt_query_size
);
1397 for (i
= 0; i
< hash
->size
; i
++) {
1398 head
= &hash
->table
[i
];
1400 hlist_for_each_entry_rcu(tt_common_entry
, node
,
1402 if (tt_count
== tt_tot
)
1405 if ((valid_cb
) && (!valid_cb(tt_common_entry
, cb_data
)))
1408 memcpy(tt_change
->addr
, tt_common_entry
->addr
,
1410 tt_change
->flags
= BATADV_NO_FLAGS
;
1418 /* store in the message the number of entries we have successfully
1421 tt_response
->tt_data
= htons(tt_count
);
1427 static int batadv_send_tt_request(struct bat_priv
*bat_priv
,
1428 struct orig_node
*dst_orig_node
,
1429 uint8_t ttvn
, uint16_t tt_crc
,
1432 struct sk_buff
*skb
= NULL
;
1433 struct tt_query_packet
*tt_request
;
1434 struct neigh_node
*neigh_node
= NULL
;
1435 struct hard_iface
*primary_if
;
1436 struct tt_req_node
*tt_req_node
= NULL
;
1439 primary_if
= batadv_primary_if_get_selected(bat_priv
);
1443 /* The new tt_req will be issued only if I'm not waiting for a
1444 * reply from the same orig_node yet
1446 tt_req_node
= batadv_new_tt_req_node(bat_priv
, dst_orig_node
);
1450 skb
= dev_alloc_skb(sizeof(struct tt_query_packet
) + ETH_HLEN
);
1454 skb_reserve(skb
, ETH_HLEN
);
1456 tt_request
= (struct tt_query_packet
*)skb_put(skb
,
1457 sizeof(struct tt_query_packet
));
1459 tt_request
->header
.packet_type
= BATADV_TT_QUERY
;
1460 tt_request
->header
.version
= BATADV_COMPAT_VERSION
;
1461 memcpy(tt_request
->src
, primary_if
->net_dev
->dev_addr
, ETH_ALEN
);
1462 memcpy(tt_request
->dst
, dst_orig_node
->orig
, ETH_ALEN
);
1463 tt_request
->header
.ttl
= BATADV_TTL
;
1464 tt_request
->ttvn
= ttvn
;
1465 tt_request
->tt_data
= htons(tt_crc
);
1466 tt_request
->flags
= BATADV_TT_REQUEST
;
1469 tt_request
->flags
|= BATADV_TT_FULL_TABLE
;
1471 neigh_node
= batadv_orig_node_get_router(dst_orig_node
);
1475 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1476 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1477 dst_orig_node
->orig
, neigh_node
->addr
,
1478 (full_table
? 'F' : '.'));
1480 batadv_inc_counter(bat_priv
, BATADV_CNT_TT_REQUEST_TX
);
1482 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
1487 batadv_neigh_node_free_ref(neigh_node
);
1489 batadv_hardif_free_ref(primary_if
);
1492 if (ret
&& tt_req_node
) {
1493 spin_lock_bh(&bat_priv
->tt_req_list_lock
);
1494 list_del(&tt_req_node
->list
);
1495 spin_unlock_bh(&bat_priv
->tt_req_list_lock
);
1501 static bool batadv_send_other_tt_response(struct bat_priv
*bat_priv
,
1502 struct tt_query_packet
*tt_request
)
1504 struct orig_node
*req_dst_orig_node
= NULL
, *res_dst_orig_node
= NULL
;
1505 struct neigh_node
*neigh_node
= NULL
;
1506 struct hard_iface
*primary_if
= NULL
;
1507 uint8_t orig_ttvn
, req_ttvn
, ttvn
;
1509 unsigned char *tt_buff
;
1511 uint16_t tt_len
, tt_tot
;
1512 struct sk_buff
*skb
= NULL
;
1513 struct tt_query_packet
*tt_response
;
1515 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1516 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1517 tt_request
->src
, tt_request
->ttvn
, tt_request
->dst
,
1518 (tt_request
->flags
& BATADV_TT_FULL_TABLE
? 'F' : '.'));
1520 /* Let's get the orig node of the REAL destination */
1521 req_dst_orig_node
= batadv_orig_hash_find(bat_priv
, tt_request
->dst
);
1522 if (!req_dst_orig_node
)
1525 res_dst_orig_node
= batadv_orig_hash_find(bat_priv
, tt_request
->src
);
1526 if (!res_dst_orig_node
)
1529 neigh_node
= batadv_orig_node_get_router(res_dst_orig_node
);
1533 primary_if
= batadv_primary_if_get_selected(bat_priv
);
1537 orig_ttvn
= (uint8_t)atomic_read(&req_dst_orig_node
->last_ttvn
);
1538 req_ttvn
= tt_request
->ttvn
;
1540 /* I don't have the requested data */
1541 if (orig_ttvn
!= req_ttvn
||
1542 tt_request
->tt_data
!= htons(req_dst_orig_node
->tt_crc
))
1545 /* If the full table has been explicitly requested */
1546 if (tt_request
->flags
& BATADV_TT_FULL_TABLE
||
1547 !req_dst_orig_node
->tt_buff
)
1552 /* In this version, fragmentation is not implemented, then
1553 * I'll send only one packet with as much TT entries as I can
1556 spin_lock_bh(&req_dst_orig_node
->tt_buff_lock
);
1557 tt_len
= req_dst_orig_node
->tt_buff_len
;
1558 tt_tot
= tt_len
/ sizeof(struct tt_change
);
1560 skb
= dev_alloc_skb(sizeof(struct tt_query_packet
) +
1565 skb_reserve(skb
, ETH_HLEN
);
1566 tt_response
= (struct tt_query_packet
*)skb_put(skb
,
1567 sizeof(struct tt_query_packet
) + tt_len
);
1568 tt_response
->ttvn
= req_ttvn
;
1569 tt_response
->tt_data
= htons(tt_tot
);
1571 tt_buff
= skb
->data
+ sizeof(struct tt_query_packet
);
1572 /* Copy the last orig_node's OGM buffer */
1573 memcpy(tt_buff
, req_dst_orig_node
->tt_buff
,
1574 req_dst_orig_node
->tt_buff_len
);
1576 spin_unlock_bh(&req_dst_orig_node
->tt_buff_lock
);
1578 tt_len
= (uint16_t)atomic_read(&req_dst_orig_node
->tt_size
) *
1579 sizeof(struct tt_change
);
1580 ttvn
= (uint8_t)atomic_read(&req_dst_orig_node
->last_ttvn
);
1582 skb
= batadv_tt_response_fill_table(tt_len
, ttvn
,
1583 bat_priv
->tt_global_hash
,
1585 batadv_tt_global_valid
,
1590 tt_response
= (struct tt_query_packet
*)skb
->data
;
1593 tt_response
->header
.packet_type
= BATADV_TT_QUERY
;
1594 tt_response
->header
.version
= BATADV_COMPAT_VERSION
;
1595 tt_response
->header
.ttl
= BATADV_TTL
;
1596 memcpy(tt_response
->src
, req_dst_orig_node
->orig
, ETH_ALEN
);
1597 memcpy(tt_response
->dst
, tt_request
->src
, ETH_ALEN
);
1598 tt_response
->flags
= BATADV_TT_RESPONSE
;
1601 tt_response
->flags
|= BATADV_TT_FULL_TABLE
;
1603 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1604 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1605 res_dst_orig_node
->orig
, neigh_node
->addr
,
1606 req_dst_orig_node
->orig
, req_ttvn
);
1608 batadv_inc_counter(bat_priv
, BATADV_CNT_TT_RESPONSE_TX
);
1610 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
1615 spin_unlock_bh(&req_dst_orig_node
->tt_buff_lock
);
1618 if (res_dst_orig_node
)
1619 batadv_orig_node_free_ref(res_dst_orig_node
);
1620 if (req_dst_orig_node
)
1621 batadv_orig_node_free_ref(req_dst_orig_node
);
1623 batadv_neigh_node_free_ref(neigh_node
);
1625 batadv_hardif_free_ref(primary_if
);
1631 static bool batadv_send_my_tt_response(struct bat_priv
*bat_priv
,
1632 struct tt_query_packet
*tt_request
)
1634 struct orig_node
*orig_node
= NULL
;
1635 struct neigh_node
*neigh_node
= NULL
;
1636 struct hard_iface
*primary_if
= NULL
;
1637 uint8_t my_ttvn
, req_ttvn
, ttvn
;
1639 unsigned char *tt_buff
;
1641 uint16_t tt_len
, tt_tot
;
1642 struct sk_buff
*skb
= NULL
;
1643 struct tt_query_packet
*tt_response
;
1645 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1646 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1647 tt_request
->src
, tt_request
->ttvn
,
1648 (tt_request
->flags
& BATADV_TT_FULL_TABLE
? 'F' : '.'));
1651 my_ttvn
= (uint8_t)atomic_read(&bat_priv
->ttvn
);
1652 req_ttvn
= tt_request
->ttvn
;
1654 orig_node
= batadv_orig_hash_find(bat_priv
, tt_request
->src
);
1658 neigh_node
= batadv_orig_node_get_router(orig_node
);
1662 primary_if
= batadv_primary_if_get_selected(bat_priv
);
1666 /* If the full table has been explicitly requested or the gap
1667 * is too big send the whole local translation table
1669 if (tt_request
->flags
& BATADV_TT_FULL_TABLE
|| my_ttvn
!= req_ttvn
||
1675 /* In this version, fragmentation is not implemented, then
1676 * I'll send only one packet with as much TT entries as I can
1679 spin_lock_bh(&bat_priv
->tt_buff_lock
);
1680 tt_len
= bat_priv
->tt_buff_len
;
1681 tt_tot
= tt_len
/ sizeof(struct tt_change
);
1683 skb
= dev_alloc_skb(sizeof(struct tt_query_packet
) +
1688 skb_reserve(skb
, ETH_HLEN
);
1689 tt_response
= (struct tt_query_packet
*)skb_put(skb
,
1690 sizeof(struct tt_query_packet
) + tt_len
);
1691 tt_response
->ttvn
= req_ttvn
;
1692 tt_response
->tt_data
= htons(tt_tot
);
1694 tt_buff
= skb
->data
+ sizeof(struct tt_query_packet
);
1695 memcpy(tt_buff
, bat_priv
->tt_buff
,
1696 bat_priv
->tt_buff_len
);
1697 spin_unlock_bh(&bat_priv
->tt_buff_lock
);
1699 tt_len
= (uint16_t)atomic_read(&bat_priv
->num_local_tt
) *
1700 sizeof(struct tt_change
);
1701 ttvn
= (uint8_t)atomic_read(&bat_priv
->ttvn
);
1703 skb
= batadv_tt_response_fill_table(tt_len
, ttvn
,
1704 bat_priv
->tt_local_hash
,
1706 batadv_tt_local_valid_entry
,
1711 tt_response
= (struct tt_query_packet
*)skb
->data
;
1714 tt_response
->header
.packet_type
= BATADV_TT_QUERY
;
1715 tt_response
->header
.version
= BATADV_COMPAT_VERSION
;
1716 tt_response
->header
.ttl
= BATADV_TTL
;
1717 memcpy(tt_response
->src
, primary_if
->net_dev
->dev_addr
, ETH_ALEN
);
1718 memcpy(tt_response
->dst
, tt_request
->src
, ETH_ALEN
);
1719 tt_response
->flags
= BATADV_TT_RESPONSE
;
1722 tt_response
->flags
|= BATADV_TT_FULL_TABLE
;
1724 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1725 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1726 orig_node
->orig
, neigh_node
->addr
,
1727 (tt_response
->flags
& BATADV_TT_FULL_TABLE
? 'F' : '.'));
1729 batadv_inc_counter(bat_priv
, BATADV_CNT_TT_RESPONSE_TX
);
1731 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
1736 spin_unlock_bh(&bat_priv
->tt_buff_lock
);
1739 batadv_orig_node_free_ref(orig_node
);
1741 batadv_neigh_node_free_ref(neigh_node
);
1743 batadv_hardif_free_ref(primary_if
);
1746 /* This packet was for me, so it doesn't need to be re-routed */
1750 bool batadv_send_tt_response(struct bat_priv
*bat_priv
,
1751 struct tt_query_packet
*tt_request
)
1753 if (batadv_is_my_mac(tt_request
->dst
)) {
1754 /* don't answer backbone gws! */
1755 if (batadv_bla_is_backbone_gw_orig(bat_priv
, tt_request
->src
))
1758 return batadv_send_my_tt_response(bat_priv
, tt_request
);
1760 return batadv_send_other_tt_response(bat_priv
, tt_request
);
1764 static void _batadv_tt_update_changes(struct bat_priv
*bat_priv
,
1765 struct orig_node
*orig_node
,
1766 struct tt_change
*tt_change
,
1767 uint16_t tt_num_changes
, uint8_t ttvn
)
1772 for (i
= 0; i
< tt_num_changes
; i
++) {
1773 if ((tt_change
+ i
)->flags
& BATADV_TT_CLIENT_DEL
) {
1774 roams
= (tt_change
+ i
)->flags
& BATADV_TT_CLIENT_ROAM
;
1775 batadv_tt_global_del(bat_priv
, orig_node
,
1776 (tt_change
+ i
)->addr
,
1777 "tt removed by changes",
1780 if (!batadv_tt_global_add(bat_priv
, orig_node
,
1781 (tt_change
+ i
)->addr
,
1782 (tt_change
+ i
)->flags
, ttvn
))
1783 /* In case of problem while storing a
1784 * global_entry, we stop the updating
1785 * procedure without committing the
1786 * ttvn change. This will avoid to send
1787 * corrupted data on tt_request
1792 orig_node
->tt_initialised
= true;
1795 static void batadv_tt_fill_gtable(struct bat_priv
*bat_priv
,
1796 struct tt_query_packet
*tt_response
)
1798 struct orig_node
*orig_node
= NULL
;
1800 orig_node
= batadv_orig_hash_find(bat_priv
, tt_response
->src
);
1804 /* Purge the old table first.. */
1805 batadv_tt_global_del_orig(bat_priv
, orig_node
, "Received full table");
1807 _batadv_tt_update_changes(bat_priv
, orig_node
,
1808 (struct tt_change
*)(tt_response
+ 1),
1809 ntohs(tt_response
->tt_data
),
1812 spin_lock_bh(&orig_node
->tt_buff_lock
);
1813 kfree(orig_node
->tt_buff
);
1814 orig_node
->tt_buff_len
= 0;
1815 orig_node
->tt_buff
= NULL
;
1816 spin_unlock_bh(&orig_node
->tt_buff_lock
);
1818 atomic_set(&orig_node
->last_ttvn
, tt_response
->ttvn
);
1822 batadv_orig_node_free_ref(orig_node
);
1825 static void batadv_tt_update_changes(struct bat_priv
*bat_priv
,
1826 struct orig_node
*orig_node
,
1827 uint16_t tt_num_changes
, uint8_t ttvn
,
1828 struct tt_change
*tt_change
)
1830 _batadv_tt_update_changes(bat_priv
, orig_node
, tt_change
,
1831 tt_num_changes
, ttvn
);
1833 batadv_tt_save_orig_buffer(bat_priv
, orig_node
,
1834 (unsigned char *)tt_change
, tt_num_changes
);
1835 atomic_set(&orig_node
->last_ttvn
, ttvn
);
1838 bool batadv_is_my_client(struct bat_priv
*bat_priv
, const uint8_t *addr
)
1840 struct tt_local_entry
*tt_local_entry
= NULL
;
1843 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, addr
);
1844 if (!tt_local_entry
)
1846 /* Check if the client has been logically deleted (but is kept for
1847 * consistency purpose)
1849 if (tt_local_entry
->common
.flags
& BATADV_TT_CLIENT_PENDING
)
1854 batadv_tt_local_entry_free_ref(tt_local_entry
);
1858 void batadv_handle_tt_response(struct bat_priv
*bat_priv
,
1859 struct tt_query_packet
*tt_response
)
1861 struct tt_req_node
*node
, *safe
;
1862 struct orig_node
*orig_node
= NULL
;
1864 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1865 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1866 tt_response
->src
, tt_response
->ttvn
,
1867 ntohs(tt_response
->tt_data
),
1868 (tt_response
->flags
& BATADV_TT_FULL_TABLE
? 'F' : '.'));
1870 /* we should have never asked a backbone gw */
1871 if (batadv_bla_is_backbone_gw_orig(bat_priv
, tt_response
->src
))
1874 orig_node
= batadv_orig_hash_find(bat_priv
, tt_response
->src
);
1878 if (tt_response
->flags
& BATADV_TT_FULL_TABLE
)
1879 batadv_tt_fill_gtable(bat_priv
, tt_response
);
1881 batadv_tt_update_changes(bat_priv
, orig_node
,
1882 ntohs(tt_response
->tt_data
),
1884 (struct tt_change
*)(tt_response
+ 1));
1886 /* Delete the tt_req_node from pending tt_requests list */
1887 spin_lock_bh(&bat_priv
->tt_req_list_lock
);
1888 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt_req_list
, list
) {
1889 if (!batadv_compare_eth(node
->addr
, tt_response
->src
))
1891 list_del(&node
->list
);
1894 spin_unlock_bh(&bat_priv
->tt_req_list_lock
);
1896 /* Recalculate the CRC for this orig_node and store it */
1897 orig_node
->tt_crc
= batadv_tt_global_crc(bat_priv
, orig_node
);
1898 /* Roaming phase is over: tables are in sync again. I can
1901 orig_node
->tt_poss_change
= false;
1904 batadv_orig_node_free_ref(orig_node
);
/* Initialise local and global translation tables and arm the purge timer.
 * NOTE(review): the error-propagation lines and final return value were
 * missing from the dump — verify the success return code against upstream. */
int batadv_tt_init(struct bat_priv *bat_priv)
{
	int ret;

	ret = batadv_tt_local_init(bat_priv);
	if (ret < 0)
		return ret;

	ret = batadv_tt_global_init(bat_priv);
	if (ret < 0)
		return ret;

	batadv_tt_start_timer(bat_priv);

	return 1;
}
1924 static void batadv_tt_roam_list_free(struct bat_priv
*bat_priv
)
1926 struct tt_roam_node
*node
, *safe
;
1928 spin_lock_bh(&bat_priv
->tt_roam_list_lock
);
1930 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt_roam_list
, list
) {
1931 list_del(&node
->list
);
1935 spin_unlock_bh(&bat_priv
->tt_roam_list_lock
);
1938 static void batadv_tt_roam_purge(struct bat_priv
*bat_priv
)
1940 struct tt_roam_node
*node
, *safe
;
1942 spin_lock_bh(&bat_priv
->tt_roam_list_lock
);
1943 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt_roam_list
, list
) {
1944 if (!batadv_has_timed_out(node
->first_time
,
1945 BATADV_ROAMING_MAX_TIME
))
1948 list_del(&node
->list
);
1951 spin_unlock_bh(&bat_priv
->tt_roam_list_lock
);
1954 /* This function checks whether the client already reached the
1955 * maximum number of possible roaming phases. In this case the ROAMING_ADV
1958 * returns true if the ROAMING_ADV can be sent, false otherwise
1960 static bool batadv_tt_check_roam_count(struct bat_priv
*bat_priv
,
1963 struct tt_roam_node
*tt_roam_node
;
1966 spin_lock_bh(&bat_priv
->tt_roam_list_lock
);
1967 /* The new tt_req will be issued only if I'm not waiting for a
1968 * reply from the same orig_node yet
1970 list_for_each_entry(tt_roam_node
, &bat_priv
->tt_roam_list
, list
) {
1971 if (!batadv_compare_eth(tt_roam_node
->addr
, client
))
1974 if (batadv_has_timed_out(tt_roam_node
->first_time
,
1975 BATADV_ROAMING_MAX_TIME
))
1978 if (!batadv_atomic_dec_not_zero(&tt_roam_node
->counter
))
1979 /* Sorry, you roamed too many times! */
1986 tt_roam_node
= kmalloc(sizeof(*tt_roam_node
), GFP_ATOMIC
);
1990 tt_roam_node
->first_time
= jiffies
;
1991 atomic_set(&tt_roam_node
->counter
,
1992 BATADV_ROAMING_MAX_COUNT
- 1);
1993 memcpy(tt_roam_node
->addr
, client
, ETH_ALEN
);
1995 list_add(&tt_roam_node
->list
, &bat_priv
->tt_roam_list
);
2000 spin_unlock_bh(&bat_priv
->tt_roam_list_lock
);
2004 static void batadv_send_roam_adv(struct bat_priv
*bat_priv
, uint8_t *client
,
2005 struct orig_node
*orig_node
)
2007 struct neigh_node
*neigh_node
= NULL
;
2008 struct sk_buff
*skb
= NULL
;
2009 struct roam_adv_packet
*roam_adv_packet
;
2011 struct hard_iface
*primary_if
;
2013 /* before going on we have to check whether the client has
2014 * already roamed to us too many times
2016 if (!batadv_tt_check_roam_count(bat_priv
, client
))
2019 skb
= dev_alloc_skb(sizeof(struct roam_adv_packet
) + ETH_HLEN
);
2023 skb_reserve(skb
, ETH_HLEN
);
2025 roam_adv_packet
= (struct roam_adv_packet
*)skb_put(skb
,
2026 sizeof(struct roam_adv_packet
));
2028 roam_adv_packet
->header
.packet_type
= BATADV_ROAM_ADV
;
2029 roam_adv_packet
->header
.version
= BATADV_COMPAT_VERSION
;
2030 roam_adv_packet
->header
.ttl
= BATADV_TTL
;
2031 primary_if
= batadv_primary_if_get_selected(bat_priv
);
2034 memcpy(roam_adv_packet
->src
, primary_if
->net_dev
->dev_addr
, ETH_ALEN
);
2035 batadv_hardif_free_ref(primary_if
);
2036 memcpy(roam_adv_packet
->dst
, orig_node
->orig
, ETH_ALEN
);
2037 memcpy(roam_adv_packet
->client
, client
, ETH_ALEN
);
2039 neigh_node
= batadv_orig_node_get_router(orig_node
);
2043 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
2044 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
2045 orig_node
->orig
, client
, neigh_node
->addr
);
2047 batadv_inc_counter(bat_priv
, BATADV_CNT_TT_ROAM_ADV_TX
);
2049 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
2054 batadv_neigh_node_free_ref(neigh_node
);
2060 static void batadv_tt_purge(struct work_struct
*work
)
2062 struct delayed_work
*delayed_work
=
2063 container_of(work
, struct delayed_work
, work
);
2064 struct bat_priv
*bat_priv
=
2065 container_of(delayed_work
, struct bat_priv
, tt_work
);
2067 batadv_tt_local_purge(bat_priv
);
2068 batadv_tt_global_roam_purge(bat_priv
);
2069 batadv_tt_req_purge(bat_priv
);
2070 batadv_tt_roam_purge(bat_priv
);
2072 batadv_tt_start_timer(bat_priv
);
2075 void batadv_tt_free(struct bat_priv
*bat_priv
)
2077 cancel_delayed_work_sync(&bat_priv
->tt_work
);
2079 batadv_tt_local_table_free(bat_priv
);
2080 batadv_tt_global_table_free(bat_priv
);
2081 batadv_tt_req_list_free(bat_priv
);
2082 batadv_tt_changes_list_free(bat_priv
);
2083 batadv_tt_roam_list_free(bat_priv
);
2085 kfree(bat_priv
->tt_buff
);
2088 /* This function will enable or disable the specified flags for all the entries
2089 * in the given hash table and returns the number of modified entries
2091 static uint16_t batadv_tt_set_flags(struct hashtable_t
*hash
, uint16_t flags
,
2095 uint16_t changed_num
= 0;
2096 struct hlist_head
*head
;
2097 struct hlist_node
*node
;
2098 struct tt_common_entry
*tt_common_entry
;
2103 for (i
= 0; i
< hash
->size
; i
++) {
2104 head
= &hash
->table
[i
];
2107 hlist_for_each_entry_rcu(tt_common_entry
, node
,
2110 if ((tt_common_entry
->flags
& flags
) == flags
)
2112 tt_common_entry
->flags
|= flags
;
2114 if (!(tt_common_entry
->flags
& flags
))
2116 tt_common_entry
->flags
&= ~flags
;
2126 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
2127 static void batadv_tt_local_purge_pending_clients(struct bat_priv
*bat_priv
)
2129 struct hashtable_t
*hash
= bat_priv
->tt_local_hash
;
2130 struct tt_common_entry
*tt_common
;
2131 struct tt_local_entry
*tt_local_entry
;
2132 struct hlist_node
*node
, *node_tmp
;
2133 struct hlist_head
*head
;
2134 spinlock_t
*list_lock
; /* protects write access to the hash lists */
2140 for (i
= 0; i
< hash
->size
; i
++) {
2141 head
= &hash
->table
[i
];
2142 list_lock
= &hash
->list_locks
[i
];
2144 spin_lock_bh(list_lock
);
2145 hlist_for_each_entry_safe(tt_common
, node
, node_tmp
, head
,
2147 if (!(tt_common
->flags
& BATADV_TT_CLIENT_PENDING
))
2150 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
2151 "Deleting local tt entry (%pM): pending\n",
2154 atomic_dec(&bat_priv
->num_local_tt
);
2155 hlist_del_rcu(node
);
2156 tt_local_entry
= container_of(tt_common
,
2157 struct tt_local_entry
,
2159 batadv_tt_local_entry_free_ref(tt_local_entry
);
2161 spin_unlock_bh(list_lock
);
2166 static int batadv_tt_commit_changes(struct bat_priv
*bat_priv
,
2167 unsigned char **packet_buff
,
2168 int *packet_buff_len
, int packet_min_len
)
2170 uint16_t changed_num
= 0;
2172 if (atomic_read(&bat_priv
->tt_local_changes
) < 1)
2175 changed_num
= batadv_tt_set_flags(bat_priv
->tt_local_hash
,
2176 BATADV_TT_CLIENT_NEW
, false);
2178 /* all reset entries have to be counted as local entries */
2179 atomic_add(changed_num
, &bat_priv
->num_local_tt
);
2180 batadv_tt_local_purge_pending_clients(bat_priv
);
2181 bat_priv
->tt_crc
= batadv_tt_local_crc(bat_priv
);
2183 /* Increment the TTVN only once per OGM interval */
2184 atomic_inc(&bat_priv
->ttvn
);
2185 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
2186 "Local changes committed, updating to ttvn %u\n",
2187 (uint8_t)atomic_read(&bat_priv
->ttvn
));
2188 bat_priv
->tt_poss_change
= false;
2190 /* reset the sending counter */
2191 atomic_set(&bat_priv
->tt_ogm_append_cnt
, BATADV_TT_OGM_APPEND_MAX
);
2193 return batadv_tt_changes_fill_buff(bat_priv
, packet_buff
,
2194 packet_buff_len
, packet_min_len
);
2197 /* when calling this function (hard_iface == primary_if) has to be true */
2198 int batadv_tt_append_diff(struct bat_priv
*bat_priv
,
2199 unsigned char **packet_buff
, int *packet_buff_len
,
2204 /* if at least one change happened */
2205 tt_num_changes
= batadv_tt_commit_changes(bat_priv
, packet_buff
,
2209 /* if the changes have been sent often enough */
2210 if ((tt_num_changes
< 0) &&
2211 (!batadv_atomic_dec_not_zero(&bat_priv
->tt_ogm_append_cnt
))) {
2212 batadv_tt_realloc_packet_buff(packet_buff
, packet_buff_len
,
2213 packet_min_len
, packet_min_len
);
2217 return tt_num_changes
;
2220 bool batadv_is_ap_isolated(struct bat_priv
*bat_priv
, uint8_t *src
,
2223 struct tt_local_entry
*tt_local_entry
= NULL
;
2224 struct tt_global_entry
*tt_global_entry
= NULL
;
2227 if (!atomic_read(&bat_priv
->ap_isolation
))
2230 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, dst
);
2231 if (!tt_local_entry
)
2234 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, src
);
2235 if (!tt_global_entry
)
2238 if (!_batadv_is_ap_isolated(tt_local_entry
, tt_global_entry
))
2244 if (tt_global_entry
)
2245 batadv_tt_global_entry_free_ref(tt_global_entry
);
2247 batadv_tt_local_entry_free_ref(tt_local_entry
);
2251 void batadv_tt_update_orig(struct bat_priv
*bat_priv
,
2252 struct orig_node
*orig_node
,
2253 const unsigned char *tt_buff
, uint8_t tt_num_changes
,
2254 uint8_t ttvn
, uint16_t tt_crc
)
2256 uint8_t orig_ttvn
= (uint8_t)atomic_read(&orig_node
->last_ttvn
);
2257 bool full_table
= true;
2259 /* don't care about a backbone gateways updates. */
2260 if (batadv_bla_is_backbone_gw_orig(bat_priv
, orig_node
->orig
))
2263 /* orig table not initialised AND first diff is in the OGM OR the ttvn
2264 * increased by one -> we can apply the attached changes
2266 if ((!orig_node
->tt_initialised
&& ttvn
== 1) ||
2267 ttvn
- orig_ttvn
== 1) {
2268 /* the OGM could not contain the changes due to their size or
2269 * because they have already been sent BATADV_TT_OGM_APPEND_MAX
2271 * In this case send a tt request
2273 if (!tt_num_changes
) {
2278 batadv_tt_update_changes(bat_priv
, orig_node
, tt_num_changes
,
2279 ttvn
, (struct tt_change
*)tt_buff
);
2281 /* Even if we received the precomputed crc with the OGM, we
2282 * prefer to recompute it to spot any possible inconsistency
2283 * in the global table
2285 orig_node
->tt_crc
= batadv_tt_global_crc(bat_priv
, orig_node
);
2287 /* The ttvn alone is not enough to guarantee consistency
2288 * because a single value could represent different states
2289 * (due to the wrap around). Thus a node has to check whether
2290 * the resulting table (after applying the changes) is still
2291 * consistent or not. E.g. a node could disconnect while its
2292 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2293 * checking the CRC value is mandatory to detect the
2296 if (orig_node
->tt_crc
!= tt_crc
)
2299 /* Roaming phase is over: tables are in sync again. I can
2302 orig_node
->tt_poss_change
= false;
2304 /* if we missed more than one change or our tables are not
2305 * in sync anymore -> request fresh tt data
2307 if (!orig_node
->tt_initialised
|| ttvn
!= orig_ttvn
||
2308 orig_node
->tt_crc
!= tt_crc
) {
2310 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
2311 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2312 orig_node
->orig
, ttvn
, orig_ttvn
, tt_crc
,
2313 orig_node
->tt_crc
, tt_num_changes
);
2314 batadv_send_tt_request(bat_priv
, orig_node
, ttvn
,
2315 tt_crc
, full_table
);
2321 /* returns true whether we know that the client has moved from its old
2322 * originator to another one. This entry is kept is still kept for consistency
2325 bool batadv_tt_global_client_is_roaming(struct bat_priv
*bat_priv
,
2328 struct tt_global_entry
*tt_global_entry
;
2331 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
2332 if (!tt_global_entry
)
2335 ret
= tt_global_entry
->common
.flags
& BATADV_TT_CLIENT_ROAM
;
2336 batadv_tt_global_entry_free_ref(tt_global_entry
);