net/batman-adv/bridge_loop_avoidance.c (mirror_ubuntu-artful-kernel.git)
1 /* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
2 *
3 * Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include "bridge_loop_avoidance.h"
19 #include "main.h"
20
21 #include <linux/atomic.h>
22 #include <linux/byteorder/generic.h>
23 #include <linux/compiler.h>
24 #include <linux/crc16.h>
25 #include <linux/errno.h>
26 #include <linux/etherdevice.h>
27 #include <linux/fs.h>
28 #include <linux/if_arp.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/jhash.h>
32 #include <linux/jiffies.h>
33 #include <linux/kernel.h>
34 #include <linux/kref.h>
35 #include <linux/list.h>
36 #include <linux/lockdep.h>
37 #include <linux/netdevice.h>
38 #include <linux/rculist.h>
39 #include <linux/rcupdate.h>
40 #include <linux/seq_file.h>
41 #include <linux/skbuff.h>
42 #include <linux/slab.h>
43 #include <linux/spinlock.h>
44 #include <linux/stddef.h>
45 #include <linux/string.h>
46 #include <linux/workqueue.h>
47 #include <net/arp.h>
48
49 #include "hard-interface.h"
50 #include "hash.h"
51 #include "originator.h"
52 #include "packet.h"
53 #include "sysfs.h"
54 #include "translation-table.h"
55
56 static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
57
58 static void batadv_bla_periodic_work(struct work_struct *work);
59 static void
60 batadv_bla_send_announce(struct batadv_priv *bat_priv,
61 struct batadv_bla_backbone_gw *backbone_gw);
62
63 /**
64 * batadv_choose_claim - choose the right bucket for a claim.
65 * @data: data to hash
66 * @size: size of the hash table
67 *
68 * Return: the hash index of the claim
69 */
70 static inline u32 batadv_choose_claim(const void *data, u32 size)
71 {
72 struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
73 u32 hash = 0;
74
75 hash = jhash(&claim->addr, sizeof(claim->addr), hash);
76 hash = jhash(&claim->vid, sizeof(claim->vid), hash);
77
78 return hash % size;
79 }
80
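/* note: batadv_choose_backbone_gw() below hashes the same fields as
 * batadv_choose_claim() although it is fed struct batadv_bla_backbone_gw
 * entries; this appears to rely on both structures starting with a 6 byte
 * MAC address followed by the vid, so the identical bytes get hashed.
 */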
81 /**
82 * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
83 * @data: data to hash
84 * @size: size of the hash table
85 *
86 * Return: the hash index of the backbone gateway
87 */
88 static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
89 {
90 const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
91 u32 hash = 0;
92
93 hash = jhash(&claim->addr, sizeof(claim->addr), hash);
94 hash = jhash(&claim->vid, sizeof(claim->vid), hash);
95
96 return hash % size;
97 }
98
99 /**
100 * batadv_compare_backbone_gw - compare address and vid of two backbone gws
101 * @node: list node of the first entry to compare
102 * @data2: pointer to the second backbone gateway
103 *
104 * Return: true if the backbones have the same data, false otherwise
105 */
106 static bool batadv_compare_backbone_gw(const struct hlist_node *node,
107 const void *data2)
108 {
109 const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
110 hash_entry);
111 const struct batadv_bla_backbone_gw *gw1 = data1;
112 const struct batadv_bla_backbone_gw *gw2 = data2;
113
114 if (!batadv_compare_eth(gw1->orig, gw2->orig))
115 return false;
116
117 if (gw1->vid != gw2->vid)
118 return false;
119
120 return true;
121 }
122
123 /**
124 * batadv_compare_claim - compare address and vid of two claims
125 * @node: list node of the first entry to compare
126 * @data2: pointer to the second claim
127 *
128 * Return: true if the claims have the same data, false otherwise
129 */
130 static bool batadv_compare_claim(const struct hlist_node *node,
131 const void *data2)
132 {
133 const void *data1 = container_of(node, struct batadv_bla_claim,
134 hash_entry);
135 const struct batadv_bla_claim *cl1 = data1;
136 const struct batadv_bla_claim *cl2 = data2;
137
138 if (!batadv_compare_eth(cl1->addr, cl2->addr))
139 return false;
140
141 if (cl1->vid != cl2->vid)
142 return false;
143
144 return true;
145 }
146
147 /**
148 * batadv_backbone_gw_release - release backbone gw from lists and queue for
149 * free after rcu grace period
150 * @ref: kref pointer of the backbone gw
151 */
152 static void batadv_backbone_gw_release(struct kref *ref)
153 {
154 struct batadv_bla_backbone_gw *backbone_gw;
155
156 backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
157 refcount);
158
159 kfree_rcu(backbone_gw, rcu);
160 }
161
162 /**
163 * batadv_backbone_gw_put - decrement the backbone gw refcounter and possibly
164 * release it
165 * @backbone_gw: backbone gateway to be free'd
166 */
167 static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
168 {
169 kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
170 }
171
172 /**
173 * batadv_claim_release - release claim from lists and queue for free after rcu
174 * grace period
175 * @ref: kref pointer of the claim
176 */
177 static void batadv_claim_release(struct kref *ref)
178 {
179 struct batadv_bla_claim *claim;
180 struct batadv_bla_backbone_gw *old_backbone_gw;
181
182 claim = container_of(ref, struct batadv_bla_claim, refcount);
183
184 spin_lock_bh(&claim->backbone_lock);
185 old_backbone_gw = claim->backbone_gw;
186 claim->backbone_gw = NULL;
187 spin_unlock_bh(&claim->backbone_lock);
188
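/* the backbone CRC is a running XOR of crc16() over all claimed client
 * addresses, so XOR-ing the address in again removes this claim's
 * contribution from the checksum.
 */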
189 spin_lock_bh(&old_backbone_gw->crc_lock);
190 old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
191 spin_unlock_bh(&old_backbone_gw->crc_lock);
192
193 batadv_backbone_gw_put(old_backbone_gw);
194
195 kfree_rcu(claim, rcu);
196 }
197
198 /**
199 * batadv_claim_put - decrement the claim refcounter and possibly
200 * release it
201 * @claim: claim to be free'd
202 */
203 static void batadv_claim_put(struct batadv_bla_claim *claim)
204 {
205 kref_put(&claim->refcount, batadv_claim_release);
206 }
207
208 /**
209 * batadv_claim_hash_find - looks for a claim in the claim hash
210 * @bat_priv: the bat priv with all the soft interface information
211 * @data: search data (may be local/static data)
212 *
213 * Return: claim if found or NULL otherwise.
214 */
215 static struct batadv_bla_claim *
216 batadv_claim_hash_find(struct batadv_priv *bat_priv,
217 struct batadv_bla_claim *data)
218 {
219 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
220 struct hlist_head *head;
221 struct batadv_bla_claim *claim;
222 struct batadv_bla_claim *claim_tmp = NULL;
223 int index;
224
225 if (!hash)
226 return NULL;
227
228 index = batadv_choose_claim(data, hash->size);
229 head = &hash->table[index];
230
231 rcu_read_lock();
232 hlist_for_each_entry_rcu(claim, head, hash_entry) {
233 if (!batadv_compare_claim(&claim->hash_entry, data))
234 continue;
235
236 if (!kref_get_unless_zero(&claim->refcount))
237 continue;
238
239 claim_tmp = claim;
240 break;
241 }
242 rcu_read_unlock();
243
244 return claim_tmp;
245 }
246
247 /**
248 * batadv_backbone_hash_find - looks for a backbone gateway in the hash
249 * @bat_priv: the bat priv with all the soft interface information
250 * @addr: the address of the originator
251 * @vid: the VLAN ID
252 *
253 * Return: backbone gateway if found or NULL otherwise
254 */
255 static struct batadv_bla_backbone_gw *
256 batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
257 unsigned short vid)
258 {
259 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
260 struct hlist_head *head;
261 struct batadv_bla_backbone_gw search_entry, *backbone_gw;
262 struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
263 int index;
264
265 if (!hash)
266 return NULL;
267
268 ether_addr_copy(search_entry.orig, addr);
269 search_entry.vid = vid;
270
271 index = batadv_choose_backbone_gw(&search_entry, hash->size);
272 head = &hash->table[index];
273
274 rcu_read_lock();
275 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
276 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
277 &search_entry))
278 continue;
279
280 if (!kref_get_unless_zero(&backbone_gw->refcount))
281 continue;
282
283 backbone_gw_tmp = backbone_gw;
284 break;
285 }
286 rcu_read_unlock();
287
288 return backbone_gw_tmp;
289 }
290
291 /**
292 * batadv_bla_del_backbone_claims - delete all claims for a backbone
293 * @backbone_gw: backbone gateway where the claims should be removed
294 */
295 static void
296 batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
297 {
298 struct batadv_hashtable *hash;
299 struct hlist_node *node_tmp;
300 struct hlist_head *head;
301 struct batadv_bla_claim *claim;
302 int i;
303 spinlock_t *list_lock; /* protects write access to the hash lists */
304
305 hash = backbone_gw->bat_priv->bla.claim_hash;
306 if (!hash)
307 return;
308
309 for (i = 0; i < hash->size; i++) {
310 head = &hash->table[i];
311 list_lock = &hash->list_locks[i];
312
313 spin_lock_bh(list_lock);
314 hlist_for_each_entry_safe(claim, node_tmp,
315 head, hash_entry) {
316 if (claim->backbone_gw != backbone_gw)
317 continue;
318
319 batadv_claim_put(claim);
320 hlist_del_rcu(&claim->hash_entry);
321 }
322 spin_unlock_bh(list_lock);
323 }
324
325 /* all claims gone, initialize CRC */
326 spin_lock_bh(&backbone_gw->crc_lock);
327 backbone_gw->crc = BATADV_BLA_CRC_INIT;
328 spin_unlock_bh(&backbone_gw->crc_lock);
329 }
330
331 /**
332 * batadv_bla_send_claim - sends a claim frame according to the provided info
333 * @bat_priv: the bat priv with all the soft interface information
334 * @mac: the mac address to be announced within the claim
335 * @vid: the VLAN ID
336 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
337 */
338 static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
339 unsigned short vid, int claimtype)
340 {
341 struct sk_buff *skb;
342 struct ethhdr *ethhdr;
343 struct batadv_hard_iface *primary_if;
344 struct net_device *soft_iface;
345 u8 *hw_src;
346 struct batadv_bla_claim_dst local_claim_dest;
347 __be32 zeroip = 0;
348
349 primary_if = batadv_primary_if_get_selected(bat_priv);
350 if (!primary_if)
351 return;
352
353 memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
354 sizeof(local_claim_dest));
355 local_claim_dest.type = claimtype;
356
357 soft_iface = primary_if->soft_iface;
358
359 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
360 /* IP DST: 0.0.0.0 */
361 zeroip,
362 primary_if->soft_iface,
363 /* IP SRC: 0.0.0.0 */
364 zeroip,
365 /* Ethernet DST: Broadcast */
366 NULL,
367 /* Ethernet SRC/HW SRC: originator mac */
368 primary_if->net_dev->dev_addr,
369 /* HW DST: FF:43:05:XX:YY:YY
370 * with XX = claim type
371 * and YY:YY = group id
372 */
373 (u8 *)&local_claim_dest);
374
375 if (!skb)
376 goto out;
377
378 ethhdr = (struct ethhdr *)skb->data;
379 hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
380
381 /* now we pretend that the client would have sent this ... */
382 switch (claimtype) {
383 case BATADV_CLAIM_TYPE_CLAIM:
384 /* normal claim frame
385 * set Ethernet SRC to the client's mac
386 */
387 ether_addr_copy(ethhdr->h_source, mac);
388 batadv_dbg(BATADV_DBG_BLA, bat_priv,
389 "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
390 BATADV_PRINT_VID(vid));
391 break;
392 case BATADV_CLAIM_TYPE_UNCLAIM:
393 /* unclaim frame
394 * set HW SRC to the client's mac
395 */
396 ether_addr_copy(hw_src, mac);
397 batadv_dbg(BATADV_DBG_BLA, bat_priv,
398 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
399 BATADV_PRINT_VID(vid));
400 break;
401 case BATADV_CLAIM_TYPE_ANNOUNCE:
402 /* announcement frame
403 * set HW SRC to the special mac containing the crc
404 */
405 ether_addr_copy(hw_src, mac);
406 batadv_dbg(BATADV_DBG_BLA, bat_priv,
407 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
408 ethhdr->h_source, BATADV_PRINT_VID(vid));
409 break;
410 case BATADV_CLAIM_TYPE_REQUEST:
411 /* request frame
412 * set HW SRC and header destination to the receiving backbone
413 * gw's mac
414 */
415 ether_addr_copy(hw_src, mac);
416 ether_addr_copy(ethhdr->h_dest, mac);
417 batadv_dbg(BATADV_DBG_BLA, bat_priv,
418 "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
419 ethhdr->h_source, ethhdr->h_dest,
420 BATADV_PRINT_VID(vid));
421 break;
422 case BATADV_CLAIM_TYPE_LOOPDETECT:
423 ether_addr_copy(ethhdr->h_source, mac);
424 batadv_dbg(BATADV_DBG_BLA, bat_priv,
425 "bla_send_claim(): LOOPDETECT of %pM to %pM on vid %d\n",
426 ethhdr->h_source, ethhdr->h_dest,
427 BATADV_PRINT_VID(vid));
428
429 break;
430 }
431
432 if (vid & BATADV_VLAN_HAS_TAG) {
433 skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
434 vid & VLAN_VID_MASK);
435 if (!skb)
436 goto out;
437 }
438
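/* the finished claim frame is injected into the local receive path below;
 * an attached bridge is then expected to forward it onto the LAN segment
 * like any other frame received on the soft interface, which is how claim
 * frames reach the backbone LAN.
 */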
439 skb_reset_mac_header(skb);
440 skb->protocol = eth_type_trans(skb, soft_iface);
441 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
442 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
443 skb->len + ETH_HLEN);
444 soft_iface->last_rx = jiffies;
445
446 netif_rx(skb);
447 out:
448 if (primary_if)
449 batadv_hardif_put(primary_if);
450 }
451
452 /**
453 * batadv_bla_loopdetect_report - worker for reporting the loop
454 * @work: work queue item
455 *
456 * Throws an uevent, as the loopdetect check function can't do that itself
457 * since throwing a uevent may sleep.
458 */
459 static void batadv_bla_loopdetect_report(struct work_struct *work)
460 {
461 struct batadv_bla_backbone_gw *backbone_gw;
462 struct batadv_priv *bat_priv;
463 char vid_str[6] = { '\0' };
464
465 backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
466 report_work);
467 bat_priv = backbone_gw->bat_priv;
468
469 batadv_info(bat_priv->soft_iface,
470 "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
471 BATADV_PRINT_VID(backbone_gw->vid));
472 snprintf(vid_str, sizeof(vid_str), "%d",
473 BATADV_PRINT_VID(backbone_gw->vid));
474 vid_str[sizeof(vid_str) - 1] = 0;
475
476 batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
477 vid_str);
478
479 batadv_backbone_gw_put(backbone_gw);
480 }
481
482 /**
483 * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
484 * @bat_priv: the bat priv with all the soft interface information
485 * @orig: the mac address of the originator
486 * @vid: the VLAN ID
487 * @own_backbone: set if the requested backbone is local
488 *
489 * Return: the (possibly created) backbone gateway or NULL on error
490 */
491 static struct batadv_bla_backbone_gw *
492 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
493 unsigned short vid, bool own_backbone)
494 {
495 struct batadv_bla_backbone_gw *entry;
496 struct batadv_orig_node *orig_node;
497 int hash_added;
498
499 entry = batadv_backbone_hash_find(bat_priv, orig, vid);
500
501 if (entry)
502 return entry;
503
504 batadv_dbg(BATADV_DBG_BLA, bat_priv,
505 "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
506 orig, BATADV_PRINT_VID(vid));
507
508 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
509 if (!entry)
510 return NULL;
511
512 entry->vid = vid;
513 entry->lasttime = jiffies;
514 entry->crc = BATADV_BLA_CRC_INIT;
515 entry->bat_priv = bat_priv;
516 spin_lock_init(&entry->crc_lock);
517 atomic_set(&entry->request_sent, 0);
518 atomic_set(&entry->wait_periods, 0);
519 ether_addr_copy(entry->orig, orig);
520 INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
521
522 /* one for the hash, one for returning */
523 kref_init(&entry->refcount);
524 kref_get(&entry->refcount);
525
526 hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
527 batadv_compare_backbone_gw,
528 batadv_choose_backbone_gw, entry,
529 &entry->hash_entry);
530
531 if (unlikely(hash_added != 0)) {
532 /* hash failed, free the structure */
533 kfree(entry);
534 return NULL;
535 }
536
537 /* this is a gateway now, remove any TT entry on this VLAN */
538 orig_node = batadv_orig_hash_find(bat_priv, orig);
539 if (orig_node) {
540 batadv_tt_global_del_orig(bat_priv, orig_node, vid,
541 "became a backbone gateway");
542 batadv_orig_node_put(orig_node);
543 }
544
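/* for a locally created backbone, a pending claim table request is
 * simulated below: while bla.num_requests is non-zero, batadv_bla_rx()
 * drops broadcasts, which gives bridge forward delays and bla group
 * forming some grace time (see batadv_bla_periodic_work()).
 */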
545 if (own_backbone) {
546 batadv_bla_send_announce(bat_priv, entry);
547
548 /* this will be decreased in the worker thread */
549 atomic_inc(&entry->request_sent);
550 atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
551 atomic_inc(&bat_priv->bla.num_requests);
552 }
553
554 return entry;
555 }
556
557 /**
558 * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
559 * @bat_priv: the bat priv with all the soft interface information
560 * @primary_if: the selected primary interface
561 * @vid: VLAN identifier
562 *
563 * update or add the own backbone gw to make sure we announce
564 * where we receive other backbone gws
565 */
566 static void
567 batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
568 struct batadv_hard_iface *primary_if,
569 unsigned short vid)
570 {
571 struct batadv_bla_backbone_gw *backbone_gw;
572
573 backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
574 primary_if->net_dev->dev_addr,
575 vid, true);
576 if (unlikely(!backbone_gw))
577 return;
578
579 backbone_gw->lasttime = jiffies;
580 batadv_backbone_gw_put(backbone_gw);
581 }
582
583 /**
584 * batadv_bla_answer_request - answer a bla request by sending own claims
585 * @bat_priv: the bat priv with all the soft interface information
586 * @primary_if: interface where the request came on
587 * @vid: the vid where the request came on
588 *
589 * Repeat all of our own claims, and finally send an ANNOUNCE frame
590 * so that the requester can check again whether the CRC is correct now.
591 */
592 static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
593 struct batadv_hard_iface *primary_if,
594 unsigned short vid)
595 {
596 struct hlist_head *head;
597 struct batadv_hashtable *hash;
598 struct batadv_bla_claim *claim;
599 struct batadv_bla_backbone_gw *backbone_gw;
600 int i;
601
602 batadv_dbg(BATADV_DBG_BLA, bat_priv,
603 "bla_answer_request(): received a claim request, send all of our own claims again\n");
604
605 backbone_gw = batadv_backbone_hash_find(bat_priv,
606 primary_if->net_dev->dev_addr,
607 vid);
608 if (!backbone_gw)
609 return;
610
611 hash = bat_priv->bla.claim_hash;
612 for (i = 0; i < hash->size; i++) {
613 head = &hash->table[i];
614
615 rcu_read_lock();
616 hlist_for_each_entry_rcu(claim, head, hash_entry) {
617 /* only own claims are interesting */
618 if (claim->backbone_gw != backbone_gw)
619 continue;
620
621 batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
622 BATADV_CLAIM_TYPE_CLAIM);
623 }
624 rcu_read_unlock();
625 }
626
627 /* finally, send an announcement frame */
628 batadv_bla_send_announce(bat_priv, backbone_gw);
629 batadv_backbone_gw_put(backbone_gw);
630 }
631
632 /**
633 * batadv_bla_send_request - send a request to repeat claims
634 * @backbone_gw: the backbone gateway from whom we are out of sync
635 *
636 * When the crc is wrong, ask the backbone gateway for a full table update.
637 * After the request, it will repeat all of its own claims and finally
638 * send an announcement claim with which we can check again.
639 */
640 static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
641 {
642 /* first, remove all old entries */
643 batadv_bla_del_backbone_claims(backbone_gw);
644
645 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
646 "Sending REQUEST to %pM\n", backbone_gw->orig);
647
648 /* send request */
649 batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
650 backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
651
652 /* no local broadcasts should be sent or received, for now. */
653 if (!atomic_read(&backbone_gw->request_sent)) {
654 atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
655 atomic_set(&backbone_gw->request_sent, 1);
656 }
657 }
658
659 /**
660 * batadv_bla_send_announce - Send an announcement frame
661 * @bat_priv: the bat priv with all the soft interface information
662 * @backbone_gw: our backbone gateway which should be announced
663 */
664 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
665 struct batadv_bla_backbone_gw *backbone_gw)
666 {
667 u8 mac[ETH_ALEN];
668 __be16 crc;
669
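/* build the special announcement "client" address: the fixed 4 byte
 * announce prefix followed by the 2 byte claim checksum, which
 * batadv_handle_announce() extracts again on the receiving side.
 */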
670 memcpy(mac, batadv_announce_mac, 4);
671 spin_lock_bh(&backbone_gw->crc_lock);
672 crc = htons(backbone_gw->crc);
673 spin_unlock_bh(&backbone_gw->crc_lock);
674 memcpy(&mac[4], &crc, 2);
675
676 batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
677 BATADV_CLAIM_TYPE_ANNOUNCE);
678 }
679
680 /**
681 * batadv_bla_add_claim - Adds a claim in the claim hash
682 * @bat_priv: the bat priv with all the soft interface information
683 * @mac: the mac address of the claim
684 * @vid: the VLAN ID of the frame
685 * @backbone_gw: the backbone gateway which claims it
686 */
687 static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
688 const u8 *mac, const unsigned short vid,
689 struct batadv_bla_backbone_gw *backbone_gw)
690 {
691 struct batadv_bla_backbone_gw *old_backbone_gw;
692 struct batadv_bla_claim *claim;
693 struct batadv_bla_claim search_claim;
694 bool remove_crc = false;
695 int hash_added;
696
697 ether_addr_copy(search_claim.addr, mac);
698 search_claim.vid = vid;
699 claim = batadv_claim_hash_find(bat_priv, &search_claim);
700
701 /* create a new claim entry if it does not exist yet. */
702 if (!claim) {
703 claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
704 if (!claim)
705 return;
706
707 ether_addr_copy(claim->addr, mac);
708 spin_lock_init(&claim->backbone_lock);
709 claim->vid = vid;
710 claim->lasttime = jiffies;
711 kref_get(&backbone_gw->refcount);
712 claim->backbone_gw = backbone_gw;
713
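/* two references: one for the claim hash, one for the local pointer
 * (dropped again at claim_free_ref), mirroring the backbone_gw handling
 * in batadv_bla_get_backbone_gw().
 */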
714 kref_init(&claim->refcount);
715 kref_get(&claim->refcount);
716 batadv_dbg(BATADV_DBG_BLA, bat_priv,
717 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
718 mac, BATADV_PRINT_VID(vid));
719 hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
720 batadv_compare_claim,
721 batadv_choose_claim, claim,
722 &claim->hash_entry);
723
724 if (unlikely(hash_added != 0)) {
725 /* only local changes happened. */
726 kfree(claim);
727 return;
728 }
729 } else {
730 claim->lasttime = jiffies;
731 if (claim->backbone_gw == backbone_gw)
732 /* no need to register a new backbone */
733 goto claim_free_ref;
734
735 batadv_dbg(BATADV_DBG_BLA, bat_priv,
736 "bla_add_claim(): changing ownership for %pM, vid %d\n",
737 mac, BATADV_PRINT_VID(vid));
738
739 remove_crc = true;
740 }
741
742 /* replace backbone_gw atomically and adjust reference counters */
743 spin_lock_bh(&claim->backbone_lock);
744 old_backbone_gw = claim->backbone_gw;
745 kref_get(&backbone_gw->refcount);
746 claim->backbone_gw = backbone_gw;
747 spin_unlock_bh(&claim->backbone_lock);
748
749 if (remove_crc) {
750 /* remove claim address from old backbone_gw */
751 spin_lock_bh(&old_backbone_gw->crc_lock);
752 old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
753 spin_unlock_bh(&old_backbone_gw->crc_lock);
754 }
755
756 batadv_backbone_gw_put(old_backbone_gw);
757
758 /* add claim address to new backbone_gw */
759 spin_lock_bh(&backbone_gw->crc_lock);
760 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
761 spin_unlock_bh(&backbone_gw->crc_lock);
762 backbone_gw->lasttime = jiffies;
763
764 claim_free_ref:
765 batadv_claim_put(claim);
766 }
767
768 /**
769 * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
770 * claim
771 * @claim: claim whose backbone_gw should be returned
772 *
773 * Return: valid reference to claim::backbone_gw
774 */
775 static struct batadv_bla_backbone_gw *
776 batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
777 {
778 struct batadv_bla_backbone_gw *backbone_gw;
779
780 spin_lock_bh(&claim->backbone_lock);
781 backbone_gw = claim->backbone_gw;
782 kref_get(&backbone_gw->refcount);
783 spin_unlock_bh(&claim->backbone_lock);
784
785 return backbone_gw;
786 }
787
788 /**
789 * batadv_bla_del_claim - delete a claim from the claim hash
790 * @bat_priv: the bat priv with all the soft interface information
791 * @mac: mac address of the claim to be removed
792 * @vid: VLAN id for the claim to be removed
793 */
794 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
795 const u8 *mac, const unsigned short vid)
796 {
797 struct batadv_bla_claim search_claim, *claim;
798
799 ether_addr_copy(search_claim.addr, mac);
800 search_claim.vid = vid;
801 claim = batadv_claim_hash_find(bat_priv, &search_claim);
802 if (!claim)
803 return;
804
805 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
806 mac, BATADV_PRINT_VID(vid));
807
808 batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
809 batadv_choose_claim, claim);
810 batadv_claim_put(claim); /* reference from the hash is gone */
811
812 /* don't need the reference from hash_find() anymore */
813 batadv_claim_put(claim);
814 }
815
816 /**
817 * batadv_handle_announce - check for ANNOUNCE frame
818 * @bat_priv: the bat priv with all the soft interface information
819 * @an_addr: announcement mac address (ARP Sender HW address)
820 * @backbone_addr: originator address of the sender (Ethernet source MAC)
821 * @vid: the VLAN ID of the frame
822 *
823 * Return: true if handled
824 */
825 static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
826 u8 *backbone_addr, unsigned short vid)
827 {
828 struct batadv_bla_backbone_gw *backbone_gw;
829 u16 backbone_crc, crc;
830
831 if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
832 return false;
833
834 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
835 false);
836
837 if (unlikely(!backbone_gw))
838 return true;
839
840 /* handle as ANNOUNCE frame */
841 backbone_gw->lasttime = jiffies;
842 crc = ntohs(*((__be16 *)(&an_addr[4])));
843
844 batadv_dbg(BATADV_DBG_BLA, bat_priv,
845 "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
846 BATADV_PRINT_VID(vid), backbone_gw->orig, crc);
847
848 spin_lock_bh(&backbone_gw->crc_lock);
849 backbone_crc = backbone_gw->crc;
850 spin_unlock_bh(&backbone_gw->crc_lock);
851
852 if (backbone_crc != crc) {
853 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
854 "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
855 backbone_gw->orig,
856 BATADV_PRINT_VID(backbone_gw->vid),
857 backbone_crc, crc);
858
859 batadv_bla_send_request(backbone_gw);
860 } else {
861 /* if we have sent a request and the crc was OK,
862 * we can allow traffic again.
863 */
864 if (atomic_read(&backbone_gw->request_sent)) {
865 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
866 atomic_set(&backbone_gw->request_sent, 0);
867 }
868 }
869
870 batadv_backbone_gw_put(backbone_gw);
871 return true;
872 }
873
874 /**
875 * batadv_handle_request - check for REQUEST frame
876 * @bat_priv: the bat priv with all the soft interface information
877 * @primary_if: the primary hard interface of this batman soft interface
878 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
879 * @ethhdr: ethernet header of a packet
880 * @vid: the VLAN ID of the frame
881 *
882 * Return: true if handled
883 */
884 static bool batadv_handle_request(struct batadv_priv *bat_priv,
885 struct batadv_hard_iface *primary_if,
886 u8 *backbone_addr, struct ethhdr *ethhdr,
887 unsigned short vid)
888 {
889 /* check for REQUEST frame */
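/* a REQUEST carries the addressed backbone's MAC both as ARP sender HW
 * address and as ethernet destination (see batadv_bla_send_claim()), so
 * the two addresses must match here.
 */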
890 if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
891 return false;
892
893 /* sanity check, this should not happen on a normal switch,
894 * we ignore it in this case.
895 */
896 if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
897 return true;
898
899 batadv_dbg(BATADV_DBG_BLA, bat_priv,
900 "handle_request(): REQUEST vid %d (sent by %pM)...\n",
901 BATADV_PRINT_VID(vid), ethhdr->h_source);
902
903 batadv_bla_answer_request(bat_priv, primary_if, vid);
904 return true;
905 }
906
907 /**
908 * batadv_handle_unclaim - check for UNCLAIM frame
909 * @bat_priv: the bat priv with all the soft interface information
910 * @primary_if: the primary hard interface of this batman soft interface
911 * @backbone_addr: originator address of the backbone (Ethernet source)
912 * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
913 * @vid: the VLAN ID of the frame
914 *
915 * Return: true if handled
916 */
917 static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
918 struct batadv_hard_iface *primary_if,
919 u8 *backbone_addr, u8 *claim_addr,
920 unsigned short vid)
921 {
922 struct batadv_bla_backbone_gw *backbone_gw;
923
924 /* unclaim in any case if it is our own */
925 if (primary_if && batadv_compare_eth(backbone_addr,
926 primary_if->net_dev->dev_addr))
927 batadv_bla_send_claim(bat_priv, claim_addr, vid,
928 BATADV_CLAIM_TYPE_UNCLAIM);
929
930 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
931
932 if (!backbone_gw)
933 return true;
934
935 /* this must be an UNCLAIM frame */
936 batadv_dbg(BATADV_DBG_BLA, bat_priv,
937 "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
938 claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);
939
940 batadv_bla_del_claim(bat_priv, claim_addr, vid);
941 batadv_backbone_gw_put(backbone_gw);
942 return true;
943 }
944
945 /**
946 * batadv_handle_claim - check for CLAIM frame
947 * @bat_priv: the bat priv with all the soft interface information
948 * @primary_if: the primary hard interface of this batman soft interface
949 * @backbone_addr: originator address of the backbone (Ethernet Source)
950 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
951 * @vid: the VLAN ID of the frame
952 *
953 * Return: true if handled
954 */
955 static bool batadv_handle_claim(struct batadv_priv *bat_priv,
956 struct batadv_hard_iface *primary_if,
957 u8 *backbone_addr, u8 *claim_addr,
958 unsigned short vid)
959 {
960 struct batadv_bla_backbone_gw *backbone_gw;
961
962 /* register the gateway if not yet available, and add the claim. */
963
964 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
965 false);
966
967 if (unlikely(!backbone_gw))
968 return true;
969
970 /* this must be a CLAIM frame */
971 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
972 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
973 batadv_bla_send_claim(bat_priv, claim_addr, vid,
974 BATADV_CLAIM_TYPE_CLAIM);
975
976 /* TODO: we could call something like tt_local_del() here. */
977
978 batadv_backbone_gw_put(backbone_gw);
979 return true;
980 }
981
982 /**
983 * batadv_check_claim_group - check for claim group membership
984 * @bat_priv: the bat priv with all the soft interface information
985 * @primary_if: the primary interface of this batman interface
986 * @hw_src: the Hardware source in the ARP Header
987 * @hw_dst: the Hardware destination in the ARP Header
988 * @ethhdr: pointer to the Ethernet header of the claim frame
989 *
990 * checks if it is a claim packet and if it is on the same group.
991 * This function also applies the group ID of the sender
992 * if it is in the same mesh.
993 *
994 * Return:
995 * 2 - if it is a claim packet and on the same group
996 * 1 - if it is a claim packet from another group
997 * 0 - if it is not a claim packet
998 */
999 static int batadv_check_claim_group(struct batadv_priv *bat_priv,
1000 struct batadv_hard_iface *primary_if,
1001 u8 *hw_src, u8 *hw_dst,
1002 struct ethhdr *ethhdr)
1003 {
1004 u8 *backbone_addr;
1005 struct batadv_orig_node *orig_node;
1006 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1007
1008 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1009 bla_dst_own = &bat_priv->bla.claim_dest;
1010
1011 /* if announcement packet, use the source,
1012 * otherwise assume it is in the hw_src
1013 */
1014 switch (bla_dst->type) {
1015 case BATADV_CLAIM_TYPE_CLAIM:
1016 backbone_addr = hw_src;
1017 break;
1018 case BATADV_CLAIM_TYPE_REQUEST:
1019 case BATADV_CLAIM_TYPE_ANNOUNCE:
1020 case BATADV_CLAIM_TYPE_UNCLAIM:
1021 backbone_addr = ethhdr->h_source;
1022 break;
1023 default:
1024 return 0;
1025 }
1026
1027 /* don't accept claim frames from ourselves */
1028 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
1029 return 0;
1030
1031 /* if it's already the same group, it is fine. */
1032 if (bla_dst->group == bla_dst_own->group)
1033 return 2;
1034
1035 /* let's see if this originator is in our mesh */
1036 orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
1037
1038 * don't accept claims from gateways which are not in
1039 * the same mesh or group.
1040 */
1041 if (!orig_node)
1042 return 1;
1043
1044 /* if our mesh friend's claim group ID is bigger, use it for ourselves. */
1045 if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
1046 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1047 "taking other backbones claim group: %#.4x\n",
1048 ntohs(bla_dst->group));
1049 bla_dst_own->group = bla_dst->group;
1050 }
1051
1052 batadv_orig_node_put(orig_node);
1053
1054 return 2;
1055 }
1056
1057 /**
1058 * batadv_bla_process_claim - Check if this is a claim frame, and process it
1059 * @bat_priv: the bat priv with all the soft interface information
1060 * @primary_if: the primary hard interface of this batman soft interface
1061 * @skb: the frame to be checked
1062 *
1063 * Return: true if it was a claim frame, otherwise return false to
1064 * tell the callee that it can use the frame on its own.
1065 */
1066 static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
1067 struct batadv_hard_iface *primary_if,
1068 struct sk_buff *skb)
1069 {
1070 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1071 u8 *hw_src, *hw_dst;
1072 struct vlan_hdr *vhdr, vhdr_buf;
1073 struct ethhdr *ethhdr;
1074 struct arphdr *arphdr;
1075 unsigned short vid;
1076 int vlan_depth = 0;
1077 __be16 proto;
1078 int headlen;
1079 int ret;
1080
1081 vid = batadv_get_vid(skb, 0);
1082 ethhdr = eth_hdr(skb);
1083
1084 proto = ethhdr->h_proto;
1085 headlen = ETH_HLEN;
1086 if (vid & BATADV_VLAN_HAS_TAG) {
1087 /* Traverse the VLAN/Ethertypes.
1088 *
1089 * At this point it is known that the first protocol is a VLAN
1090 * header, so start checking at the encapsulated protocol.
1091 *
1092 * The depth of the VLAN headers is recorded to drop BLA claim
1093 * frames encapsulated into multiple VLAN headers (QinQ).
1094 */
1095 do {
1096 vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
1097 &vhdr_buf);
1098 if (!vhdr)
1099 return false;
1100
1101 proto = vhdr->h_vlan_encapsulated_proto;
1102 headlen += VLAN_HLEN;
1103 vlan_depth++;
1104 } while (proto == htons(ETH_P_8021Q));
1105 }
1106
1107 if (proto != htons(ETH_P_ARP))
1108 return false; /* not a claim frame */
1109
1110 /* this must be an ARP frame. Check if it is a claim. */
1111
1112 if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
1113 return false;
1114
1115 /* pskb_may_pull() may have modified the pointers, get ethhdr again */
1116 ethhdr = eth_hdr(skb);
1117 arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
1118
1119 /* Check whether the ARP frame carries valid
1120 * IP information
1121 */
1122 if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
1123 return false;
1124 if (arphdr->ar_pro != htons(ETH_P_IP))
1125 return false;
1126 if (arphdr->ar_hln != ETH_ALEN)
1127 return false;
1128 if (arphdr->ar_pln != 4)
1129 return false;
1130
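/* ARP payload layout: sender HW (6), sender IP (4), target HW (6),
 * target IP (4). The claim destination info (magic, type, group) is
 * carried in the target HW field, hence the hw_dst cast below.
 */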
1131 hw_src = (u8 *)arphdr + sizeof(struct arphdr);
1132 hw_dst = hw_src + ETH_ALEN + 4;
1133 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1134 bla_dst_own = &bat_priv->bla.claim_dest;
1135
1136 /* check if it is a claim frame in general */
1137 if (memcmp(bla_dst->magic, bla_dst_own->magic,
1138 sizeof(bla_dst->magic)) != 0)
1139 return false;
1140
1141 /* check if there is a claim frame encapsulated deeper in (QinQ) and
1142 * drop that, as this is not supported by BLA but should also not be
1143 * sent via the mesh.
1144 */
1145 if (vlan_depth > 1)
1146 return true;
1147
1148 /* Let the loopdetect frames on the mesh in any case. */
1149 if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
1150 return false;
1151
1152 /* check if it is a claim frame. */
1153 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
1154 ethhdr);
1155 if (ret == 1)
1156 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1157 "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1158 ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
1159 hw_dst);
1160
1161 if (ret < 2)
1162 return !!ret;
1163
1164 /* become a backbone gw ourselves on this vlan if not happened yet */
1165 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1166
1167 /* check for the different types of claim frames ... */
1168 switch (bla_dst->type) {
1169 case BATADV_CLAIM_TYPE_CLAIM:
1170 if (batadv_handle_claim(bat_priv, primary_if, hw_src,
1171 ethhdr->h_source, vid))
1172 return true;
1173 break;
1174 case BATADV_CLAIM_TYPE_UNCLAIM:
1175 if (batadv_handle_unclaim(bat_priv, primary_if,
1176 ethhdr->h_source, hw_src, vid))
1177 return true;
1178 break;
1179
1180 case BATADV_CLAIM_TYPE_ANNOUNCE:
1181 if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
1182 vid))
1183 return true;
1184 break;
1185 case BATADV_CLAIM_TYPE_REQUEST:
1186 if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
1187 vid))
1188 return true;
1189 break;
1190 }
1191
1192 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1193 "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1194 ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
1195 return true;
1196 }
1197
1198 /**
1199 * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
1200 * immediately
1201 * @bat_priv: the bat priv with all the soft interface information
1202 * @now: whether the whole hash shall be wiped now
1203 *
1204 * Check when we last heard from other nodes, and remove them in case of
1205 * a time out, or clean all backbone gws if now is set.
1206 */
1207 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
1208 {
1209 struct batadv_bla_backbone_gw *backbone_gw;
1210 struct hlist_node *node_tmp;
1211 struct hlist_head *head;
1212 struct batadv_hashtable *hash;
1213 spinlock_t *list_lock; /* protects write access to the hash lists */
1214 int i;
1215
1216 hash = bat_priv->bla.backbone_hash;
1217 if (!hash)
1218 return;
1219
1220 for (i = 0; i < hash->size; i++) {
1221 head = &hash->table[i];
1222 list_lock = &hash->list_locks[i];
1223
1224 spin_lock_bh(list_lock);
1225 hlist_for_each_entry_safe(backbone_gw, node_tmp,
1226 head, hash_entry) {
1227 if (now)
1228 goto purge_now;
1229 if (!batadv_has_timed_out(backbone_gw->lasttime,
1230 BATADV_BLA_BACKBONE_TIMEOUT))
1231 continue;
1232
1233 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
1234 "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
1235 backbone_gw->orig);
1236
1237 purge_now:
1238 /* don't wait for the pending request anymore */
1239 if (atomic_read(&backbone_gw->request_sent))
1240 atomic_dec(&bat_priv->bla.num_requests);
1241
1242 batadv_bla_del_backbone_claims(backbone_gw);
1243
1244 hlist_del_rcu(&backbone_gw->hash_entry);
1245 batadv_backbone_gw_put(backbone_gw);
1246 }
1247 spin_unlock_bh(list_lock);
1248 }
1249 }
1250
1251 /**
1252 * batadv_bla_purge_claims - Remove claims after a timeout or immediately
1253 * @bat_priv: the bat priv with all the soft interface information
1254 * @primary_if: the selected primary interface, may be NULL if now is set
1255 * @now: whether the whole hash shall be wiped now
1256 *
1257 * Check when we last heard from our own claims, and remove them in case of
1258 * a time out, or clean all claims if now is set
1259 */
1260 static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
1261 struct batadv_hard_iface *primary_if,
1262 int now)
1263 {
1264 struct batadv_bla_backbone_gw *backbone_gw;
1265 struct batadv_bla_claim *claim;
1266 struct hlist_head *head;
1267 struct batadv_hashtable *hash;
1268 int i;
1269
1270 hash = bat_priv->bla.claim_hash;
1271 if (!hash)
1272 return;
1273
1274 for (i = 0; i < hash->size; i++) {
1275 head = &hash->table[i];
1276
1277 rcu_read_lock();
1278 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1279 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1280 if (now)
1281 goto purge_now;
1282
1283 if (!batadv_compare_eth(backbone_gw->orig,
1284 primary_if->net_dev->dev_addr))
1285 goto skip;
1286
1287 if (!batadv_has_timed_out(claim->lasttime,
1288 BATADV_BLA_CLAIM_TIMEOUT))
1289 goto skip;
1290
1291 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1292 "bla_purge_claims(): %pM, vid %d, time out\n",
1293 claim->addr, claim->vid);
1294
1295 purge_now:
1296 batadv_handle_unclaim(bat_priv, primary_if,
1297 backbone_gw->orig,
1298 claim->addr, claim->vid);
1299 skip:
1300 batadv_backbone_gw_put(backbone_gw);
1301 }
1302 rcu_read_unlock();
1303 }
1304 }
1305
1306 /**
1307 * batadv_bla_update_orig_address - Update the backbone gateways when the own
1308 * originator address changes
1309 * @bat_priv: the bat priv with all the soft interface information
1310 * @primary_if: the new selected primary_if
1311 * @oldif: the old primary interface, may be NULL
1312 */
1313 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1314 struct batadv_hard_iface *primary_if,
1315 struct batadv_hard_iface *oldif)
1316 {
1317 struct batadv_bla_backbone_gw *backbone_gw;
1318 struct hlist_head *head;
1319 struct batadv_hashtable *hash;
1320 __be16 group;
1321 int i;
1322
1323 /* reset bridge loop avoidance group id */
1324 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1325 bat_priv->bla.claim_dest.group = group;
1326
1327 /* purge everything when bridge loop avoidance is turned off */
1328 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1329 oldif = NULL;
1330
1331 if (!oldif) {
1332 batadv_bla_purge_claims(bat_priv, NULL, 1);
1333 batadv_bla_purge_backbone_gw(bat_priv, 1);
1334 return;
1335 }
1336
1337 hash = bat_priv->bla.backbone_hash;
1338 if (!hash)
1339 return;
1340
1341 for (i = 0; i < hash->size; i++) {
1342 head = &hash->table[i];
1343
1344 rcu_read_lock();
1345 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1346 /* own orig still holds the old value. */
1347 if (!batadv_compare_eth(backbone_gw->orig,
1348 oldif->net_dev->dev_addr))
1349 continue;
1350
1351 ether_addr_copy(backbone_gw->orig,
1352 primary_if->net_dev->dev_addr);
1353 /* send an announce frame so others will ask for our
1354 * claims and update their tables.
1355 */
1356 batadv_bla_send_announce(bat_priv, backbone_gw);
1357 }
1358 rcu_read_unlock();
1359 }
1360 }
1361
1362 /**
1363 * batadv_bla_send_loopdetect - send a loopdetect frame
1364 * @bat_priv: the bat priv with all the soft interface information
1365 * @backbone_gw: the backbone gateway for which a loop should be detected
1366 *
1367 * To detect loops that the bridge loop avoidance can't handle, send a loop
1368 * detection packet on the backbone. Unlike other BLA frames, this frame will
1369 * be allowed on the mesh by other nodes. If it is received on the mesh, this
1370 * indicates that there is a loop.
1371 */
1372 static void
1373 batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
1374 struct batadv_bla_backbone_gw *backbone_gw)
1375 {
1376 batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
1377 backbone_gw->vid);
1378 batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
1379 backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
1380 }
1381
1382 /**
1383 * batadv_bla_status_update - purge bla interfaces if necessary
1384 * @net_dev: the soft interface net device
1385 */
1386 void batadv_bla_status_update(struct net_device *net_dev)
1387 {
1388 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1389 struct batadv_hard_iface *primary_if;
1390
1391 primary_if = batadv_primary_if_get_selected(bat_priv);
1392 if (!primary_if)
1393 return;
1394
1395 /* this function already purges everything when bla is disabled,
1396 * so just call that one.
1397 */
1398 batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
1399 batadv_hardif_put(primary_if);
1400 }
1401
1402 /**
1403 * batadv_bla_periodic_work - performs periodic bla work
1404 * @work: kernel work struct
1405 *
1406 * periodic work to do:
1407 * * purge structures when they are too old
1408 * * send announcements
1409 */
1410 static void batadv_bla_periodic_work(struct work_struct *work)
1411 {
1412 struct delayed_work *delayed_work;
1413 struct batadv_priv *bat_priv;
1414 struct batadv_priv_bla *priv_bla;
1415 struct hlist_head *head;
1416 struct batadv_bla_backbone_gw *backbone_gw;
1417 struct batadv_hashtable *hash;
1418 struct batadv_hard_iface *primary_if;
1419 bool send_loopdetect = false;
1420 int i;
1421
1422 delayed_work = to_delayed_work(work);
1423 priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
1424 bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1425 primary_if = batadv_primary_if_get_selected(bat_priv);
1426 if (!primary_if)
1427 goto out;
1428
1429 batadv_bla_purge_claims(bat_priv, primary_if, 0);
1430 batadv_bla_purge_backbone_gw(bat_priv, 0);
1431
1432 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1433 goto out;
1434
1435 if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
1436 /* set a new random mac address for the next bridge loop
1437 * detection frames. Set the locally administered bit to avoid
1438 * collisions with users' mac addresses.
1439 */
1440 random_ether_addr(bat_priv->bla.loopdetect_addr);
1441 bat_priv->bla.loopdetect_addr[0] = 0xba;
1442 bat_priv->bla.loopdetect_addr[1] = 0xbe;
1443 bat_priv->bla.loopdetect_lasttime = jiffies;
1444 atomic_set(&bat_priv->bla.loopdetect_next,
1445 BATADV_BLA_LOOPDETECT_PERIODS);
1446
1447 /* mark for sending loop detect on all VLANs */
1448 send_loopdetect = true;
1449 }
1450
1451 hash = bat_priv->bla.backbone_hash;
1452 if (!hash)
1453 goto out;
1454
1455 for (i = 0; i < hash->size; i++) {
1456 head = &hash->table[i];
1457
1458 rcu_read_lock();
1459 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1460 if (!batadv_compare_eth(backbone_gw->orig,
1461 primary_if->net_dev->dev_addr))
1462 continue;
1463
1464 backbone_gw->lasttime = jiffies;
1465
1466 batadv_bla_send_announce(bat_priv, backbone_gw);
1467 if (send_loopdetect)
1468 batadv_bla_send_loopdetect(bat_priv,
1469 backbone_gw);
1470
1471 /* request_sent is only set after creation to avoid
1472 * problems when we are not yet known as backbone gw
1473 * in the backbone.
1474 *
1475 * We can reset this now after we waited some periods
1476 * to give bridge forward delays and bla group forming
1477 * some grace time.
1478 */
1479
1480 if (atomic_read(&backbone_gw->request_sent) == 0)
1481 continue;
1482
1483 if (!atomic_dec_and_test(&backbone_gw->wait_periods))
1484 continue;
1485
1486 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
1487 atomic_set(&backbone_gw->request_sent, 0);
1488 }
1489 rcu_read_unlock();
1490 }
1491 out:
1492 if (primary_if)
1493 batadv_hardif_put(primary_if);
1494
1495 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1496 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1497 }
1498
1499 /* The claim hash and the backbone hash receive the same lock class key
1500 * because they are both initialized by hash_new with the same key.
1501 * Reinitializing them with two different keys allows nested locking
1502 * without generating lockdep warnings.
1503 */
1504 static struct lock_class_key batadv_claim_hash_lock_class_key;
1505 static struct lock_class_key batadv_backbone_hash_lock_class_key;
1506
1507 /**
1508 * batadv_bla_init - initialize all bla structures
1509 * @bat_priv: the bat priv with all the soft interface information
1510 *
1511 * Return: 0 on success, < 0 on error.
1512 */
1513 int batadv_bla_init(struct batadv_priv *bat_priv)
1514 {
1515 int i;
1516 u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1517 struct batadv_hard_iface *primary_if;
1518 u16 crc;
1519 unsigned long entrytime;
1520
1521 spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
1522
1523 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1524
1525 /* setting claim destination address */
1526 memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
1527 bat_priv->bla.claim_dest.type = 0;
1528 primary_if = batadv_primary_if_get_selected(bat_priv);
1529 if (primary_if) {
1530 crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
1531 bat_priv->bla.claim_dest.group = htons(crc);
1532 batadv_hardif_put(primary_if);
1533 } else {
1534 bat_priv->bla.claim_dest.group = 0; /* will be set later */
1535 }
1536
1537 /* initialize the duplicate list */
1538 entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1539 for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1540 bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
1541 bat_priv->bla.bcast_duplist_curr = 0;
1542
1543 atomic_set(&bat_priv->bla.loopdetect_next,
1544 BATADV_BLA_LOOPDETECT_PERIODS);
1545
1546 if (bat_priv->bla.claim_hash)
1547 return 0;
1548
1549 bat_priv->bla.claim_hash = batadv_hash_new(128);
1550 bat_priv->bla.backbone_hash = batadv_hash_new(32);
1551
1552 if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
1553 return -ENOMEM;
1554
1555 batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1556 &batadv_claim_hash_lock_class_key);
1557 batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1558 &batadv_backbone_hash_lock_class_key);
1559
1560 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
1561
1562 INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
1563
1564 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1565 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1566 return 0;
1567 }
1568
1569 /**
1570 * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast dup.
1571 * @bat_priv: the bat priv with all the soft interface information
1572 * @skb: contains the bcast_packet to be checked
1573 *
1574 * check if it is on our broadcast list. Another gateway might
1575 * have sent the same packet because it is connected to the same backbone,
1576 * so we have to remove this duplicate.
1577 *
1578 * This is performed by checking the CRC, which will tell us
1579 * with a good chance that it is the same packet. If it is furthermore
1580 * sent by another host, drop it. We allow equal packets from
1581 * the same host however as this might be intended.
1582 *
1583 * Return: true if a packet is in the duplicate list, false otherwise.
1584 */
1585 bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1586 struct sk_buff *skb)
1587 {
1588 int i, curr;
1589 __be32 crc;
1590 struct batadv_bcast_packet *bcast_packet;
1591 struct batadv_bcast_duplist_entry *entry;
1592 bool ret = false;
1593
1594 bcast_packet = (struct batadv_bcast_packet *)skb->data;
1595
1596 /* calculate the crc ... */
1597 crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
1598
1599 spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
1600
1601 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1602 curr = (bat_priv->bla.bcast_duplist_curr + i);
1603 curr %= BATADV_DUPLIST_SIZE;
1604 entry = &bat_priv->bla.bcast_duplist[curr];
1605
1606 /* we can stop searching if the entry is too old;
1607 * later entries will be even older
1608 */
1609 if (batadv_has_timed_out(entry->entrytime,
1610 BATADV_DUPLIST_TIMEOUT))
1611 break;
1612
1613 if (entry->crc != crc)
1614 continue;
1615
1616 if (batadv_compare_eth(entry->orig, bcast_packet->orig))
1617 continue;
1618
1619 /* this entry seems to match: same crc, not too old,
1620 * and from another gw. therefore return true to forbid it.
1621 */
1622 ret = true;
1623 goto out;
1624 }
1625 /* not found, add a new entry (overwrite the oldest entry)
1626 * and allow it, it's the first occurrence.
1627 */
1628 curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1629 curr %= BATADV_DUPLIST_SIZE;
1630 entry = &bat_priv->bla.bcast_duplist[curr];
1631 entry->crc = crc;
1632 entry->entrytime = jiffies;
1633 ether_addr_copy(entry->orig, bcast_packet->orig);
1634 bat_priv->bla.bcast_duplist_curr = curr;
1635
1636 out:
1637 spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
1638
1639 return ret;
1640 }
1641
1642 /**
1643 * batadv_bla_is_backbone_gw_orig - Check if the originator is a gateway for
1644 * the VLAN identified by vid.
1645 * @bat_priv: the bat priv with all the soft interface information
1646 * @orig: originator mac address
1647 * @vid: VLAN identifier
1648 *
1649 * Return: true if orig is a backbone for this vid, false otherwise.
1650 */
1651 bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
1652 unsigned short vid)
1653 {
1654 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1655 struct hlist_head *head;
1656 struct batadv_bla_backbone_gw *backbone_gw;
1657 int i;
1658
1659 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1660 return false;
1661
1662 if (!hash)
1663 return false;
1664
1665 for (i = 0; i < hash->size; i++) {
1666 head = &hash->table[i];
1667
1668 rcu_read_lock();
1669 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1670 if (batadv_compare_eth(backbone_gw->orig, orig) &&
1671 backbone_gw->vid == vid) {
1672 rcu_read_unlock();
1673 return true;
1674 }
1675 }
1676 rcu_read_unlock();
1677 }
1678
1679 return false;
1680 }
1681
1682 /**
1683 * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN.
1684 * @skb: the frame to be checked
1685 * @orig_node: the orig_node of the frame
1686 * @hdr_size: length of the batman-adv header preceding the ethernet frame
1687 *
1688 * Return: true if the orig_node is also a gateway on the soft interface,
1689 * otherwise it returns false.
1690 */
1691 bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
1692 struct batadv_orig_node *orig_node, int hdr_size)
1693 {
1694 struct batadv_bla_backbone_gw *backbone_gw;
1695 unsigned short vid;
1696
1697 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1698 return false;
1699
1700 /* first, find out the vid. */
1701 if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1702 return false;
1703
1704 vid = batadv_get_vid(skb, hdr_size);
1705
1706 /* see if this originator is a backbone gw for this VLAN */
1707 backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
1708 orig_node->orig, vid);
1709 if (!backbone_gw)
1710 return false;
1711
1712 batadv_backbone_gw_put(backbone_gw);
1713 return true;
1714 }
1715
1716 /**
1717 * batadv_bla_free - free all bla structures
1718 * @bat_priv: the bat priv with all the soft interface information
1719 *
1720 * for softinterface free or module unload
1721 */
1722 void batadv_bla_free(struct batadv_priv *bat_priv)
1723 {
1724 struct batadv_hard_iface *primary_if;
1725
1726 cancel_delayed_work_sync(&bat_priv->bla.work);
1727 primary_if = batadv_primary_if_get_selected(bat_priv);
1728
1729 if (bat_priv->bla.claim_hash) {
1730 batadv_bla_purge_claims(bat_priv, primary_if, 1);
1731 batadv_hash_destroy(bat_priv->bla.claim_hash);
1732 bat_priv->bla.claim_hash = NULL;
1733 }
1734 if (bat_priv->bla.backbone_hash) {
1735 batadv_bla_purge_backbone_gw(bat_priv, 1);
1736 batadv_hash_destroy(bat_priv->bla.backbone_hash);
1737 bat_priv->bla.backbone_hash = NULL;
1738 }
1739 if (primary_if)
1740 batadv_hardif_put(primary_if);
1741 }
1742
1743 /**
1744 * batadv_bla_loopdetect_check - check and handle a detected loop
1745 * @bat_priv: the bat priv with all the soft interface information
1746 * @skb: the packet to check
1747 * @primary_if: interface where the request came on
1748 * @vid: the VLAN ID of the frame
1749 *
1750 * Checks if this packet is a loop detect frame which has been sent by us,
1751 * and throws an uevent and logs the event if that is the case.
1752 *
1753 * Return: true if it is a loop detect frame which is to be dropped, false
1754 * otherwise.
1755 */
1756 static bool
1757 batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1758 struct batadv_hard_iface *primary_if,
1759 unsigned short vid)
1760 {
1761 struct batadv_bla_backbone_gw *backbone_gw;
1762 struct ethhdr *ethhdr;
1763
1764 ethhdr = eth_hdr(skb);
1765
1766 /* Only check for the MAC address and skip more checks here for
1767 * performance reasons - this function is on the hotpath, after all.
1768 */
1769 if (!batadv_compare_eth(ethhdr->h_source,
1770 bat_priv->bla.loopdetect_addr))
1771 return false;
1772
1773 /* If the packet came too late, don't forward it on the mesh
1774 * but don't consider that a loop. It might be a coincidence.
1775 */
1776 if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
1777 BATADV_BLA_LOOPDETECT_TIMEOUT))
1778 return true;
1779
1780 backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
1781 primary_if->net_dev->dev_addr,
1782 vid, true);
1783 if (unlikely(!backbone_gw))
1784 return true;
1785
1786 queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1787 /* backbone_gw is unreferenced in the report work function */
1788
1789 return true;
1790 }
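
/* Context sketch (based on the loop detection design; the sending side is
 * not shown here): the periodic worker transmits loop-detect frames using
 * bla.loopdetect_addr as ethernet source and refreshes
 * bla.loopdetect_lasttime. If such a frame comes back through the mesh
 * within BATADV_BLA_LOOPDETECT_TIMEOUT, the check above queues the report
 * work which throws the uevent:
 *
 *	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
 *		goto handled;	(as done in batadv_bla_rx() below)
 */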
1791
1792 /**
1793 * batadv_bla_rx - check packets coming from the mesh.
1794 * @bat_priv: the bat priv with all the soft interface information
1795 * @skb: the frame to be checked
1796 * @vid: the VLAN ID of the frame
1797 * @is_bcast: true if the packet came in via a broadcast packet type
1798 *
1799 * batadv_bla_rx checks if:
1800 * * we have to race for a claim
1801 * * the frame is allowed on the LAN
1802 *
1803 * in these cases, the skb is further handled by this function.
1804 *
1805 * Return: true if handled, otherwise it returns false and the caller shall
1806 * further process the skb.
1807 */
1808 bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1809 unsigned short vid, bool is_bcast)
1810 {
1811 struct batadv_bla_backbone_gw *backbone_gw;
1812 struct ethhdr *ethhdr;
1813 struct batadv_bla_claim search_claim, *claim = NULL;
1814 struct batadv_hard_iface *primary_if;
1815 bool own_claim;
1816 bool ret;
1817
1818 ethhdr = eth_hdr(skb);
1819
1820 primary_if = batadv_primary_if_get_selected(bat_priv);
1821 if (!primary_if)
1822 goto handled;
1823
1824 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1825 goto allow;
1826
1827 if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
1828 goto handled;
1829
1830 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1831 /* don't allow broadcasts while requests are in flight */
1832 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
1833 goto handled;
1834
1835 ether_addr_copy(search_claim.addr, ethhdr->h_source);
1836 search_claim.vid = vid;
1837 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1838
1839 if (!claim) {
1840 /* possible optimization: race for a claim */
1841 /* No claim exists yet, claim it for us!
1842 */
1843 batadv_handle_claim(bat_priv, primary_if,
1844 primary_if->net_dev->dev_addr,
1845 ethhdr->h_source, vid);
1846 goto allow;
1847 }
1848
1849 /* if it is our own claim ... */
1850 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1851 own_claim = batadv_compare_eth(backbone_gw->orig,
1852 primary_if->net_dev->dev_addr);
1853 batadv_backbone_gw_put(backbone_gw);
1854
1855 if (own_claim) {
1856 /* ... allow it in any case */
1857 claim->lasttime = jiffies;
1858 goto allow;
1859 }
1860
1861 /* if it is a broadcast ... */
1862 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
1863 /* ... drop it. the responsible gateway is in charge.
1864 *
1865 * We need to check is_bcast because with the gateway
1866 * feature, broadcasts (like DHCP requests) may be sent
1867 * using a unicast packet type.
1868 */
1869 goto handled;
1870 } else {
1871 /* the client seems to consider us its best gateway;
1872 * send a claim and update the claim table
1873 * immediately.
1874 */
1875 batadv_handle_claim(bat_priv, primary_if,
1876 primary_if->net_dev->dev_addr,
1877 ethhdr->h_source, vid);
1878 goto allow;
1879 }
1880 allow:
1881 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1882 ret = false;
1883 goto out;
1884
1885 handled:
1886 kfree_skb(skb);
1887 ret = true;
1888
1889 out:
1890 if (primary_if)
1891 batadv_hardif_put(primary_if);
1892 if (claim)
1893 batadv_claim_put(claim);
1894 return ret;
1895 }
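
/* Usage sketch (illustrative; the caller is assumed to sit in the
 * soft-interface receive path and is not part of this file):
 *
 *	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
 *		goto out;
 *
 * a true return means the skb has already been consumed (kfree_skb) via the
 * handled path above; false means the caller keeps processing the frame.
 */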
1896
1897 /**
1898 * batadv_bla_tx - check packets going into the mesh
1899 * @bat_priv: the bat priv with all the soft interface information
1900 * @skb: the frame to be checked
1901 * @vid: the VLAN ID of the frame
1902 *
1903 * batadv_bla_tx checks if:
1904 * * a claim was received which has to be processed
1905 * * the frame is allowed on the mesh
1906 *
1907 * in these cases, the skb is further handled by this function.
1908 *
1909 * This call might reallocate skb data.
1910 *
1911 * Return: true if handled, otherwise it returns false and the caller shall
1912 * further process the skb.
1913 */
1914 bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1915 unsigned short vid)
1916 {
1917 struct ethhdr *ethhdr;
1918 struct batadv_bla_claim search_claim, *claim = NULL;
1919 struct batadv_bla_backbone_gw *backbone_gw;
1920 struct batadv_hard_iface *primary_if;
1921 bool client_roamed;
1922 bool ret = false;
1923
1924 primary_if = batadv_primary_if_get_selected(bat_priv);
1925 if (!primary_if)
1926 goto out;
1927
1928 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1929 goto allow;
1930
1931 if (batadv_bla_process_claim(bat_priv, primary_if, skb))
1932 goto handled;
1933
1934 ethhdr = eth_hdr(skb);
1935
1936 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1937 /* don't allow broadcasts while requests are in flight */
1938 if (is_multicast_ether_addr(ethhdr->h_dest))
1939 goto handled;
1940
1941 ether_addr_copy(search_claim.addr, ethhdr->h_source);
1942 search_claim.vid = vid;
1943
1944 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1945
1946 /* if no claim exists, allow it. */
1947 if (!claim)
1948 goto allow;
1949
1950 /* check if we are responsible. */
1951 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1952 client_roamed = batadv_compare_eth(backbone_gw->orig,
1953 primary_if->net_dev->dev_addr);
1954 batadv_backbone_gw_put(backbone_gw);
1955
1956 if (client_roamed) {
1957 /* if yes, the client has roamed and we have
1958 * to unclaim it.
1959 */
1960 batadv_handle_unclaim(bat_priv, primary_if,
1961 primary_if->net_dev->dev_addr,
1962 ethhdr->h_source, vid);
1963 goto allow;
1964 }
1965
1966 /* check if it is a multicast/broadcast frame */
1967 if (is_multicast_ether_addr(ethhdr->h_dest)) {
1968 /* drop it. the responsible gateway has forwarded it into
1969 * the backbone network.
1970 */
1971 goto handled;
1972 } else {
1973 /* we must allow it. at least if we are
1974 * responsible for the DESTINATION.
1975 */
1976 goto allow;
1977 }
1978 allow:
1979 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1980 ret = false;
1981 goto out;
1982 handled:
1983 ret = true;
1984 out:
1985 if (primary_if)
1986 batadv_hardif_put(primary_if);
1987 if (claim)
1988 batadv_claim_put(claim);
1989 return ret;
1990 }
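
/* Usage sketch (illustrative; the caller is assumed to sit in the
 * soft-interface transmit path and is not part of this file):
 *
 *	if (batadv_bla_tx(bat_priv, skb, vid))
 *		goto dropped;
 *
 * unlike batadv_bla_rx(), the handled path above does not free the skb, so
 * a true return means the caller itself has to drop the frame instead of
 * sending it into the mesh. Note that batadv_bla_process_claim() may
 * reallocate skb data, which is why ethhdr is only read afterwards.
 */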
1991
1992 /**
1993 * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
1994 * @seq: seq file to print on
1995 * @offset: not used
1996 *
1997 * Return: always 0
1998 */
1999 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
2000 {
2001 struct net_device *net_dev = (struct net_device *)seq->private;
2002 struct batadv_priv *bat_priv = netdev_priv(net_dev);
2003 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
2004 struct batadv_bla_backbone_gw *backbone_gw;
2005 struct batadv_bla_claim *claim;
2006 struct batadv_hard_iface *primary_if;
2007 struct hlist_head *head;
2008 u16 backbone_crc;
2009 u32 i;
2010 bool is_own;
2011 u8 *primary_addr;
2012
2013 primary_if = batadv_seq_print_text_primary_if_get(seq);
2014 if (!primary_if)
2015 goto out;
2016
2017 primary_addr = primary_if->net_dev->dev_addr;
2018 seq_printf(seq,
2019 "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
2020 net_dev->name, primary_addr,
2021 ntohs(bat_priv->bla.claim_dest.group));
2022 seq_puts(seq,
2023 " Client VID Originator [o] (CRC )\n");
2024 for (i = 0; i < hash->size; i++) {
2025 head = &hash->table[i];
2026
2027 rcu_read_lock();
2028 hlist_for_each_entry_rcu(claim, head, hash_entry) {
2029 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
2030
2031 is_own = batadv_compare_eth(backbone_gw->orig,
2032 primary_addr);
2033
2034 spin_lock_bh(&backbone_gw->crc_lock);
2035 backbone_crc = backbone_gw->crc;
2036 spin_unlock_bh(&backbone_gw->crc_lock);
2037 seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
2038 claim->addr, BATADV_PRINT_VID(claim->vid),
2039 backbone_gw->orig,
2040 (is_own ? 'x' : ' '),
2041 backbone_crc);
2042
2043 batadv_backbone_gw_put(backbone_gw);
2044 }
2045 rcu_read_unlock();
2046 }
2047 out:
2048 if (primary_if)
2049 batadv_hardif_put(primary_if);
2050 return 0;
2051 }
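
/* Example output (addresses, VID, group id and CRC are invented for
 * illustration; the format is taken from the seq_printf() calls above):
 *
 *	Claims announced for the mesh bat0 (orig 02:ba:de:af:fe:01, group id 0x1f42)
 *	 * f6:12:34:56:78:9a on     1 by 02:ba:de:af:fe:01 [x] (0x1a2b)
 */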
2052
2053 /**
2054 * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
2055 * file
2056 * @seq: seq file to print on
2057 * @offset: not used
2058 *
2059 * Return: always 0
2060 */
2061 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
2062 {
2063 struct net_device *net_dev = (struct net_device *)seq->private;
2064 struct batadv_priv *bat_priv = netdev_priv(net_dev);
2065 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
2066 struct batadv_bla_backbone_gw *backbone_gw;
2067 struct batadv_hard_iface *primary_if;
2068 struct hlist_head *head;
2069 int secs, msecs;
2070 u16 backbone_crc;
2071 u32 i;
2072 bool is_own;
2073 u8 *primary_addr;
2074
2075 primary_if = batadv_seq_print_text_primary_if_get(seq);
2076 if (!primary_if)
2077 goto out;
2078
2079 primary_addr = primary_if->net_dev->dev_addr;
2080 seq_printf(seq,
2081 "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
2082 net_dev->name, primary_addr,
2083 ntohs(bat_priv->bla.claim_dest.group));
2084 seq_puts(seq, " Originator VID last seen (CRC )\n");
2085 for (i = 0; i < hash->size; i++) {
2086 head = &hash->table[i];
2087
2088 rcu_read_lock();
2089 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
2090 msecs = jiffies_to_msecs(jiffies -
2091 backbone_gw->lasttime);
2092 secs = msecs / 1000;
2093 msecs = msecs % 1000;
2094
2095 is_own = batadv_compare_eth(backbone_gw->orig,
2096 primary_addr);
2097 if (is_own)
2098 continue;
2099
2100 spin_lock_bh(&backbone_gw->crc_lock);
2101 backbone_crc = backbone_gw->crc;
2102 spin_unlock_bh(&backbone_gw->crc_lock);
2103
2104 seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
2105 backbone_gw->orig,
2106 BATADV_PRINT_VID(backbone_gw->vid), secs,
2107 msecs, backbone_crc);
2108 }
2109 rcu_read_unlock();
2110 }
2111 out:
2112 if (primary_if)
2113 batadv_hardif_put(primary_if);
2114 return 0;
2115 }
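
/* Example output (addresses, VID, timing and CRC are invented for
 * illustration; the format is taken from the seq_printf() calls above,
 * entries of the local backbone gateway are skipped by the is_own check):
 *
 *	Backbones announced for the mesh bat0 (orig 02:ba:de:af:fe:01, group id 0x1f42)
 *	 * 02:ba:de:af:fe:02 on     1    2.345s (0x3c4d)
 */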