1 /* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
2 *
3 * Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include "main.h"
19 #include "hash.h"
20 #include "hard-interface.h"
21 #include "originator.h"
22 #include "bridge_loop_avoidance.h"
23 #include "translation-table.h"
24 #include "send.h"
25
26 #include <linux/etherdevice.h>
27 #include <linux/crc16.h>
28 #include <linux/if_arp.h>
29 #include <net/arp.h>
30 #include <linux/if_vlan.h>
31
32 static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
33
34 static void batadv_bla_periodic_work(struct work_struct *work);
35 static void
36 batadv_bla_send_announce(struct batadv_priv *bat_priv,
37 struct batadv_bla_backbone_gw *backbone_gw);
38
39 /* return the index of the claim */
40 static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
41 {
42 struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
43 uint32_t hash = 0;
44
45 hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
46 hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
47
48 hash += (hash << 3);
49 hash ^= (hash >> 11);
50 hash += (hash << 15);
51
52 return hash % size;
53 }
54
55 /* return the index of the backbone gateway */
56 static inline uint32_t batadv_choose_backbone_gw(const void *data,
57 uint32_t size)
58 {
59 const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
60 uint32_t hash = 0;
61
62 hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
63 hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
64
65 hash += (hash << 3);
66 hash ^= (hash >> 11);
67 hash += (hash << 15);
68
69 return hash % size;
70 }
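/* Illustrative sketch, not part of the original code: the two choose
 * functions above hash a backbone gateway through a struct
 * batadv_bla_claim pointer, which is assumed to work only because the
 * mac address (addr/orig) and vid members lead both structures with the
 * same layout. A hypothetical compile-time check documenting that
 * assumption could look like this:
 */
static inline void batadv_bla_example_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct batadv_bla_claim, addr) !=
		     offsetof(struct batadv_bla_backbone_gw, orig));
	BUILD_BUG_ON(offsetof(struct batadv_bla_claim, vid) !=
		     offsetof(struct batadv_bla_backbone_gw, vid));
}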
71
72
73 /* compares address and vid of two backbone gws */
74 static int batadv_compare_backbone_gw(const struct hlist_node *node,
75 const void *data2)
76 {
77 const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
78 hash_entry);
79 const struct batadv_bla_backbone_gw *gw1 = data1, *gw2 = data2;
80
81 if (!batadv_compare_eth(gw1->orig, gw2->orig))
82 return 0;
83
84 if (gw1->vid != gw2->vid)
85 return 0;
86
87 return 1;
88 }
89
90 /* compares address and vid of two claims */
91 static int batadv_compare_claim(const struct hlist_node *node,
92 const void *data2)
93 {
94 const void *data1 = container_of(node, struct batadv_bla_claim,
95 hash_entry);
96 const struct batadv_bla_claim *cl1 = data1, *cl2 = data2;
97
98 if (!batadv_compare_eth(cl1->addr, cl2->addr))
99 return 0;
100
101 if (cl1->vid != cl2->vid)
102 return 0;
103
104 return 1;
105 }
106
107 /* free a backbone gw */
108 static void
109 batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
110 {
111 if (atomic_dec_and_test(&backbone_gw->refcount))
112 kfree_rcu(backbone_gw, rcu);
113 }
114
115 /* finally deinitialize the claim */
116 static void batadv_claim_free_rcu(struct rcu_head *rcu)
117 {
118 struct batadv_bla_claim *claim;
119
120 claim = container_of(rcu, struct batadv_bla_claim, rcu);
121
122 batadv_backbone_gw_free_ref(claim->backbone_gw);
123 kfree(claim);
124 }
125
126 /* free a claim, call claim_free_rcu if it's the last reference */
127 static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
128 {
129 if (atomic_dec_and_test(&claim->refcount))
130 call_rcu(&claim->rcu, batadv_claim_free_rcu);
131 }
132
133 /**
134 * batadv_claim_hash_find - looks for a claim in the claim hash
135 * @bat_priv: the bat priv with all the soft interface information
136 * @data: search data (may be local/static data)
137 *
138 * looks for a claim in the hash, and returns it if found
139 * or NULL otherwise.
140 */
141 static struct batadv_bla_claim
142 *batadv_claim_hash_find(struct batadv_priv *bat_priv,
143 struct batadv_bla_claim *data)
144 {
145 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
146 struct hlist_head *head;
147 struct batadv_bla_claim *claim;
148 struct batadv_bla_claim *claim_tmp = NULL;
149 int index;
150
151 if (!hash)
152 return NULL;
153
154 index = batadv_choose_claim(data, hash->size);
155 head = &hash->table[index];
156
157 rcu_read_lock();
158 hlist_for_each_entry_rcu(claim, head, hash_entry) {
159 if (!batadv_compare_claim(&claim->hash_entry, data))
160 continue;
161
162 if (!atomic_inc_not_zero(&claim->refcount))
163 continue;
164
165 claim_tmp = claim;
166 break;
167 }
168 rcu_read_unlock();
169
170 return claim_tmp;
171 }
172
173 /**
174 * batadv_backbone_hash_find - looks for a backbone gateway in the hash
175 * @bat_priv: the bat priv with all the soft interface information
176 * @addr: the address of the originator
177 * @vid: the VLAN ID
178 *
179 * Returns the backbone gateway if found or NULL otherwise.
180 */
181 static struct batadv_bla_backbone_gw *
182 batadv_backbone_hash_find(struct batadv_priv *bat_priv,
183 uint8_t *addr, unsigned short vid)
184 {
185 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
186 struct hlist_head *head;
187 struct batadv_bla_backbone_gw search_entry, *backbone_gw;
188 struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
189 int index;
190
191 if (!hash)
192 return NULL;
193
194 ether_addr_copy(search_entry.orig, addr);
195 search_entry.vid = vid;
196
197 index = batadv_choose_backbone_gw(&search_entry, hash->size);
198 head = &hash->table[index];
199
200 rcu_read_lock();
201 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
202 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
203 &search_entry))
204 continue;
205
206 if (!atomic_inc_not_zero(&backbone_gw->refcount))
207 continue;
208
209 backbone_gw_tmp = backbone_gw;
210 break;
211 }
212 rcu_read_unlock();
213
214 return backbone_gw_tmp;
215 }
216
217 /* delete all claims for a backbone */
218 static void
219 batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
220 {
221 struct batadv_hashtable *hash;
222 struct hlist_node *node_tmp;
223 struct hlist_head *head;
224 struct batadv_bla_claim *claim;
225 int i;
226 spinlock_t *list_lock; /* protects write access to the hash lists */
227
228 hash = backbone_gw->bat_priv->bla.claim_hash;
229 if (!hash)
230 return;
231
232 for (i = 0; i < hash->size; i++) {
233 head = &hash->table[i];
234 list_lock = &hash->list_locks[i];
235
236 spin_lock_bh(list_lock);
237 hlist_for_each_entry_safe(claim, node_tmp,
238 head, hash_entry) {
239 if (claim->backbone_gw != backbone_gw)
240 continue;
241
242 batadv_claim_free_ref(claim);
243 hlist_del_rcu(&claim->hash_entry);
244 }
245 spin_unlock_bh(list_lock);
246 }
247
248 /* all claims gone, initialize CRC */
249 backbone_gw->crc = BATADV_BLA_CRC_INIT;
250 }
251
252 /**
253 * batadv_bla_send_claim - sends a claim frame according to the provided info
254 * @bat_priv: the bat priv with all the soft interface information
255 * @mac: the mac address to be announced within the claim
256 * @vid: the VLAN ID
257 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
258 */
259 static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
260 unsigned short vid, int claimtype)
261 {
262 struct sk_buff *skb;
263 struct ethhdr *ethhdr;
264 struct batadv_hard_iface *primary_if;
265 struct net_device *soft_iface;
266 uint8_t *hw_src;
267 struct batadv_bla_claim_dst local_claim_dest;
268 __be32 zeroip = 0;
269
270 primary_if = batadv_primary_if_get_selected(bat_priv);
271 if (!primary_if)
272 return;
273
274 memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
275 sizeof(local_claim_dest));
276 local_claim_dest.type = claimtype;
277
278 soft_iface = primary_if->soft_iface;
279
280 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
281 /* IP DST: 0.0.0.0 */
282 zeroip,
283 primary_if->soft_iface,
284 /* IP SRC: 0.0.0.0 */
285 zeroip,
286 /* Ethernet DST: Broadcast */
287 NULL,
288 /* Ethernet SRC/HW SRC: originator mac */
289 primary_if->net_dev->dev_addr,
290 /* HW DST: FF:43:05:XX:YY:YY
291 * with XX = claim type
292 * and YY:YY = group id
293 */
294 (uint8_t *)&local_claim_dest);
295
296 if (!skb)
297 goto out;
298
299 ethhdr = (struct ethhdr *)skb->data;
300 hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
301
302 /* now we pretend that the client would have sent this ... */
303 switch (claimtype) {
304 case BATADV_CLAIM_TYPE_CLAIM:
305 /* normal claim frame
306 * set Ethernet SRC to the client's mac
307 */
308 ether_addr_copy(ethhdr->h_source, mac);
309 batadv_dbg(BATADV_DBG_BLA, bat_priv,
310 "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
311 BATADV_PRINT_VID(vid));
312 break;
313 case BATADV_CLAIM_TYPE_UNCLAIM:
314 /* unclaim frame
315 * set HW SRC to the client's mac
316 */
317 ether_addr_copy(hw_src, mac);
318 batadv_dbg(BATADV_DBG_BLA, bat_priv,
319 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
320 BATADV_PRINT_VID(vid));
321 break;
322 case BATADV_CLAIM_TYPE_ANNOUNCE:
323 /* announcement frame
324 * set HW SRC to the special mac containing the crc
325 */
326 ether_addr_copy(hw_src, mac);
327 batadv_dbg(BATADV_DBG_BLA, bat_priv,
328 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
329 ethhdr->h_source, BATADV_PRINT_VID(vid));
330 break;
331 case BATADV_CLAIM_TYPE_REQUEST:
332 /* request frame
333 * set HW SRC and header destination to the receiving backbone
334 * gw's mac
335 */
336 ether_addr_copy(hw_src, mac);
337 ether_addr_copy(ethhdr->h_dest, mac);
338 batadv_dbg(BATADV_DBG_BLA, bat_priv,
339 "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
340 ethhdr->h_source, ethhdr->h_dest,
341 BATADV_PRINT_VID(vid));
342 break;
343 }
344
345 if (vid & BATADV_VLAN_HAS_TAG) {
346 skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
347 vid & VLAN_VID_MASK);
    /* vlan_insert_tag() frees the skb and returns NULL on failure */
    if (!skb)
        goto out;
}
348
349 skb_reset_mac_header(skb);
350 skb->protocol = eth_type_trans(skb, soft_iface);
351 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
352 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
353 skb->len + ETH_HLEN);
354 soft_iface->last_rx = jiffies;
355
356 netif_rx(skb);
357 out:
358 if (primary_if)
359 batadv_hardif_free_ref(primary_if);
360 }
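/* Illustrative sketch, not part of the original code: the HW DST of a
 * claim frame built above carries a struct batadv_bla_claim_dst, i.e.
 * the magic ff:43:05, one claim type byte and the two byte group id in
 * network byte order. A hypothetical decoder (the real parsing happens
 * in batadv_bla_process_claim() below) could look like this:
 */
static inline uint16_t
batadv_bla_example_claim_group(const uint8_t *hw_dst, uint8_t *type)
{
	const struct batadv_bla_claim_dst *dst;

	dst = (const struct batadv_bla_claim_dst *)hw_dst;
	*type = dst->type;

	return ntohs(dst->group);
}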
361
362 /**
363 * batadv_bla_get_backbone_gw - find or create a backbone gateway
364 * @bat_priv: the bat priv with all the soft interface information
365 * @orig: the mac address of the originator
366 * @vid: the VLAN ID
367 *
368 * searches for the backbone gw or creates a new one if it could not
369 * be found.
370 */
371 static struct batadv_bla_backbone_gw *
372 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
373 unsigned short vid, bool own_backbone)
374 {
375 struct batadv_bla_backbone_gw *entry;
376 struct batadv_orig_node *orig_node;
377 int hash_added;
378
379 entry = batadv_backbone_hash_find(bat_priv, orig, vid);
380
381 if (entry)
382 return entry;
383
384 batadv_dbg(BATADV_DBG_BLA, bat_priv,
385 "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
386 orig, BATADV_PRINT_VID(vid));
387
388 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
389 if (!entry)
390 return NULL;
391
392 entry->vid = vid;
393 entry->lasttime = jiffies;
394 entry->crc = BATADV_BLA_CRC_INIT;
395 entry->bat_priv = bat_priv;
396 atomic_set(&entry->request_sent, 0);
397 atomic_set(&entry->wait_periods, 0);
398 ether_addr_copy(entry->orig, orig);
399
400 /* one for the hash, one for returning */
401 atomic_set(&entry->refcount, 2);
402
403 hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
404 batadv_compare_backbone_gw,
405 batadv_choose_backbone_gw, entry,
406 &entry->hash_entry);
407
408 if (unlikely(hash_added != 0)) {
409 /* hash failed, free the structure */
410 kfree(entry);
411 return NULL;
412 }
413
414 /* this is a gateway now, remove any TT entry on this VLAN */
415 orig_node = batadv_orig_hash_find(bat_priv, orig);
416 if (orig_node) {
417 batadv_tt_global_del_orig(bat_priv, orig_node, vid,
418 "became a backbone gateway");
419 batadv_orig_node_free_ref(orig_node);
420 }
421
422 if (own_backbone) {
423 batadv_bla_send_announce(bat_priv, entry);
424
425 /* this will be decreased in the worker thread */
426 atomic_inc(&entry->request_sent);
427 atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
428 atomic_inc(&bat_priv->bla.num_requests);
429 }
430
431 return entry;
432 }
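/* Added note (not in the original code): batadv_bla_get_backbone_gw()
 * returns its own reference on the entry ("one for returning" above), so
 * every caller has to drop it again when done, e.g.:
 *
 *	gw = batadv_bla_get_backbone_gw(bat_priv, orig, vid, false);
 *	if (gw) {
 *		... use gw ...
 *		batadv_backbone_gw_free_ref(gw);
 *	}
 */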
433
434 /* update or add our own backbone gw to make sure we announce
435 * on VLANs where we receive other backbone gws
436 */
437 static void
438 batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
439 struct batadv_hard_iface *primary_if,
440 unsigned short vid)
441 {
442 struct batadv_bla_backbone_gw *backbone_gw;
443
444 backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
445 primary_if->net_dev->dev_addr,
446 vid, true);
447 if (unlikely(!backbone_gw))
448 return;
449
450 backbone_gw->lasttime = jiffies;
451 batadv_backbone_gw_free_ref(backbone_gw);
452 }
453
454 /**
455 * batadv_bla_answer_request - answer a bla request by sending own claims
456 * @bat_priv: the bat priv with all the soft interface information
457 * @vid: the vid where the request came on
458 *
459 * Repeat all of our own claims, and finally send an ANNOUNCE frame
460 * to allow the requester another check if the CRC is correct now.
461 */
462 static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
463 struct batadv_hard_iface *primary_if,
464 unsigned short vid)
465 {
466 struct hlist_head *head;
467 struct batadv_hashtable *hash;
468 struct batadv_bla_claim *claim;
469 struct batadv_bla_backbone_gw *backbone_gw;
470 int i;
471
472 batadv_dbg(BATADV_DBG_BLA, bat_priv,
473 "bla_answer_request(): received a claim request, send all of our own claims again\n");
474
475 backbone_gw = batadv_backbone_hash_find(bat_priv,
476 primary_if->net_dev->dev_addr,
477 vid);
478 if (!backbone_gw)
479 return;
480
481 hash = bat_priv->bla.claim_hash;
482 for (i = 0; i < hash->size; i++) {
483 head = &hash->table[i];
484
485 rcu_read_lock();
486 hlist_for_each_entry_rcu(claim, head, hash_entry) {
487 /* only own claims are interesting */
488 if (claim->backbone_gw != backbone_gw)
489 continue;
490
491 batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
492 BATADV_CLAIM_TYPE_CLAIM);
493 }
494 rcu_read_unlock();
495 }
496
497 /* finally, send an announcement frame */
498 batadv_bla_send_announce(bat_priv, backbone_gw);
499 batadv_backbone_gw_free_ref(backbone_gw);
500 }
501
502 /**
503 * batadv_bla_send_request - send a request to repeat claims
504 * @backbone_gw: the backbone gateway with whom we are out of sync
505 *
506 * When the crc is wrong, ask the backbone gateway for a full table update.
507 * After the request, it will repeat all of its own claims and finally
508 * send an announcement claim with which we can check again.
509 */
510 static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
511 {
512 /* first, remove all old entries */
513 batadv_bla_del_backbone_claims(backbone_gw);
514
515 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
516 "Sending REQUEST to %pM\n", backbone_gw->orig);
517
518 /* send request */
519 batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
520 backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
521
522 /* no local broadcasts should be sent or received, for now. */
523 if (!atomic_read(&backbone_gw->request_sent)) {
524 atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
525 atomic_set(&backbone_gw->request_sent, 1);
526 }
527 }
528
529 /**
530 * batadv_bla_send_announce - send an announcement frame
531 * @bat_priv: the bat priv with all the soft interface information
532 * @backbone_gw: our backbone gateway which should be announced
533 *
534 * This function sends an announcement. It is called from multiple
535 * places.
536 */
537 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
538 struct batadv_bla_backbone_gw *backbone_gw)
539 {
540 uint8_t mac[ETH_ALEN];
541 __be16 crc;
542
543 memcpy(mac, batadv_announce_mac, 4);
544 crc = htons(backbone_gw->crc);
545 memcpy(&mac[4], &crc, 2);
546
547 batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
548 BATADV_CLAIM_TYPE_ANNOUNCE);
549 }
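/* Illustrative sketch, not part of the original code: an ANNOUNCE claim
 * stores the backbone CRC in the last two bytes of the special announce
 * mac, in network byte order. Decoding it again (as batadv_handle_announce()
 * does further below) would look like this; the helper is hypothetical.
 */
static inline uint16_t batadv_bla_example_announce_crc(const uint8_t *hw_src)
{
	__be16 crc;

	/* hw_src[0..3] is batadv_announce_mac, hw_src[4..5] the CRC */
	memcpy(&crc, &hw_src[4], sizeof(crc));

	return ntohs(crc);
}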
550
551 /**
552 * batadv_bla_add_claim - Adds a claim in the claim hash
553 * @bat_priv: the bat priv with all the soft interface information
554 * @mac: the mac address of the claim
555 * @vid: the VLAN ID of the frame
556 * @backbone_gw: the backbone gateway which claims it
557 */
558 static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
559 const uint8_t *mac, const unsigned short vid,
560 struct batadv_bla_backbone_gw *backbone_gw)
561 {
562 struct batadv_bla_claim *claim;
563 struct batadv_bla_claim search_claim;
564 int hash_added;
565
566 ether_addr_copy(search_claim.addr, mac);
567 search_claim.vid = vid;
568 claim = batadv_claim_hash_find(bat_priv, &search_claim);
569
570 /* create a new claim entry if it does not exist yet. */
571 if (!claim) {
572 claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
573 if (!claim)
574 return;
575
576 ether_addr_copy(claim->addr, mac);
577 claim->vid = vid;
578 claim->lasttime = jiffies;
579 claim->backbone_gw = backbone_gw;
580
581 atomic_set(&claim->refcount, 2);
582 batadv_dbg(BATADV_DBG_BLA, bat_priv,
583 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
584 mac, BATADV_PRINT_VID(vid));
585 hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
586 batadv_compare_claim,
587 batadv_choose_claim, claim,
588 &claim->hash_entry);
589
590 if (unlikely(hash_added != 0)) {
591 /* only local changes happened. */
592 kfree(claim);
593 return;
594 }
595 } else {
596 claim->lasttime = jiffies;
597 if (claim->backbone_gw == backbone_gw)
598 /* no need to register a new backbone */
599 goto claim_free_ref;
600
601 batadv_dbg(BATADV_DBG_BLA, bat_priv,
602 "bla_add_claim(): changing ownership for %pM, vid %d\n",
603 mac, BATADV_PRINT_VID(vid));
604
605 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
606 batadv_backbone_gw_free_ref(claim->backbone_gw);
607 }
608 /* set (new) backbone gw */
609 atomic_inc(&backbone_gw->refcount);
610 claim->backbone_gw = backbone_gw;
611
612 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
613 backbone_gw->lasttime = jiffies;
614
615 claim_free_ref:
616 batadv_claim_free_ref(claim);
617 }
618
619 /* Delete a claim from the claim hash which has the
620 * given mac address and vid.
621 */
622 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
623 const uint8_t *mac, const unsigned short vid)
624 {
625 struct batadv_bla_claim search_claim, *claim;
626
627 ether_addr_copy(search_claim.addr, mac);
628 search_claim.vid = vid;
629 claim = batadv_claim_hash_find(bat_priv, &search_claim);
630 if (!claim)
631 return;
632
633 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
634 mac, BATADV_PRINT_VID(vid));
635
636 batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
637 batadv_choose_claim, claim);
638 batadv_claim_free_ref(claim); /* reference from the hash is gone */
639
640 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
641
642 /* don't need the reference from hash_find() anymore */
643 batadv_claim_free_ref(claim);
644 }
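/* Illustrative sketch, not part of the original code: the per-gateway
 * checksum is an XOR of crc16() over the mac addresses of all claimed
 * clients, so adding and removing a claim use the very same operation
 * and the order of updates does not matter. Hypothetical helper:
 */
static inline uint16_t batadv_bla_example_crc_toggle(uint16_t crc,
						     const uint8_t *addr)
{
	/* identical XOR when a claim is added and when it is removed */
	return crc ^ crc16(0, addr, ETH_ALEN);
}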
645
646 /* check for ANNOUNCE frame, return 1 if handled */
647 static int batadv_handle_announce(struct batadv_priv *bat_priv,
648 uint8_t *an_addr, uint8_t *backbone_addr,
649 unsigned short vid)
650 {
651 struct batadv_bla_backbone_gw *backbone_gw;
652 uint16_t crc;
653
654 if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
655 return 0;
656
657 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
658 false);
659
660 if (unlikely(!backbone_gw))
661 return 1;
662
663
664 /* handle as ANNOUNCE frame */
665 backbone_gw->lasttime = jiffies;
666 crc = ntohs(*((__be16 *)(&an_addr[4])));
667
668 batadv_dbg(BATADV_DBG_BLA, bat_priv,
669 "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
670 BATADV_PRINT_VID(vid), backbone_gw->orig, crc);
671
672 if (backbone_gw->crc != crc) {
673 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
674 "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
675 backbone_gw->orig,
676 BATADV_PRINT_VID(backbone_gw->vid),
677 backbone_gw->crc, crc);
678
679 batadv_bla_send_request(backbone_gw);
680 } else {
681 /* if we have sent a request and the crc was OK,
682 * we can allow traffic again.
683 */
684 if (atomic_read(&backbone_gw->request_sent)) {
685 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
686 atomic_set(&backbone_gw->request_sent, 0);
687 }
688 }
689
690 batadv_backbone_gw_free_ref(backbone_gw);
691 return 1;
692 }
693
694 /* check for REQUEST frame, return 1 if handled */
695 static int batadv_handle_request(struct batadv_priv *bat_priv,
696 struct batadv_hard_iface *primary_if,
697 uint8_t *backbone_addr,
698 struct ethhdr *ethhdr, unsigned short vid)
699 {
700 /* check for REQUEST frame */
701 if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
702 return 0;
703
704 /* sanity check, this should not happen on a normal switch,
705 * we ignore it in this case.
706 */
707 if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
708 return 1;
709
710 batadv_dbg(BATADV_DBG_BLA, bat_priv,
711 "handle_request(): REQUEST vid %d (sent by %pM)...\n",
712 BATADV_PRINT_VID(vid), ethhdr->h_source);
713
714 batadv_bla_answer_request(bat_priv, primary_if, vid);
715 return 1;
716 }
717
718 /* check for UNCLAIM frame, return 1 if handled */
719 static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
720 struct batadv_hard_iface *primary_if,
721 uint8_t *backbone_addr,
722 uint8_t *claim_addr, unsigned short vid)
723 {
724 struct batadv_bla_backbone_gw *backbone_gw;
725
726 /* unclaim in any case if it is our own */
727 if (primary_if && batadv_compare_eth(backbone_addr,
728 primary_if->net_dev->dev_addr))
729 batadv_bla_send_claim(bat_priv, claim_addr, vid,
730 BATADV_CLAIM_TYPE_UNCLAIM);
731
732 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
733
734 if (!backbone_gw)
735 return 1;
736
737 /* this must be an UNCLAIM frame */
738 batadv_dbg(BATADV_DBG_BLA, bat_priv,
739 "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
740 claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);
741
742 batadv_bla_del_claim(bat_priv, claim_addr, vid);
743 batadv_backbone_gw_free_ref(backbone_gw);
744 return 1;
745 }
746
747 /* check for CLAIM frame, return 1 if handled */
748 static int batadv_handle_claim(struct batadv_priv *bat_priv,
749 struct batadv_hard_iface *primary_if,
750 uint8_t *backbone_addr, uint8_t *claim_addr,
751 unsigned short vid)
752 {
753 struct batadv_bla_backbone_gw *backbone_gw;
754
755 /* register the gateway if not yet available, and add the claim. */
756
757 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
758 false);
759
760 if (unlikely(!backbone_gw))
761 return 1;
762
763 /* this must be a CLAIM frame */
764 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
765 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
766 batadv_bla_send_claim(bat_priv, claim_addr, vid,
767 BATADV_CLAIM_TYPE_CLAIM);
768
769 /* TODO: we could call something like tt_local_del() here. */
770
771 batadv_backbone_gw_free_ref(backbone_gw);
772 return 1;
773 }
774
775 /**
776 * batadv_check_claim_group - check if a claim frame is from our claim group
777 * @bat_priv: the bat priv with all the soft interface information
778 * @hw_src: the Hardware source in the ARP Header
779 * @hw_dst: the Hardware destination in the ARP Header
780 * @ethhdr: pointer to the Ethernet header of the claim frame
781 *
782 * checks if it is a claim packet and if it's on the same group.
783 * This function also applies the group ID of the sender
784 * if it is in the same mesh.
785 *
786 * returns:
787 * 2 - if it is a claim packet and on the same group
788 * 1 - if it is a claim packet from another group
789 * 0 - if it is not a claim packet
790 */
791 static int batadv_check_claim_group(struct batadv_priv *bat_priv,
792 struct batadv_hard_iface *primary_if,
793 uint8_t *hw_src, uint8_t *hw_dst,
794 struct ethhdr *ethhdr)
795 {
796 uint8_t *backbone_addr;
797 struct batadv_orig_node *orig_node;
798 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
799
800 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
801 bla_dst_own = &bat_priv->bla.claim_dest;
802
803 /* if it is a claim frame, the backbone address is in hw_src,
804 * otherwise assume it is in the Ethernet source address
805 */
806 switch (bla_dst->type) {
807 case BATADV_CLAIM_TYPE_CLAIM:
808 backbone_addr = hw_src;
809 break;
810 case BATADV_CLAIM_TYPE_REQUEST:
811 case BATADV_CLAIM_TYPE_ANNOUNCE:
812 case BATADV_CLAIM_TYPE_UNCLAIM:
813 backbone_addr = ethhdr->h_source;
814 break;
815 default:
816 return 0;
817 }
818
819 /* don't accept claim frames from ourselves */
820 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
821 return 0;
822
823 /* if it's already the same group, it is fine. */
824 if (bla_dst->group == bla_dst_own->group)
825 return 2;
826
827 /* let's see if this originator is in our mesh */
828 orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
829
830 /* don't accept claims from gateways which are not in
831 * the same mesh or group.
832 */
833 if (!orig_node)
834 return 1;
835
836 /* if our mesh friend's mac is bigger, use it for ourselves. */
837 if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
838 batadv_dbg(BATADV_DBG_BLA, bat_priv,
839 "taking other backbones claim group: %#.4x\n",
840 ntohs(bla_dst->group));
841 bla_dst_own->group = bla_dst->group;
842 }
843
844 batadv_orig_node_free_ref(orig_node);
845
846 return 2;
847 }
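/* Illustrative sketch, not part of the original code: the claim group id
 * is derived from the primary interface mac (see batadv_bla_init() and
 * batadv_bla_update_orig_address()), and when two meshes meet the check
 * above makes both sides converge on the numerically larger id.
 * Hypothetical helper showing the derivation:
 */
static inline __be16 batadv_bla_example_group_id(const uint8_t *primary_addr)
{
	return htons(crc16(0, primary_addr, ETH_ALEN));
}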
848
849
850 /**
851 * batadv_bla_process_claim - check if a frame is a claim frame and process it
852 * @bat_priv: the bat priv with all the soft interface information
853 * @skb: the frame to be checked
854 *
855 * Check if this is a claim frame, and process it accordingly.
856 *
857 * returns 1 if it was a claim frame, otherwise return 0 to
858 * tell the caller that it can use the frame on its own.
859 */
860 static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
861 struct batadv_hard_iface *primary_if,
862 struct sk_buff *skb)
863 {
864 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
865 uint8_t *hw_src, *hw_dst;
866 struct vlan_hdr *vhdr, vhdr_buf;
867 struct ethhdr *ethhdr;
868 struct arphdr *arphdr;
869 unsigned short vid;
870 int vlan_depth = 0;
871 __be16 proto;
872 int headlen;
873 int ret;
874
875 vid = batadv_get_vid(skb, 0);
876 ethhdr = eth_hdr(skb);
877
878 proto = ethhdr->h_proto;
879 headlen = ETH_HLEN;
880 if (vid & BATADV_VLAN_HAS_TAG) {
881 /* Traverse the VLAN/Ethertypes.
882 *
883 * At this point it is known that the first protocol is a VLAN
884 * header, so start checking at the encapsulated protocol.
885 *
886 * The depth of the VLAN headers is recorded to drop BLA claim
887 * frames encapsulated into multiple VLAN headers (QinQ).
888 */
889 do {
890 vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
891 &vhdr_buf);
892 if (!vhdr)
893 return 0;
894
895 proto = vhdr->h_vlan_encapsulated_proto;
896 headlen += VLAN_HLEN;
897 vlan_depth++;
898 } while (proto == htons(ETH_P_8021Q));
899 }
900
901 if (proto != htons(ETH_P_ARP))
902 return 0; /* not a claim frame */
903
904 /* this must be an ARP frame. check if it is a claim. */
905
906 if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
907 return 0;
908
909 /* pskb_may_pull() may have modified the pointers, get ethhdr again */
910 ethhdr = eth_hdr(skb);
911 arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
912
913 /* Check whether the ARP frame carries valid
914 * IP information
915 */
916 if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
917 return 0;
918 if (arphdr->ar_pro != htons(ETH_P_IP))
919 return 0;
920 if (arphdr->ar_hln != ETH_ALEN)
921 return 0;
922 if (arphdr->ar_pln != 4)
923 return 0;
924
925 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
926 hw_dst = hw_src + ETH_ALEN + 4;
927 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
928 bla_dst_own = &bat_priv->bla.claim_dest;
929
930 /* check if it is a claim frame in general */
931 if (memcmp(bla_dst->magic, bla_dst_own->magic,
932 sizeof(bla_dst->magic)) != 0)
933 return 0;
934
935 /* check if there is a claim frame encapsulated deeper in (QinQ) and
936 * drop that, as this is not supported by BLA but should also not be
937 * sent via the mesh.
938 */
939 if (vlan_depth > 1)
940 return 1;
941
942 /* check if it is a claim frame. */
943 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
944 ethhdr);
945 if (ret == 1)
946 batadv_dbg(BATADV_DBG_BLA, bat_priv,
947 "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
948 ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
949 hw_dst);
950
951 if (ret < 2)
952 return ret;
953
954 /* become a backbone gw ourselves on this vlan if not happened yet */
955 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
956
957 /* check for the different types of claim frames ... */
958 switch (bla_dst->type) {
959 case BATADV_CLAIM_TYPE_CLAIM:
960 if (batadv_handle_claim(bat_priv, primary_if, hw_src,
961 ethhdr->h_source, vid))
962 return 1;
963 break;
964 case BATADV_CLAIM_TYPE_UNCLAIM:
965 if (batadv_handle_unclaim(bat_priv, primary_if,
966 ethhdr->h_source, hw_src, vid))
967 return 1;
968 break;
969
970 case BATADV_CLAIM_TYPE_ANNOUNCE:
971 if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
972 vid))
973 return 1;
974 break;
975 case BATADV_CLAIM_TYPE_REQUEST:
976 if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
977 vid))
978 return 1;
979 break;
980 }
981
982 batadv_dbg(BATADV_DBG_BLA, bat_priv,
983 "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
984 ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
985 return 1;
986 }
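/* Added note (not in the original code), summarizing the parsing above:
 * a claim frame is an ARP reply whose payload is interpreted as
 *
 *	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr)
 *	hw_dst = hw_src + ETH_ALEN + 4
 *
 * where hw_dst holds the struct batadv_bla_claim_dst (magic ff:43:05,
 * claim type, group id). Claim frames nested in more than one VLAN
 * header (QinQ) are dropped instead of being processed.
 */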
987
988 /* Check when we last heard from other nodes, and remove them in case of
989 * a time out, or clean all backbone gws if now is set.
990 */
991 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
992 {
993 struct batadv_bla_backbone_gw *backbone_gw;
994 struct hlist_node *node_tmp;
995 struct hlist_head *head;
996 struct batadv_hashtable *hash;
997 spinlock_t *list_lock; /* protects write access to the hash lists */
998 int i;
999
1000 hash = bat_priv->bla.backbone_hash;
1001 if (!hash)
1002 return;
1003
1004 for (i = 0; i < hash->size; i++) {
1005 head = &hash->table[i];
1006 list_lock = &hash->list_locks[i];
1007
1008 spin_lock_bh(list_lock);
1009 hlist_for_each_entry_safe(backbone_gw, node_tmp,
1010 head, hash_entry) {
1011 if (now)
1012 goto purge_now;
1013 if (!batadv_has_timed_out(backbone_gw->lasttime,
1014 BATADV_BLA_BACKBONE_TIMEOUT))
1015 continue;
1016
1017 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
1018 "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
1019 backbone_gw->orig);
1020
1021 purge_now:
1022 /* don't wait for the pending request anymore */
1023 if (atomic_read(&backbone_gw->request_sent))
1024 atomic_dec(&bat_priv->bla.num_requests);
1025
1026 batadv_bla_del_backbone_claims(backbone_gw);
1027
1028 hlist_del_rcu(&backbone_gw->hash_entry);
1029 batadv_backbone_gw_free_ref(backbone_gw);
1030 }
1031 spin_unlock_bh(list_lock);
1032 }
1033 }
1034
1035 /**
1036 * batadv_bla_purge_claims - purge stale (or all) claims
1037 * @bat_priv: the bat priv with all the soft interface information
1038 * @primary_if: the selected primary interface, may be NULL if now is set
1039 * @now: whether the whole hash shall be wiped now
1040 *
1041 * Check when we last heard from our own claims, and remove them in case of
1042 * a timeout, or clean all claims if now is set
1043 */
1044 static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
1045 struct batadv_hard_iface *primary_if,
1046 int now)
1047 {
1048 struct batadv_bla_claim *claim;
1049 struct hlist_head *head;
1050 struct batadv_hashtable *hash;
1051 int i;
1052
1053 hash = bat_priv->bla.claim_hash;
1054 if (!hash)
1055 return;
1056
1057 for (i = 0; i < hash->size; i++) {
1058 head = &hash->table[i];
1059
1060 rcu_read_lock();
1061 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1062 if (now)
1063 goto purge_now;
1064 if (!batadv_compare_eth(claim->backbone_gw->orig,
1065 primary_if->net_dev->dev_addr))
1066 continue;
1067 if (!batadv_has_timed_out(claim->lasttime,
1068 BATADV_BLA_CLAIM_TIMEOUT))
1069 continue;
1070
1071 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1072 "bla_purge_claims(): %pM, vid %d, time out\n",
1073 claim->addr, claim->vid);
1074
1075 purge_now:
1076 batadv_handle_unclaim(bat_priv, primary_if,
1077 claim->backbone_gw->orig,
1078 claim->addr, claim->vid);
1079 }
1080 rcu_read_unlock();
1081 }
1082 }
1083
1084 /**
1085 * batadv_bla_update_orig_address
1086 * @bat_priv: the bat priv with all the soft interface information
1087 * @primary_if: the new selected primary_if
1088 * @oldif: the old primary interface, may be NULL
1089 *
1090 * Update the backbone gateways when the own orig address changes.
1091 */
1092 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1093 struct batadv_hard_iface *primary_if,
1094 struct batadv_hard_iface *oldif)
1095 {
1096 struct batadv_bla_backbone_gw *backbone_gw;
1097 struct hlist_head *head;
1098 struct batadv_hashtable *hash;
1099 __be16 group;
1100 int i;
1101
1102 /* reset bridge loop avoidance group id */
1103 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1104 bat_priv->bla.claim_dest.group = group;
1105
1106 /* purge everything when bridge loop avoidance is turned off */
1107 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1108 oldif = NULL;
1109
1110 if (!oldif) {
1111 batadv_bla_purge_claims(bat_priv, NULL, 1);
1112 batadv_bla_purge_backbone_gw(bat_priv, 1);
1113 return;
1114 }
1115
1116 hash = bat_priv->bla.backbone_hash;
1117 if (!hash)
1118 return;
1119
1120 for (i = 0; i < hash->size; i++) {
1121 head = &hash->table[i];
1122
1123 rcu_read_lock();
1124 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1125 /* own orig still holds the old value. */
1126 if (!batadv_compare_eth(backbone_gw->orig,
1127 oldif->net_dev->dev_addr))
1128 continue;
1129
1130 ether_addr_copy(backbone_gw->orig,
1131 primary_if->net_dev->dev_addr);
1132 /* send an announce frame so others will ask for our
1133 * claims and update their tables.
1134 */
1135 batadv_bla_send_announce(bat_priv, backbone_gw);
1136 }
1137 rcu_read_unlock();
1138 }
1139 }
1140
1141 /* periodic work to do:
1142 * * purge structures when they are too old
1143 * * send announcements
1144 */
1145 static void batadv_bla_periodic_work(struct work_struct *work)
1146 {
1147 struct delayed_work *delayed_work;
1148 struct batadv_priv *bat_priv;
1149 struct batadv_priv_bla *priv_bla;
1150 struct hlist_head *head;
1151 struct batadv_bla_backbone_gw *backbone_gw;
1152 struct batadv_hashtable *hash;
1153 struct batadv_hard_iface *primary_if;
1154 int i;
1155
1156 delayed_work = container_of(work, struct delayed_work, work);
1157 priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
1158 bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1159 primary_if = batadv_primary_if_get_selected(bat_priv);
1160 if (!primary_if)
1161 goto out;
1162
1163 batadv_bla_purge_claims(bat_priv, primary_if, 0);
1164 batadv_bla_purge_backbone_gw(bat_priv, 0);
1165
1166 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1167 goto out;
1168
1169 hash = bat_priv->bla.backbone_hash;
1170 if (!hash)
1171 goto out;
1172
1173 for (i = 0; i < hash->size; i++) {
1174 head = &hash->table[i];
1175
1176 rcu_read_lock();
1177 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1178 if (!batadv_compare_eth(backbone_gw->orig,
1179 primary_if->net_dev->dev_addr))
1180 continue;
1181
1182 backbone_gw->lasttime = jiffies;
1183
1184 batadv_bla_send_announce(bat_priv, backbone_gw);
1185
1186 /* request_sent is only set after creation to avoid
1187 * problems when we are not yet known as backbone gw
1188 * in the backbone.
1189 *
1190 * We can reset this now after we waited some periods
1191 * to give bridge forward delays and bla group forming
1192 * some grace time.
1193 */
1194
1195 if (atomic_read(&backbone_gw->request_sent) == 0)
1196 continue;
1197
1198 if (!atomic_dec_and_test(&backbone_gw->wait_periods))
1199 continue;
1200
1201 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
1202 atomic_set(&backbone_gw->request_sent, 0);
1203 }
1204 rcu_read_unlock();
1205 }
1206 out:
1207 if (primary_if)
1208 batadv_hardif_free_ref(primary_if);
1209
1210 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1211 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1212 }
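/* Illustrative sketch, not part of the original code: a freshly created
 * own backbone gateway keeps request_sent set for BATADV_BLA_WAIT_PERIODS
 * runs of the periodic work above, i.e. broadcasts stay blocked for
 * roughly this long to give bridge forward delays and bla group forming
 * some grace time. Hypothetical helper:
 */
static inline unsigned long batadv_bla_example_grace_jiffies(void)
{
	return msecs_to_jiffies(BATADV_BLA_WAIT_PERIODS *
				BATADV_BLA_PERIOD_LENGTH);
}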
1213
1214 /* The claim hash and the backbone hash receive the same lock class key
1215 * because they are both initialized by hash_new with the same parameters.
1216 * Reinitialize them with two different keys to allow nested locking
1217 * without generating lockdep warnings.
1218 */
1219 static struct lock_class_key batadv_claim_hash_lock_class_key;
1220 static struct lock_class_key batadv_backbone_hash_lock_class_key;
1221
1222 /* initialize all bla structures */
1223 int batadv_bla_init(struct batadv_priv *bat_priv)
1224 {
1225 int i;
1226 uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1227 struct batadv_hard_iface *primary_if;
1228 uint16_t crc;
1229 unsigned long entrytime;
1230
1231 spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
1232
1233 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1234
1235 /* setting claim destination address */
1236 memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
1237 bat_priv->bla.claim_dest.type = 0;
1238 primary_if = batadv_primary_if_get_selected(bat_priv);
1239 if (primary_if) {
1240 crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
1241 bat_priv->bla.claim_dest.group = htons(crc);
1242 batadv_hardif_free_ref(primary_if);
1243 } else {
1244 bat_priv->bla.claim_dest.group = 0; /* will be set later */
1245 }
1246
1247 /* initialize the duplicate list */
1248 entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1249 for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1250 bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
1251 bat_priv->bla.bcast_duplist_curr = 0;
1252
1253 if (bat_priv->bla.claim_hash)
1254 return 0;
1255
1256 bat_priv->bla.claim_hash = batadv_hash_new(128);
1257 bat_priv->bla.backbone_hash = batadv_hash_new(32);
1258
1259 if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
1260 return -ENOMEM;
1261
1262 batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1263 &batadv_claim_hash_lock_class_key);
1264 batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1265 &batadv_backbone_hash_lock_class_key);
1266
1267 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
1268
1269 INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
1270
1271 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1272 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1273 return 0;
1274 }
1275
1276 /**
1277 * batadv_bla_check_bcast_duplist
1278 * @bat_priv: the bat priv with all the soft interface information
1279 * @skb: contains the bcast_packet to be checked
1280 *
1281 * check if it is on our broadcast list. Another gateway might
1282 * have sent the same packet because it is connected to the same backbone,
1283 * so we have to remove this duplicate.
1284 *
1285 * This is performed by checking the CRC, which will tell us
1286 * with a good chance that it is the same packet. If it is furthermore
1287 * sent by another host, drop it. We allow equal packets from
1288 * the same host however as this might be intended.
1289 */
1290 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1291 struct sk_buff *skb)
1292 {
1293 int i, curr, ret = 0;
1294 __be32 crc;
1295 struct batadv_bcast_packet *bcast_packet;
1296 struct batadv_bcast_duplist_entry *entry;
1297
1298 bcast_packet = (struct batadv_bcast_packet *)skb->data;
1299
1300 /* calculate the crc ... */
1301 crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
1302
1303 spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
1304
1305 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1306 curr = (bat_priv->bla.bcast_duplist_curr + i);
1307 curr %= BATADV_DUPLIST_SIZE;
1308 entry = &bat_priv->bla.bcast_duplist[curr];
1309
1310 /* we can stop searching if the entry is too old;
1311 * later entries will be even older
1312 */
1313 if (batadv_has_timed_out(entry->entrytime,
1314 BATADV_DUPLIST_TIMEOUT))
1315 break;
1316
1317 if (entry->crc != crc)
1318 continue;
1319
1320 if (batadv_compare_eth(entry->orig, bcast_packet->orig))
1321 continue;
1322
1323 /* this entry seems to match: same crc, not too old,
1324 * and from another gw. therefore return 1 to forbid it.
1325 */
1326 ret = 1;
1327 goto out;
1328 }
1329 /* not found, add a new entry (overwrite the oldest entry)
1330 * and allow it, it's the first occurrence.
1331 */
1332 curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1333 curr %= BATADV_DUPLIST_SIZE;
1334 entry = &bat_priv->bla.bcast_duplist[curr];
1335 entry->crc = crc;
1336 entry->entrytime = jiffies;
1337 ether_addr_copy(entry->orig, bcast_packet->orig);
1338 bat_priv->bla.bcast_duplist_curr = curr;
1339
1340 out:
1341 spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
1342
1343 return ret;
1344 }
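/* Illustrative sketch, not part of the original code: an entry of the
 * broadcast duplicate list counts as a duplicate when it is recent
 * enough, carries the same payload crc and was sent by a different
 * originator. The hypothetical predicate below just restates the checks
 * of the loop above.
 */
static inline bool
batadv_bla_example_duplist_match(const struct batadv_bcast_duplist_entry *entry,
				 __be32 crc, const uint8_t *orig)
{
	if (batadv_has_timed_out(entry->entrytime, BATADV_DUPLIST_TIMEOUT))
		return false;

	if (entry->crc != crc)
		return false;

	/* equal packets from the same host are allowed */
	return !batadv_compare_eth(entry->orig, orig);
}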
1345
1346
1347
1348 /**
1349 * batadv_bla_is_backbone_gw_orig - check if an originator is a backbone gateway
1350 * @bat_priv: the bat priv with all the soft interface information
1351 * @orig: originator mac address
1352 * @vid: VLAN identifier
1353 *
1354 * Check if the originator is a gateway for the VLAN identified by vid.
1355 *
1356 * Returns true if orig is a backbone for this vid, false otherwise.
1357 */
1358 bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
1359 unsigned short vid)
1360 {
1361 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1362 struct hlist_head *head;
1363 struct batadv_bla_backbone_gw *backbone_gw;
1364 int i;
1365
1366 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1367 return false;
1368
1369 if (!hash)
1370 return false;
1371
1372 for (i = 0; i < hash->size; i++) {
1373 head = &hash->table[i];
1374
1375 rcu_read_lock();
1376 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1377 if (batadv_compare_eth(backbone_gw->orig, orig) &&
1378 backbone_gw->vid == vid) {
1379 rcu_read_unlock();
1380 return true;
1381 }
1382 }
1383 rcu_read_unlock();
1384 }
1385
1386 return false;
1387 }
1388
1389
1390 /**
1391 * batadv_bla_is_backbone_gw
1392 * @skb: the frame to be checked
1393 * @orig_node: the orig_node of the frame
1394 * @hdr_size: length of the encapsulating batman-adv header in front of the Ethernet header
1395 *
1396 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
1397 * if the orig_node is also a gateway on the soft interface, otherwise it
1398 * returns 0.
1399 */
1400 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
1401 struct batadv_orig_node *orig_node, int hdr_size)
1402 {
1403 struct batadv_bla_backbone_gw *backbone_gw;
1404 unsigned short vid;
1405
1406 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1407 return 0;
1408
1409 /* first, find out the vid. */
1410 if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1411 return 0;
1412
1413 vid = batadv_get_vid(skb, hdr_size);
1414
1415 /* see if this originator is a backbone gw for this VLAN */
1416 backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
1417 orig_node->orig, vid);
1418 if (!backbone_gw)
1419 return 0;
1420
1421 batadv_backbone_gw_free_ref(backbone_gw);
1422 return 1;
1423 }
1424
1425 /* free all bla structures (for softinterface free or module unload) */
1426 void batadv_bla_free(struct batadv_priv *bat_priv)
1427 {
1428 struct batadv_hard_iface *primary_if;
1429
1430 cancel_delayed_work_sync(&bat_priv->bla.work);
1431 primary_if = batadv_primary_if_get_selected(bat_priv);
1432
1433 if (bat_priv->bla.claim_hash) {
1434 batadv_bla_purge_claims(bat_priv, primary_if, 1);
1435 batadv_hash_destroy(bat_priv->bla.claim_hash);
1436 bat_priv->bla.claim_hash = NULL;
1437 }
1438 if (bat_priv->bla.backbone_hash) {
1439 batadv_bla_purge_backbone_gw(bat_priv, 1);
1440 batadv_hash_destroy(bat_priv->bla.backbone_hash);
1441 bat_priv->bla.backbone_hash = NULL;
1442 }
1443 if (primary_if)
1444 batadv_hardif_free_ref(primary_if);
1445 }
1446
1447 /**
1448 * batadv_bla_rx
1449 * @bat_priv: the bat priv with all the soft interface information
1450 * @skb: the frame to be checked
1451 * @vid: the VLAN ID of the frame
1452 * @is_bcast: whether the frame arrived with a broadcast packet type
1453 *
1454 * bla_rx avoidance checks if:
1455 *  * we have to race for a claim
1456 *  * the frame is allowed on the LAN
1457 *
1458 * in these cases, the skb is further handled by this function and
1459 * returns 1, otherwise it returns 0 and the caller shall further
1460 * process the skb.
1461 */
1462 int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1463 unsigned short vid, bool is_bcast)
1464 {
1465 struct ethhdr *ethhdr;
1466 struct batadv_bla_claim search_claim, *claim = NULL;
1467 struct batadv_hard_iface *primary_if;
1468 int ret;
1469
1470 ethhdr = eth_hdr(skb);
1471
1472 primary_if = batadv_primary_if_get_selected(bat_priv);
1473 if (!primary_if)
1474 goto handled;
1475
1476 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1477 goto allow;
1478
1479
1480 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1481 /* don't allow broadcasts while requests are in flight */
1482 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
1483 goto handled;
1484
1485 ether_addr_copy(search_claim.addr, ethhdr->h_source);
1486 search_claim.vid = vid;
1487 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1488
1489 if (!claim) {
1490 /* possible optimization: race for a claim */
1491 /* No claim exists yet, claim it for us!
1492 */
1493 batadv_handle_claim(bat_priv, primary_if,
1494 primary_if->net_dev->dev_addr,
1495 ethhdr->h_source, vid);
1496 goto allow;
1497 }
1498
1499 /* if it is our own claim ... */
1500 if (batadv_compare_eth(claim->backbone_gw->orig,
1501 primary_if->net_dev->dev_addr)) {
1502 /* ... allow it in any case */
1503 claim->lasttime = jiffies;
1504 goto allow;
1505 }
1506
1507 /* if it is a broadcast ... */
1508 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
1509 /* ... drop it. the responsible gateway is in charge.
1510 *
1511 * We need to check is_bcast because with the gateway
1512 * feature, broadcasts (like DHCP requests) may be sent
1513 * using a unicast packet type.
1514 */
1515 goto handled;
1516 } else {
1517 /* seems the client considers us as its best gateway.
1518 * send a claim and update the claim table
1519 * immediately.
1520 */
1521 batadv_handle_claim(bat_priv, primary_if,
1522 primary_if->net_dev->dev_addr,
1523 ethhdr->h_source, vid);
1524 goto allow;
1525 }
1526 allow:
1527 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1528 ret = 0;
1529 goto out;
1530
1531 handled:
1532 kfree_skb(skb);
1533 ret = 1;
1534
1535 out:
1536 if (primary_if)
1537 batadv_hardif_free_ref(primary_if);
1538 if (claim)
1539 batadv_claim_free_ref(claim);
1540 return ret;
1541 }
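/* Added note (not in the original code), summarizing batadv_bla_rx():
 * broadcasts are dropped while claim requests are pending; unclaimed
 * clients are claimed for ourselves and the frame is allowed; frames of
 * clients claimed by us are always allowed; for clients claimed by
 * another gateway, broadcasts are dropped (that gateway delivers them)
 * while unicast frames trigger a re-claim and are allowed.
 */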
1542
1543 /**
1544 * batadv_bla_tx
1545 * @bat_priv: the bat priv with all the soft interface information
1546 * @skb: the frame to be checked
1547 * @vid: the VLAN ID of the frame
1548 *
1549 * bla_tx checks if:
1550 * * a claim was received which has to be processed
1551 * * the frame is allowed on the mesh
1552 *
1553 * in these cases, the skb is further handled by this function and
1554 * returns 1, otherwise it returns 0 and the caller shall further
1555 * process the skb.
1556 *
1557 * This call might reallocate skb data.
1558 */
1559 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1560 unsigned short vid)
1561 {
1562 struct ethhdr *ethhdr;
1563 struct batadv_bla_claim search_claim, *claim = NULL;
1564 struct batadv_hard_iface *primary_if;
1565 int ret = 0;
1566
1567 primary_if = batadv_primary_if_get_selected(bat_priv);
1568 if (!primary_if)
1569 goto out;
1570
1571 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1572 goto allow;
1573
1574 if (batadv_bla_process_claim(bat_priv, primary_if, skb))
1575 goto handled;
1576
1577 ethhdr = eth_hdr(skb);
1578
1579 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1580 /* don't allow broadcasts while requests are in flight */
1581 if (is_multicast_ether_addr(ethhdr->h_dest))
1582 goto handled;
1583
1584 ether_addr_copy(search_claim.addr, ethhdr->h_source);
1585 search_claim.vid = vid;
1586
1587 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1588
1589 /* if no claim exists, allow it. */
1590 if (!claim)
1591 goto allow;
1592
1593 /* check if we are responsible. */
1594 if (batadv_compare_eth(claim->backbone_gw->orig,
1595 primary_if->net_dev->dev_addr)) {
1596 /* if yes, the client has roamed and we have
1597 * to unclaim it.
1598 */
1599 batadv_handle_unclaim(bat_priv, primary_if,
1600 primary_if->net_dev->dev_addr,
1601 ethhdr->h_source, vid);
1602 goto allow;
1603 }
1604
1605 /* check if it is a multicast/broadcast frame */
1606 if (is_multicast_ether_addr(ethhdr->h_dest)) {
1607 /* drop it. the responsible gateway has forwarded it into
1608 * the backbone network.
1609 */
1610 goto handled;
1611 } else {
1612 /* we must allow it. at least if we are
1613 * responsible for the DESTINATION.
1614 */
1615 goto allow;
1616 }
1617 allow:
1618 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1619 ret = 0;
1620 goto out;
1621 handled:
1622 ret = 1;
1623 out:
1624 if (primary_if)
1625 batadv_hardif_free_ref(primary_if);
1626 if (claim)
1627 batadv_claim_free_ref(claim);
1628 return ret;
1629 }
1630
1631 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1632 {
1633 struct net_device *net_dev = (struct net_device *)seq->private;
1634 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1635 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
1636 struct batadv_bla_claim *claim;
1637 struct batadv_hard_iface *primary_if;
1638 struct hlist_head *head;
1639 uint32_t i;
1640 bool is_own;
1641 uint8_t *primary_addr;
1642
1643 primary_if = batadv_seq_print_text_primary_if_get(seq);
1644 if (!primary_if)
1645 goto out;
1646
1647 primary_addr = primary_if->net_dev->dev_addr;
1648 seq_printf(seq,
1649 "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
1650 net_dev->name, primary_addr,
1651 ntohs(bat_priv->bla.claim_dest.group));
1652 seq_printf(seq, " %-17s %-5s %-17s [o] (%-6s)\n",
1653 "Client", "VID", "Originator", "CRC");
1654 for (i = 0; i < hash->size; i++) {
1655 head = &hash->table[i];
1656
1657 rcu_read_lock();
1658 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1659 is_own = batadv_compare_eth(claim->backbone_gw->orig,
1660 primary_addr);
1661 seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
1662 claim->addr, BATADV_PRINT_VID(claim->vid),
1663 claim->backbone_gw->orig,
1664 (is_own ? 'x' : ' '),
1665 claim->backbone_gw->crc);
1666 }
1667 rcu_read_unlock();
1668 }
1669 out:
1670 if (primary_if)
1671 batadv_hardif_free_ref(primary_if);
1672 return 0;
1673 }
1674
1675 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
1676 {
1677 struct net_device *net_dev = (struct net_device *)seq->private;
1678 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1679 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1680 struct batadv_bla_backbone_gw *backbone_gw;
1681 struct batadv_hard_iface *primary_if;
1682 struct hlist_head *head;
1683 int secs, msecs;
1684 uint32_t i;
1685 bool is_own;
1686 uint8_t *primary_addr;
1687
1688 primary_if = batadv_seq_print_text_primary_if_get(seq);
1689 if (!primary_if)
1690 goto out;
1691
1692 primary_addr = primary_if->net_dev->dev_addr;
1693 seq_printf(seq,
1694 "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
1695 net_dev->name, primary_addr,
1696 ntohs(bat_priv->bla.claim_dest.group));
1697 seq_printf(seq, " %-17s %-5s %-9s (%-6s)\n",
1698 "Originator", "VID", "last seen", "CRC");
1699 for (i = 0; i < hash->size; i++) {
1700 head = &hash->table[i];
1701
1702 rcu_read_lock();
1703 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1704 msecs = jiffies_to_msecs(jiffies -
1705 backbone_gw->lasttime);
1706 secs = msecs / 1000;
1707 msecs = msecs % 1000;
1708
1709 is_own = batadv_compare_eth(backbone_gw->orig,
1710 primary_addr);
1711 if (is_own)
1712 continue;
1713
1714 seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
1715 backbone_gw->orig,
1716 BATADV_PRINT_VID(backbone_gw->vid), secs,
1717 msecs, backbone_gw->crc);
1718 }
1719 rcu_read_unlock();
1720 }
1721 out:
1722 if (primary_if)
1723 batadv_hardif_free_ref(primary_if);
1724 return 0;
1725 }