net/batman-adv/multicast.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2014-2019 B.A.T.M.A.N. contributors:
3 *
4 * Linus Lüssing
5 */
6
7 #include "multicast.h"
8 #include "main.h"
9
10 #include <linux/atomic.h>
11 #include <linux/bitops.h>
12 #include <linux/bug.h>
13 #include <linux/byteorder/generic.h>
14 #include <linux/errno.h>
15 #include <linux/etherdevice.h>
16 #include <linux/gfp.h>
17 #include <linux/icmpv6.h>
18 #include <linux/if_bridge.h>
19 #include <linux/if_ether.h>
20 #include <linux/igmp.h>
21 #include <linux/in.h>
22 #include <linux/in6.h>
23 #include <linux/inetdevice.h>
24 #include <linux/ip.h>
25 #include <linux/ipv6.h>
26 #include <linux/jiffies.h>
27 #include <linux/kernel.h>
28 #include <linux/kref.h>
29 #include <linux/list.h>
30 #include <linux/lockdep.h>
31 #include <linux/netdevice.h>
32 #include <linux/netlink.h>
33 #include <linux/printk.h>
34 #include <linux/rculist.h>
35 #include <linux/rcupdate.h>
36 #include <linux/seq_file.h>
37 #include <linux/skbuff.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/stddef.h>
41 #include <linux/string.h>
42 #include <linux/types.h>
43 #include <linux/workqueue.h>
44 #include <net/addrconf.h>
45 #include <net/genetlink.h>
46 #include <net/if_inet6.h>
47 #include <net/ip.h>
48 #include <net/ipv6.h>
49 #include <net/netlink.h>
50 #include <net/sock.h>
51 #include <uapi/linux/batadv_packet.h>
52 #include <uapi/linux/batman_adv.h>
53
54 #include "hard-interface.h"
55 #include "hash.h"
56 #include "log.h"
57 #include "netlink.h"
58 #include "send.h"
59 #include "soft-interface.h"
60 #include "translation-table.h"
61 #include "tvlv.h"
62
63 static void batadv_mcast_mla_update(struct work_struct *work);
64
65 /**
66 * batadv_mcast_start_timer() - schedule the multicast periodic worker
67 * @bat_priv: the bat priv with all the soft interface information
68 */
69 static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
70 {
71 queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
72 msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
73 }
74
75 /**
76 * batadv_mcast_has_bridge() - check whether the soft-iface is bridged
77 * @bat_priv: the bat priv with all the soft interface information
78 *
79 * Checks whether there is a bridge on top of our soft interface.
80 *
81 * Return: true if there is a bridge, false otherwise.
82 */
83 static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
84 {
85 struct net_device *upper = bat_priv->soft_iface;
86
87 rcu_read_lock();
88 do {
89 upper = netdev_master_upper_dev_get_rcu(upper);
90 } while (upper && !(upper->priv_flags & IFF_EBRIDGE));
91 rcu_read_unlock();
92
93 return upper;
94 }
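/* The loop above walks up the chain of master devices, so a bridge is
 * detected even when it is not stacked directly on top of the soft
 * interface but sits further up, with another master device in between.
 */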
95
96 /**
97 * batadv_mcast_mla_flags_get() - get the new multicast flags
98 * @bat_priv: the bat priv with all the soft interface information
99 *
100 * Return: A set of flags for the current/next TVLV, querier and
101 * bridge state.
102 */
103 static struct batadv_mcast_mla_flags
104 batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
105 {
106 struct net_device *dev = bat_priv->soft_iface;
107 struct batadv_mcast_querier_state *qr4, *qr6;
108 struct batadv_mcast_mla_flags mla_flags;
109
110 memset(&mla_flags, 0, sizeof(mla_flags));
111 mla_flags.enabled = 1;
112
113 if (!batadv_mcast_has_bridge(bat_priv))
114 return mla_flags;
115
116 mla_flags.bridged = 1;
117 qr4 = &mla_flags.querier_ipv4;
118 qr6 = &mla_flags.querier_ipv6;
119
120 if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
121 pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");
122
123 qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
124 qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);
125
126 qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
127 qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);
128
129 mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;
130
131 /* 1) If no querier exists at all, then multicast listeners on
132 * our local TT clients behind the bridge will keep silent.
133 * 2) If the selected querier is on one of our local TT clients,
134 * behind the bridge, then this querier might shadow multicast
135 * listeners on our local TT clients, behind this bridge.
136 *
137 * In both cases, we signal the other batman nodes that
138 * we need all multicast traffic of the affected protocol.
139 */
140 if (!qr4->exists || qr4->shadowing)
141 mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
142
143 if (!qr6->exists || qr6->shadowing)
144 mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
145
146 return mla_flags;
147 }
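/* To illustrate the resulting announcements: without a bridge on top of
 * the soft interface only mla_flags.enabled is set and tvlv_flags stays
 * empty. With a bridge, BATADV_MCAST_WANT_ALL_UNSNOOPABLES is always
 * announced, BATADV_MCAST_WANT_ALL_IPV4 is added if no IGMP querier
 * exists or the selected one sits behind our bridge segment, and
 * BATADV_MCAST_WANT_ALL_IPV6 is added analogously for MLD.
 */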
148
149 /**
150 * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
151 * @soft_iface: netdev struct of the mesh interface
152 *
153 * If the given soft interface has a bridge on top, then the refcount
154 * of that bridge's net device is increased.
155 *
156 * Return: NULL if no such bridge exists. Otherwise the net device of the
157 * bridge.
158 */
159 static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
160 {
161 struct net_device *upper = soft_iface;
162
163 rcu_read_lock();
164 do {
165 upper = netdev_master_upper_dev_get_rcu(upper);
166 } while (upper && !(upper->priv_flags & IFF_EBRIDGE));
167
168 if (upper)
169 dev_hold(upper);
170 rcu_read_unlock();
171
172 return upper;
173 }
174
175 /**
176 * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
177 * @mcast_addr: the multicast address to check
178 * @mcast_list: the list with multicast addresses to search in
179 *
180 * Return: true if the given address is already in the given list.
181 * Otherwise returns false.
182 */
183 static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
184 struct hlist_head *mcast_list)
185 {
186 struct batadv_hw_addr *mcast_entry;
187
188 hlist_for_each_entry(mcast_entry, mcast_list, list)
189 if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
190 return true;
191
192 return false;
193 }
194
195 /**
196 * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
197 * @dev: the device to collect multicast addresses from
198 * @mcast_list: a list to put found addresses into
199 * @flags: flags indicating the new multicast state
200 *
201 * Collects multicast addresses of IPv4 multicast listeners residing
202 * on this kernel on the given soft interface, dev, in
203 * the given mcast_list. In general, these are multicast listeners
204 * registered by multicast-receiving applications running directly on this node.
205 *
206 * Return: -ENOMEM on memory allocation error or the number of
207 * items added to the mcast_list otherwise.
208 */
209 static int
210 batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
211 struct hlist_head *mcast_list,
212 struct batadv_mcast_mla_flags *flags)
213 {
214 struct batadv_hw_addr *new;
215 struct in_device *in_dev;
216 u8 mcast_addr[ETH_ALEN];
217 struct ip_mc_list *pmc;
218 int ret = 0;
219
220 if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
221 return 0;
222
223 rcu_read_lock();
224
225 in_dev = __in_dev_get_rcu(dev);
226 if (!in_dev) {
227 rcu_read_unlock();
228 return 0;
229 }
230
231 for (pmc = rcu_dereference(in_dev->mc_list); pmc;
232 pmc = rcu_dereference(pmc->next_rcu)) {
233 ip_eth_mc_map(pmc->multiaddr, mcast_addr);
234
235 if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
236 continue;
237
238 new = kmalloc(sizeof(*new), GFP_ATOMIC);
239 if (!new) {
240 ret = -ENOMEM;
241 break;
242 }
243
244 ether_addr_copy(new->addr, mcast_addr);
245 hlist_add_head(&new->list, mcast_list);
246 ret++;
247 }
248 rcu_read_unlock();
249
250 return ret;
251 }
252
253 /**
254 * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
255 * @dev: the device to collect multicast addresses from
256 * @mcast_list: a list to put found addresses into
257 * @flags: flags indicating the new multicast state
258 *
259 * Collects multicast addresses of IPv6 multicast listeners residing
260 * on this kernel on the given soft interface, dev, in
261 * the given mcast_list. In general, these are multicast listeners
262 * registered by multicast-receiving applications running directly on this node.
263 *
264 * Return: -ENOMEM on memory allocation error or the number of
265 * items added to the mcast_list otherwise.
266 */
267 #if IS_ENABLED(CONFIG_IPV6)
268 static int
269 batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
270 struct hlist_head *mcast_list,
271 struct batadv_mcast_mla_flags *flags)
272 {
273 struct batadv_hw_addr *new;
274 struct inet6_dev *in6_dev;
275 u8 mcast_addr[ETH_ALEN];
276 struct ifmcaddr6 *pmc6;
277 int ret = 0;
278
279 if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
280 return 0;
281
282 rcu_read_lock();
283
284 in6_dev = __in6_dev_get(dev);
285 if (!in6_dev) {
286 rcu_read_unlock();
287 return 0;
288 }
289
290 read_lock_bh(&in6_dev->lock);
291 for (pmc6 = in6_dev->mc_list; pmc6; pmc6 = pmc6->next) {
292 if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
293 IPV6_ADDR_SCOPE_LINKLOCAL)
294 continue;
295
296 ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);
297
298 if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
299 continue;
300
301 new = kmalloc(sizeof(*new), GFP_ATOMIC);
302 if (!new) {
303 ret = -ENOMEM;
304 break;
305 }
306
307 ether_addr_copy(new->addr, mcast_addr);
308 hlist_add_head(&new->list, mcast_list);
309 ret++;
310 }
311 read_unlock_bh(&in6_dev->lock);
312 rcu_read_unlock();
313
314 return ret;
315 }
316 #else
317 static inline int
318 batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
319 struct hlist_head *mcast_list,
320 struct batadv_mcast_mla_flags *flags)
321 {
322 return 0;
323 }
324 #endif
325
326 /**
327 * batadv_mcast_mla_softif_get() - get softif multicast listeners
328 * @dev: the device to collect multicast addresses from
329 * @mcast_list: a list to put found addresses into
330 * @flags: flags indicating the new multicast state
331 *
332 * Collects multicast addresses of multicast listeners residing
333 * on this kernel on the given soft interface, dev, in
334 * the given mcast_list. In general, these are multicast listeners
335 * registered by multicast-receiving applications running directly on this node.
336 *
337 * If there is a bridge interface on top of dev, collects from that one
338 * instead. Just like with IP addresses and routes, multicast listeners
339 * will(/should) register to the bridge interface instead of an
340 * enslaved bat0.
341 *
342 * Return: -ENOMEM on memory allocation error or the number of
343 * items added to the mcast_list otherwise.
344 */
345 static int
346 batadv_mcast_mla_softif_get(struct net_device *dev,
347 struct hlist_head *mcast_list,
348 struct batadv_mcast_mla_flags *flags)
349 {
350 struct net_device *bridge = batadv_mcast_get_bridge(dev);
351 int ret4, ret6 = 0;
352
353 if (bridge)
354 dev = bridge;
355
356 ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
357 if (ret4 < 0)
358 goto out;
359
360 ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
361 if (ret6 < 0) {
362 ret4 = 0;
363 goto out;
364 }
365
366 out:
367 if (bridge)
368 dev_put(bridge);
369
370 return ret4 + ret6;
371 }
372
373 /**
374 * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
375 * @dst: destination to write to - a multicast MAC address
376 * @src: source to read from - a multicast IP address
377 *
378 * Converts a given multicast IPv4/IPv6 address from a bridge
379 * to its matching multicast MAC address and copies it into the given
380 * destination buffer.
381 *
382 * Caller needs to make sure the destination buffer can hold
383 * at least ETH_ALEN bytes.
384 */
385 static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
386 {
387 if (src->proto == htons(ETH_P_IP))
388 ip_eth_mc_map(src->u.ip4, dst);
389 #if IS_ENABLED(CONFIG_IPV6)
390 else if (src->proto == htons(ETH_P_IPV6))
391 ipv6_eth_mc_map(&src->u.ip6, dst);
392 #endif
393 else
394 eth_zero_addr(dst);
395 }
396
397 /**
398 * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
399 * @dev: a bridge slave whose bridge to collect multicast addresses from
400 * @mcast_list: a list to put found addresses into
401 * @flags: flags indicating the new multicast state
402 *
403 * Collects multicast addresses of multicast listeners residing
404 * on foreign, non-mesh devices which we gave access to our mesh via
405 * a bridge on top of the given soft interface, dev, in the given
406 * mcast_list.
407 *
408 * Return: -ENOMEM on memory allocation error or the number of
409 * items added to the mcast_list otherwise.
410 */
411 static int batadv_mcast_mla_bridge_get(struct net_device *dev,
412 struct hlist_head *mcast_list,
413 struct batadv_mcast_mla_flags *flags)
414 {
415 struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
416 bool all_ipv4 = flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4;
417 bool all_ipv6 = flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6;
418 struct br_ip_list *br_ip_entry, *tmp;
419 struct batadv_hw_addr *new;
420 u8 mcast_addr[ETH_ALEN];
421 int ret;
422
423 /* we don't need to detect these devices/listeners, the IGMP/MLD
424 * snooping code of the Linux bridge already does that for us
425 */
426 ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
427 if (ret < 0)
428 goto out;
429
430 list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
431 if (all_ipv4 && br_ip_entry->addr.proto == htons(ETH_P_IP))
432 continue;
433
434 if (all_ipv6 && br_ip_entry->addr.proto == htons(ETH_P_IPV6))
435 continue;
436
437 batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
438 if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
439 continue;
440
441 new = kmalloc(sizeof(*new), GFP_ATOMIC);
442 if (!new) {
443 ret = -ENOMEM;
444 break;
445 }
446
447 ether_addr_copy(new->addr, mcast_addr);
448 hlist_add_head(&new->list, mcast_list);
449 }
450
451 out:
452 list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
453 list_del(&br_ip_entry->list);
454 kfree(br_ip_entry);
455 }
456
457 return ret;
458 }
459
460 /**
461 * batadv_mcast_mla_list_free() - free a list of multicast addresses
462 * @mcast_list: the list to free
463 *
464 * Removes and frees all items in the given mcast_list.
465 */
466 static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
467 {
468 struct batadv_hw_addr *mcast_entry;
469 struct hlist_node *tmp;
470
471 hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
472 hlist_del(&mcast_entry->list);
473 kfree(mcast_entry);
474 }
475 }
476
477 /**
478 * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
479 * @bat_priv: the bat priv with all the soft interface information
480 * @mcast_list: a list of addresses which should _not_ be removed
481 *
482 * Retracts the announcement of any multicast listener from the
483 * translation table except the ones listed in the given mcast_list.
484 *
485 * If mcast_list is NULL then all are retracted.
486 */
487 static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
488 struct hlist_head *mcast_list)
489 {
490 struct batadv_hw_addr *mcast_entry;
491 struct hlist_node *tmp;
492
493 hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
494 list) {
495 if (mcast_list &&
496 batadv_mcast_mla_is_duplicate(mcast_entry->addr,
497 mcast_list))
498 continue;
499
500 batadv_tt_local_remove(bat_priv, mcast_entry->addr,
501 BATADV_NO_FLAGS,
502 "mcast TT outdated", false);
503
504 hlist_del(&mcast_entry->list);
505 kfree(mcast_entry);
506 }
507 }
508
509 /**
510 * batadv_mcast_mla_tt_add() - add multicast listener announcements
511 * @bat_priv: the bat priv with all the soft interface information
512 * @mcast_list: a list of addresses which are going to get added
513 *
514 * Adds multicast listener announcements from the given mcast_list to the
515 * translation table if they have not been added yet.
516 */
517 static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
518 struct hlist_head *mcast_list)
519 {
520 struct batadv_hw_addr *mcast_entry;
521 struct hlist_node *tmp;
522
523 if (!mcast_list)
524 return;
525
526 hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
527 if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
528 &bat_priv->mcast.mla_list))
529 continue;
530
531 if (!batadv_tt_local_add(bat_priv->soft_iface,
532 mcast_entry->addr, BATADV_NO_FLAGS,
533 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
534 continue;
535
536 hlist_del(&mcast_entry->list);
537 hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
538 }
539 }
540
541 /**
542 * batadv_mcast_querier_log() - debug output regarding the querier status on
543 * link
544 * @bat_priv: the bat priv with all the soft interface information
545 * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
546 * @old_state: the previous querier state on our link
547 * @new_state: the new querier state on our link
548 *
549 * Outputs debug messages to the logging facility with log level 'mcast'
550 * regarding changes to the querier status on the link which are relevant
551 * to our multicast optimizations.
552 *
553 * Usually this is about whether a querier appeared or vanished in
554 * our mesh or whether the querier is in the suboptimal position of being
555 * behind our local bridge segment: Snooping switches will directly
556 * forward listener reports to the querier, so batman-adv and
557 * the bridge will potentially not see these listeners - the querier is
558 * then potentially shadowing listeners from us.
559 *
560 * This is only interesting for nodes with a bridge on top of their
561 * soft interface.
562 */
563 static void
564 batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
565 struct batadv_mcast_querier_state *old_state,
566 struct batadv_mcast_querier_state *new_state)
567 {
568 if (!old_state->exists && new_state->exists)
569 batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
570 str_proto);
571 else if (old_state->exists && !new_state->exists)
572 batadv_info(bat_priv->soft_iface,
573 "%s Querier disappeared - multicast optimizations disabled\n",
574 str_proto);
575 else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
576 batadv_info(bat_priv->soft_iface,
577 "No %s Querier present - multicast optimizations disabled\n",
578 str_proto);
579
580 if (new_state->exists) {
581 if ((!old_state->shadowing && new_state->shadowing) ||
582 (!old_state->exists && new_state->shadowing))
583 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
584 "%s Querier is behind our bridged segment: Might shadow listeners\n",
585 str_proto);
586 else if (old_state->shadowing && !new_state->shadowing)
587 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
588 "%s Querier is not behind our bridged segment\n",
589 str_proto);
590 }
591 }
592
593 /**
594 * batadv_mcast_bridge_log() - debug output for topology changes in bridged
595 * setups
596 * @bat_priv: the bat priv with all the soft interface information
597 * @new_flags: flags indicating the new multicast state
598 *
599 * If no bridges are ever used on this node, then this function does nothing.
600 *
601 * Otherwise this function outputs debug information to the 'mcast' log level
602 * which might be relevant to our multicast optimizations.
603 *
604 * More precisely, it outputs information when a bridge interface is added or
605 * removed from a soft interface. And when a bridge is present, it further
606 * outputs information about the querier state which is relevant for the
607 * multicast flags this node is going to set.
608 */
609 static void
610 batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
611 struct batadv_mcast_mla_flags *new_flags)
612 {
613 struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;
614
615 if (!old_flags->bridged && new_flags->bridged)
616 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
617 "Bridge added: Setting Unsnoopables(U)-flag\n");
618 else if (old_flags->bridged && !new_flags->bridged)
619 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
620 "Bridge removed: Unsetting Unsnoopables(U)-flag\n");
621
622 if (new_flags->bridged) {
623 batadv_mcast_querier_log(bat_priv, "IGMP",
624 &old_flags->querier_ipv4,
625 &new_flags->querier_ipv4);
626 batadv_mcast_querier_log(bat_priv, "MLD",
627 &old_flags->querier_ipv6,
628 &new_flags->querier_ipv6);
629 }
630 }
631
632 /**
633 * batadv_mcast_flags_log() - output debug information about mcast flag changes
634 * @bat_priv: the bat priv with all the soft interface information
635 * @flags: TVLV flags indicating the new multicast state
636 *
637 * Whenever the multicast TVLV flags this node announces change, this notifies
638 * userspace via the 'mcast' log level.
639 */
640 static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
641 {
642 bool old_enabled = bat_priv->mcast.mla_flags.enabled;
643 u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
644 char str_old_flags[] = "[...]";
645
646 sprintf(str_old_flags, "[%c%c%c]",
647 (old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
648 (old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
649 (old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
650
651 batadv_dbg(BATADV_DBG_MCAST, bat_priv,
652 "Changing multicast flags from '%s' to '[%c%c%c]'\n",
653 old_enabled ? str_old_flags : "<undefined>",
654 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
655 (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
656 (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
657 }
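/* For example, when an MLD querier disappears behind our bridge this
 * typically logs a line like
 *   "Changing multicast flags from '[U..]' to '[U.6]'"
 * while the very first announcement shows '<undefined>' as the old value.
 */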
658
659 /**
660 * batadv_mcast_mla_flags_update() - update multicast flags
661 * @bat_priv: the bat priv with all the soft interface information
662 * @flags: flags indicating the new multicast state
663 *
664 * Updates the own multicast tvlv with our current multicast related settings,
665 * capabilities and inabilities.
666 */
667 static void
668 batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
669 struct batadv_mcast_mla_flags *flags)
670 {
671 struct batadv_tvlv_mcast_data mcast_data;
672
673 if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
674 return;
675
676 batadv_mcast_bridge_log(bat_priv, flags);
677 batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);
678
679 mcast_data.flags = flags->tvlv_flags;
680 memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));
681
682 batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
683 &mcast_data, sizeof(mcast_data));
684
685 bat_priv->mcast.mla_flags = *flags;
686 }
687
688 /**
689 * __batadv_mcast_mla_update() - update the own MLAs
690 * @bat_priv: the bat priv with all the soft interface information
691 *
692 * Updates the own multicast listener announcements in the translation
693 * table as well as the own, announced multicast tvlv container.
694 *
695 * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
696 * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
697 * ensured by the non-parallel execution of the worker this function
698 * belongs to.
699 */
700 static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
701 {
702 struct net_device *soft_iface = bat_priv->soft_iface;
703 struct hlist_head mcast_list = HLIST_HEAD_INIT;
704 struct batadv_mcast_mla_flags flags;
705 int ret;
706
707 flags = batadv_mcast_mla_flags_get(bat_priv);
708
709 ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
710 if (ret < 0)
711 goto out;
712
713 ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
714 if (ret < 0)
715 goto out;
716
717 spin_lock(&bat_priv->mcast.mla_lock);
718 batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
719 batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
720 batadv_mcast_mla_flags_update(bat_priv, &flags);
721 spin_unlock(&bat_priv->mcast.mla_lock);
722
723 out:
724 batadv_mcast_mla_list_free(&mcast_list);
725 }
726
727 /**
728 * batadv_mcast_mla_update() - update the own MLAs
729 * @work: kernel work struct
730 *
731 * Updates the own multicast listener announcements in the translation
732 * table as well as the own, announced multicast tvlv container.
733 *
734 * In the end, reschedules the work timer.
735 */
736 static void batadv_mcast_mla_update(struct work_struct *work)
737 {
738 struct delayed_work *delayed_work;
739 struct batadv_priv_mcast *priv_mcast;
740 struct batadv_priv *bat_priv;
741
742 delayed_work = to_delayed_work(work);
743 priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
744 bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
745
746 __batadv_mcast_mla_update(bat_priv);
747 batadv_mcast_start_timer(bat_priv);
748 }
749
750 /**
751 * batadv_mcast_is_report_ipv4() - check for IGMP reports
752 * @skb: the ethernet frame destined for the mesh
753 *
754 * This call might reallocate skb data.
755 *
756 * Checks whether the given frame is a valid IGMP report.
757 *
758 * Return: If so then true, otherwise false.
759 */
760 static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
761 {
762 if (ip_mc_check_igmp(skb) < 0)
763 return false;
764
765 switch (igmp_hdr(skb)->type) {
766 case IGMP_HOST_MEMBERSHIP_REPORT:
767 case IGMPV2_HOST_MEMBERSHIP_REPORT:
768 case IGMPV3_HOST_MEMBERSHIP_REPORT:
769 return true;
770 }
771
772 return false;
773 }
774
775 /**
776 * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
777 * potential
778 * @bat_priv: the bat priv with all the soft interface information
779 * @skb: the IPv4 packet to check
780 * @is_unsnoopable: stores whether the destination is snoopable
781 *
782 * Checks whether the given IPv4 packet has the potential to be forwarded with a
783 * mode more optimal than classic flooding.
784 *
785 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
786 * allocation failure.
787 */
788 static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
789 struct sk_buff *skb,
790 bool *is_unsnoopable)
791 {
792 struct iphdr *iphdr;
793
794 /* We might fail due to out-of-memory -> drop it */
795 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
796 return -ENOMEM;
797
798 if (batadv_mcast_is_report_ipv4(skb))
799 return -EINVAL;
800
801 iphdr = ip_hdr(skb);
802
803 /* TODO: Implement Multicast Router Discovery (RFC4286),
804 * then allow scope > link local, too
805 */
806 if (!ipv4_is_local_multicast(iphdr->daddr))
807 return -EINVAL;
808
809 /* link-local multicast listeners behind a bridge are
810 * not snoopable (see RFC4541, section 2.1.2.2)
811 */
812 *is_unsnoopable = true;
813
814 return 0;
815 }
816
817 /**
818 * batadv_mcast_is_report_ipv6() - check for MLD reports
819 * @skb: the ethernet frame destined for the mesh
820 *
821 * This call might reallocate skb data.
822 *
823 * Checks whether the given frame is a valid MLD report.
824 *
825 * Return: If so then true, otherwise false.
826 */
827 static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
828 {
829 if (ipv6_mc_check_mld(skb) < 0)
830 return false;
831
832 switch (icmp6_hdr(skb)->icmp6_type) {
833 case ICMPV6_MGM_REPORT:
834 case ICMPV6_MLD2_REPORT:
835 return true;
836 }
837
838 return false;
839 }
840
841 /**
842 * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
843 * potential
844 * @bat_priv: the bat priv with all the soft interface information
845 * @skb: the IPv6 packet to check
846 * @is_unsnoopable: stores whether the destination is snoopable
847 *
848 * Checks whether the given IPv6 packet has the potential to be forwarded with a
849 * mode more optimal than classic flooding.
850 *
851 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM if we are out of memory.
852 */
853 static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
854 struct sk_buff *skb,
855 bool *is_unsnoopable)
856 {
857 struct ipv6hdr *ip6hdr;
858
859 /* We might fail due to out-of-memory -> drop it */
860 if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
861 return -ENOMEM;
862
863 if (batadv_mcast_is_report_ipv6(skb))
864 return -EINVAL;
865
866 ip6hdr = ipv6_hdr(skb);
867
868 /* TODO: Implement Multicast Router Discovery (RFC4286),
869 * then allow scope > link local, too
870 */
871 if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) != IPV6_ADDR_SCOPE_LINKLOCAL)
872 return -EINVAL;
873
874 /* link-local-all-nodes multicast listeners behind a bridge are
875 * not snoopable (see RFC4541, section 3, paragraph 3)
876 */
877 if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
878 *is_unsnoopable = true;
879
880 return 0;
881 }
882
883 /**
884 * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
885 * @bat_priv: the bat priv with all the soft interface information
886 * @skb: the multicast frame to check
887 * @is_unsnoopable: stores whether the destination is snoopable
888 *
889 * Checks whether the given multicast ethernet frame has the potential to be
890 * forwarded with a mode more optimal than classic flooding.
891 *
892 * Return: If so then 0. Otherwise -EINVAL or -ENOMEM if we are out of memory.
893 */
894 static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
895 struct sk_buff *skb,
896 bool *is_unsnoopable)
897 {
898 struct ethhdr *ethhdr = eth_hdr(skb);
899
900 if (!atomic_read(&bat_priv->multicast_mode))
901 return -EINVAL;
902
903 switch (ntohs(ethhdr->h_proto)) {
904 case ETH_P_IP:
905 return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
906 is_unsnoopable);
907 case ETH_P_IPV6:
908 if (!IS_ENABLED(CONFIG_IPV6))
909 return -EINVAL;
910
911 return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
912 is_unsnoopable);
913 default:
914 return -EINVAL;
915 }
916 }
917
918 /**
919 * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
920 * interest
921 * @bat_priv: the bat priv with all the soft interface information
922 * @ethhdr: ethernet header of a packet
923 *
924 * Return: the number of nodes which want all IPv4 multicast traffic if the
925 * given ethhdr is from an IPv4 packet, or the number of nodes which want all
926 * IPv6 multicast traffic if it is from an IPv6 packet.
927 */
928 static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
929 struct ethhdr *ethhdr)
930 {
931 switch (ntohs(ethhdr->h_proto)) {
932 case ETH_P_IP:
933 return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
934 case ETH_P_IPV6:
935 return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
936 default:
937 /* we shouldn't be here... */
938 return 0;
939 }
940 }
941
942 /**
943 * batadv_mcast_forw_tt_node_get() - get a multicast tt node
944 * @bat_priv: the bat priv with all the soft interface information
945 * @ethhdr: the ether header containing the multicast destination
946 *
947 * Return: an orig_node matching the multicast address provided by ethhdr
948 * via a translation table lookup. This increases the returned node's refcount.
949 */
950 static struct batadv_orig_node *
951 batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
952 struct ethhdr *ethhdr)
953 {
954 return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
955 BATADV_NO_FLAGS);
956 }
957
958 /**
959 * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
960 * @bat_priv: the bat priv with all the soft interface information
961 *
962 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
963 * increases its refcount.
964 */
965 static struct batadv_orig_node *
966 batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
967 {
968 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
969
970 rcu_read_lock();
971 hlist_for_each_entry_rcu(tmp_orig_node,
972 &bat_priv->mcast.want_all_ipv4_list,
973 mcast_want_all_ipv4_node) {
974 if (!kref_get_unless_zero(&tmp_orig_node->refcount))
975 continue;
976
977 orig_node = tmp_orig_node;
978 break;
979 }
980 rcu_read_unlock();
981
982 return orig_node;
983 }
984
985 /**
986 * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
987 * @bat_priv: the bat priv with all the soft interface information
988 *
989 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
990 * and increases its refcount.
991 */
992 static struct batadv_orig_node *
993 batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
994 {
995 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
996
997 rcu_read_lock();
998 hlist_for_each_entry_rcu(tmp_orig_node,
999 &bat_priv->mcast.want_all_ipv6_list,
1000 mcast_want_all_ipv6_node) {
1001 if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1002 continue;
1003
1004 orig_node = tmp_orig_node;
1005 break;
1006 }
1007 rcu_read_unlock();
1008
1009 return orig_node;
1010 }
1011
1012 /**
1013 * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
1014 * @bat_priv: the bat priv with all the soft interface information
1015 * @ethhdr: an ethernet header to determine the protocol family from
1016 *
1017 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
1018 * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
1019 * increases its refcount.
1020 */
1021 static struct batadv_orig_node *
1022 batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
1023 struct ethhdr *ethhdr)
1024 {
1025 switch (ntohs(ethhdr->h_proto)) {
1026 case ETH_P_IP:
1027 return batadv_mcast_forw_ipv4_node_get(bat_priv);
1028 case ETH_P_IPV6:
1029 return batadv_mcast_forw_ipv6_node_get(bat_priv);
1030 default:
1031 /* we shouldn't be here... */
1032 return NULL;
1033 }
1034 }
1035
1036 /**
1037 * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
1038 * @bat_priv: the bat priv with all the soft interface information
1039 *
1040 * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
1041 * set and increases its refcount.
1042 */
1043 static struct batadv_orig_node *
1044 batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
1045 {
1046 struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
1047
1048 rcu_read_lock();
1049 hlist_for_each_entry_rcu(tmp_orig_node,
1050 &bat_priv->mcast.want_all_unsnoopables_list,
1051 mcast_want_all_unsnoopables_node) {
1052 if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1053 continue;
1054
1055 orig_node = tmp_orig_node;
1056 break;
1057 }
1058 rcu_read_unlock();
1059
1060 return orig_node;
1061 }
1062
1063 /**
1064 * batadv_mcast_forw_mode() - check on how to forward a multicast packet
1065 * @bat_priv: the bat priv with all the soft interface information
1066 * @skb: The multicast packet to check
1067 * @orig: an originator to be set to forward the skb to
1068 *
1069 * Return: the forwarding mode as enum batadv_forw_mode and in case of
1070 * BATADV_FORW_SINGLE set the orig to the single originator the skb
1071 * should be forwarded to.
1072 */
1073 enum batadv_forw_mode
1074 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
1075 struct batadv_orig_node **orig)
1076 {
1077 int ret, tt_count, ip_count, unsnoop_count, total_count;
1078 bool is_unsnoopable = false;
1079 unsigned int mcast_fanout;
1080 struct ethhdr *ethhdr;
1081
1082 ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable);
1083 if (ret == -ENOMEM)
1084 return BATADV_FORW_NONE;
1085 else if (ret < 0)
1086 return BATADV_FORW_ALL;
1087
1088 ethhdr = eth_hdr(skb);
1089
1090 tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
1091 BATADV_NO_FLAGS);
1092 ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
1093 unsnoop_count = !is_unsnoopable ? 0 :
1094 atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
1095
1096 total_count = tt_count + ip_count + unsnoop_count;
1097
1098 switch (total_count) {
1099 case 1:
1100 if (tt_count)
1101 *orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
1102 else if (ip_count)
1103 *orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
1104 else if (unsnoop_count)
1105 *orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);
1106
1107 if (*orig)
1108 return BATADV_FORW_SINGLE;
1109
1110 /* fall through */
1111 case 0:
1112 return BATADV_FORW_NONE;
1113 default:
1114 mcast_fanout = atomic_read(&bat_priv->multicast_fanout);
1115
1116 if (!unsnoop_count && total_count <= mcast_fanout)
1117 return BATADV_FORW_SOME;
1118 }
1119
1120 return BATADV_FORW_ALL;
1121 }
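/* Summarizing the decision above: zero interested nodes means no
 * forwarding (BATADV_FORW_NONE); exactly one node means a single unicast,
 * provided its originator can be looked up (BATADV_FORW_SINGLE); up to
 * multicast_fanout nodes with no want-all-unsnoopables listeners involved
 * means per-node unicasts (BATADV_FORW_SOME); anything else falls back to
 * classic flooding (BATADV_FORW_ALL).
 */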
1122
1123 /**
1124 * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
1125 * @bat_priv: the bat priv with all the soft interface information
1126 * @skb: the multicast packet to transmit
1127 * @vid: the vlan identifier
1128 *
1129 * Sends copies of a frame with multicast destination to any multicast
1130 * listener registered in the translation table. A transmission is performed
1131 * via a batman-adv unicast packet for each such destination node.
1132 *
1133 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1134 * otherwise.
1135 */
1136 static int
1137 batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
1138 unsigned short vid)
1139 {
1140 int ret = NET_XMIT_SUCCESS;
1141 struct sk_buff *newskb;
1142
1143 struct batadv_tt_orig_list_entry *orig_entry;
1144
1145 struct batadv_tt_global_entry *tt_global;
1146 const u8 *addr = eth_hdr(skb)->h_dest;
1147
1148 tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
1149 if (!tt_global)
1150 goto out;
1151
1152 rcu_read_lock();
1153 hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
1154 newskb = skb_copy(skb, GFP_ATOMIC);
1155 if (!newskb) {
1156 ret = NET_XMIT_DROP;
1157 break;
1158 }
1159
1160 batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
1161 orig_entry->orig_node, vid);
1162 }
1163 rcu_read_unlock();
1164
1165 batadv_tt_global_entry_put(tt_global);
1166
1167 out:
1168 return ret;
1169 }
1170
1171 /**
1172 * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
1173 * @bat_priv: the bat priv with all the soft interface information
1174 * @skb: the multicast packet to transmit
1175 * @vid: the vlan identifier
1176 *
1177 * Sends copies of a frame with multicast destination to any node with a
1178 * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
1179 * batman-adv unicast packet for each such destination node.
1180 *
1181 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1182 * otherwise.
1183 */
1184 static int
1185 batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
1186 struct sk_buff *skb, unsigned short vid)
1187 {
1188 struct batadv_orig_node *orig_node;
1189 int ret = NET_XMIT_SUCCESS;
1190 struct sk_buff *newskb;
1191
1192 rcu_read_lock();
1193 hlist_for_each_entry_rcu(orig_node,
1194 &bat_priv->mcast.want_all_ipv4_list,
1195 mcast_want_all_ipv4_node) {
1196 newskb = skb_copy(skb, GFP_ATOMIC);
1197 if (!newskb) {
1198 ret = NET_XMIT_DROP;
1199 break;
1200 }
1201
1202 batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
1203 orig_node, vid);
1204 }
1205 rcu_read_unlock();
1206 return ret;
1207 }
1208
1209 /**
1210 * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
1211 * @bat_priv: the bat priv with all the soft interface information
1212 * @skb: The multicast packet to transmit
1213 * @vid: the vlan identifier
1214 *
1215 * Sends copies of a frame with multicast destination to any node with a
1216 * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
1217 * batman-adv unicast packet for each such destination node.
1218 *
1219 * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1220 * otherwise.
1221 */
1222 static int
1223 batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
1224 struct sk_buff *skb, unsigned short vid)
1225 {
1226 struct batadv_orig_node *orig_node;
1227 int ret = NET_XMIT_SUCCESS;
1228 struct sk_buff *newskb;
1229
1230 rcu_read_lock();
1231 hlist_for_each_entry_rcu(orig_node,
1232 &bat_priv->mcast.want_all_ipv6_list,
1233 mcast_want_all_ipv6_node) {
1234 newskb = skb_copy(skb, GFP_ATOMIC);
1235 if (!newskb) {
1236 ret = NET_XMIT_DROP;
1237 break;
1238 }
1239
1240 batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
1241 orig_node, vid);
1242 }
1243 rcu_read_unlock();
1244 return ret;
1245 }
1246
1247 /**
1248 * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
1249 * @bat_priv: the bat priv with all the soft interface information
1250 * @skb: the multicast packet to transmit
1251 * @vid: the vlan identifier
1252 *
1253 * Sends copies of a frame with multicast destination to any node with a
1254 * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
1255 * transmission is performed via a batman-adv unicast packet for each such
1256 * destination node.
1257 *
1258 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1259 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1260 */
1261 static int
1262 batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
1263 struct sk_buff *skb, unsigned short vid)
1264 {
1265 switch (ntohs(eth_hdr(skb)->h_proto)) {
1266 case ETH_P_IP:
1267 return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
1268 case ETH_P_IPV6:
1269 return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
1270 default:
1271 /* we shouldn't be here... */
1272 return NET_XMIT_DROP;
1273 }
1274 }
1275
1276 /**
1277 * batadv_mcast_forw_send() - send packet to any detected multicast recipient
1278 * @bat_priv: the bat priv with all the soft interface information
1279 * @skb: the multicast packet to transmit
1280 * @vid: the vlan identifier
1281 *
1282 * Sends copies of a frame with multicast destination to any node that signaled
1283 * interest in it, either via the translation table or via the corresponding
1284 * want-all flags. A transmission is performed via a batman-adv unicast packet
1285 * for each such destination node.
1286 *
1287 * The given skb is consumed/freed.
1288 *
1289 * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1290 * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1291 */
1292 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
1293 unsigned short vid)
1294 {
1295 int ret;
1296
1297 ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
1298 if (ret != NET_XMIT_SUCCESS) {
1299 kfree_skb(skb);
1300 return ret;
1301 }
1302
1303 ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
1304 if (ret != NET_XMIT_SUCCESS) {
1305 kfree_skb(skb);
1306 return ret;
1307 }
1308
1309 consume_skb(skb);
1310 return ret;
1311 }
1312
1313 /**
1314 * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
1315 * @bat_priv: the bat priv with all the soft interface information
1316 * @orig: the orig_node whose multicast state might have changed
1317 * @mcast_flags: flags indicating the new multicast state
1318 *
1319 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
1320 * orig, has toggled, then this method updates the counter and list accordingly.
1321 *
1322 * Caller needs to hold orig->mcast_handler_lock.
1323 */
1324 static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
1325 struct batadv_orig_node *orig,
1326 u8 mcast_flags)
1327 {
1328 struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
1329 struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
1330
1331 lockdep_assert_held(&orig->mcast_handler_lock);
1332
1333 /* switched from flag unset to set */
1334 if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
1335 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
1336 atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
1337
1338 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1339 /* flag checks above + mcast_handler_lock prevents this */
1340 WARN_ON(!hlist_unhashed(node));
1341
1342 hlist_add_head_rcu(node, head);
1343 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1344 /* switched from flag set to unset */
1345 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
1346 orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
1347 atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
1348
1349 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1350 /* flag checks above + mcast_handler_lock prevents this */
1351 WARN_ON(hlist_unhashed(node));
1352
1353 hlist_del_init_rcu(node);
1354 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1355 }
1356 }
1357
1358 /**
1359 * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
1360 * @bat_priv: the bat priv with all the soft interface information
1361 * @orig: the orig_node whose multicast state might have changed
1362 * @mcast_flags: flags indicating the new multicast state
1363 *
1364 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
1365 * toggled, then this method updates the counter and list accordingly.
1366 *
1367 * Caller needs to hold orig->mcast_handler_lock.
1368 */
1369 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
1370 struct batadv_orig_node *orig,
1371 u8 mcast_flags)
1372 {
1373 struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
1374 struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
1375
1376 lockdep_assert_held(&orig->mcast_handler_lock);
1377
1378 /* switched from flag unset to set */
1379 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
1380 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
1381 atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
1382
1383 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1384 /* flag checks above + mcast_handler_lock prevents this */
1385 WARN_ON(!hlist_unhashed(node));
1386
1387 hlist_add_head_rcu(node, head);
1388 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1389 /* switched from flag set to unset */
1390 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
1391 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
1392 atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
1393
1394 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1395 /* flag checks above + mcast_handler_lock prevents this */
1396 WARN_ON(hlist_unhashed(node));
1397
1398 hlist_del_init_rcu(node);
1399 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1400 }
1401 }
1402
1403 /**
1404 * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
1405 * @bat_priv: the bat priv with all the soft interface information
1406 * @orig: the orig_node whose multicast state might have changed
1407 * @mcast_flags: flags indicating the new multicast state
1408 *
1409 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
1410 * toggled, then this method updates the counter and list accordingly.
1411 *
1412 * Caller needs to hold orig->mcast_handler_lock.
1413 */
1414 static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
1415 struct batadv_orig_node *orig,
1416 u8 mcast_flags)
1417 {
1418 struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
1419 struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
1420
1421 lockdep_assert_held(&orig->mcast_handler_lock);
1422
1423 /* switched from flag unset to set */
1424 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
1425 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
1426 atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
1427
1428 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1429 /* flag checks above + mcast_handler_lock prevents this */
1430 WARN_ON(!hlist_unhashed(node));
1431
1432 hlist_add_head_rcu(node, head);
1433 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1434 /* switched from flag set to unset */
1435 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
1436 orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
1437 atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
1438
1439 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1440 /* flag checks above + mcast_handler_lock prevents this */
1441 WARN_ON(hlist_unhashed(node));
1442
1443 hlist_del_init_rcu(node);
1444 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1445 }
1446 }
1447
1448 /**
1449 * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
1450 * @bat_priv: the bat priv with all the soft interface information
1451 * @orig: the orig_node of the ogm
1452 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
1453 * @tvlv_value: tvlv buffer containing the multicast data
1454 * @tvlv_value_len: tvlv buffer length
1455 */
1456 static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
1457 struct batadv_orig_node *orig,
1458 u8 flags,
1459 void *tvlv_value,
1460 u16 tvlv_value_len)
1461 {
1462 bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
1463 u8 mcast_flags = BATADV_NO_FLAGS;
1464
1465 if (orig_mcast_enabled && tvlv_value &&
1466 tvlv_value_len >= sizeof(mcast_flags))
1467 mcast_flags = *(u8 *)tvlv_value;
1468
1469 if (!orig_mcast_enabled) {
1470 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
1471 mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
1472 }
1473
1474 spin_lock_bh(&orig->mcast_handler_lock);
1475
1476 if (orig_mcast_enabled &&
1477 !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
1478 set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
1479 } else if (!orig_mcast_enabled &&
1480 test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
1481 clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
1482 }
1483
1484 set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
1485
1486 batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
1487 batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
1488 batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
1489
1490 orig->mcast_flags = mcast_flags;
1491 spin_unlock_bh(&orig->mcast_handler_lock);
1492 }
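/* Note that an originator whose OGMs carry no multicast TVLV at all is
 * treated above as if it had asked for all IPv4 and all IPv6 multicast
 * traffic, so such a node keeps receiving multicast via the want-all
 * lists.
 */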
1493
1494 /**
1495 * batadv_mcast_init() - initialize the multicast optimizations structures
1496 * @bat_priv: the bat priv with all the soft interface information
1497 */
1498 void batadv_mcast_init(struct batadv_priv *bat_priv)
1499 {
1500 batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
1501 NULL, BATADV_TVLV_MCAST, 2,
1502 BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
1503
1504 INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
1505 batadv_mcast_start_timer(bat_priv);
1506 }
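/* The TVLV handler registered here parses version 2 multicast containers
 * from incoming OGMs, while the delayed work started via
 * batadv_mcast_start_timer() periodically refreshes our own announcements
 * through batadv_mcast_mla_update().
 */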
1507
1508 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
1509 /**
1510 * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
1511 * @bat_priv: the bat priv with all the soft interface information
1512 * @seq: debugfs table seq_file struct
1513 *
1514 * Prints our own multicast flags, including the more specific reasons why
1515 * they are set (that is, the bridge and querier state), to the debugfs
1516 * table specified via @seq.
1517 */
1518 static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
1519 struct seq_file *seq)
1520 {
1521 struct batadv_mcast_mla_flags *mla_flags = &bat_priv->mcast.mla_flags;
1522 char querier4, querier6, shadowing4, shadowing6;
1523 bool bridged = mla_flags->bridged;
1524 u8 flags = mla_flags->tvlv_flags;
1525
1526 if (bridged) {
1527 querier4 = mla_flags->querier_ipv4.exists ? '.' : '4';
1528 querier6 = mla_flags->querier_ipv6.exists ? '.' : '6';
1529 shadowing4 = mla_flags->querier_ipv4.shadowing ? '4' : '.';
1530 shadowing6 = mla_flags->querier_ipv6.shadowing ? '6' : '.';
1531 } else {
1532 querier4 = '?';
1533 querier6 = '?';
1534 shadowing4 = '?';
1535 shadowing6 = '?';
1536 }
1537
1538 seq_printf(seq, "Multicast flags (own flags: [%c%c%c])\n",
1539 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
1540 (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
1541 (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.');
1542 seq_printf(seq, "* Bridged [U]\t\t\t\t%c\n", bridged ? 'U' : '.');
1543 seq_printf(seq, "* No IGMP/MLD Querier [4/6]:\t\t%c/%c\n",
1544 querier4, querier6);
1545 seq_printf(seq, "* Shadowing IGMP/MLD Querier [4/6]:\t%c/%c\n",
1546 shadowing4, shadowing6);
1547 seq_puts(seq, "-------------------------------------------\n");
1548 seq_printf(seq, " %-10s %s\n", "Originator", "Flags");
1549 }
1550
1551 /**
1552 * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
1553 * @seq: seq file to print on
1554 * @offset: not used
1555 *
1556 * This prints a table of (primary) originators and their corresponding
1557 * multicast flags, including (in the header) our own.
1558 *
1559 * Return: always 0
1560 */
1561 int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
1562 {
1563 struct net_device *net_dev = (struct net_device *)seq->private;
1564 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1565 struct batadv_hard_iface *primary_if;
1566 struct batadv_hashtable *hash = bat_priv->orig_hash;
1567 struct batadv_orig_node *orig_node;
1568 struct hlist_head *head;
1569 u8 flags;
1570 u32 i;
1571
1572 primary_if = batadv_seq_print_text_primary_if_get(seq);
1573 if (!primary_if)
1574 return 0;
1575
1576 batadv_mcast_flags_print_header(bat_priv, seq);
1577
1578 for (i = 0; i < hash->size; i++) {
1579 head = &hash->table[i];
1580
1581 rcu_read_lock();
1582 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1583 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1584 &orig_node->capa_initialized))
1585 continue;
1586
1587 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1588 &orig_node->capabilities)) {
1589 seq_printf(seq, "%pM -\n", orig_node->orig);
1590 continue;
1591 }
1592
1593 flags = orig_node->mcast_flags;
1594
1595 seq_printf(seq, "%pM [%c%c%c]\n", orig_node->orig,
1596 (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)
1597 ? 'U' : '.',
1598 (flags & BATADV_MCAST_WANT_ALL_IPV4)
1599 ? '4' : '.',
1600 (flags & BATADV_MCAST_WANT_ALL_IPV6)
1601 ? '6' : '.');
1602 }
1603 rcu_read_unlock();
1604 }
1605
1606 batadv_hardif_put(primary_if);
1607
1608 return 0;
1609 }
1610 #endif
1611
1612 /**
1613 * batadv_mcast_mesh_info_put() - put multicast info into a netlink message
1614 * @msg: buffer for the message
1615 * @bat_priv: the bat priv with all the soft interface information
1616 *
1617 * Return: 0 or error code.
1618 */
1619 int batadv_mcast_mesh_info_put(struct sk_buff *msg,
1620 struct batadv_priv *bat_priv)
1621 {
1622 u32 flags = bat_priv->mcast.mla_flags.tvlv_flags;
1623 u32 flags_priv = BATADV_NO_FLAGS;
1624
1625 if (bat_priv->mcast.mla_flags.bridged) {
1626 flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;
1627
1628 if (bat_priv->mcast.mla_flags.querier_ipv4.exists)
1629 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
1630 if (bat_priv->mcast.mla_flags.querier_ipv6.exists)
1631 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
1632 if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing)
1633 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
1634 if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing)
1635 flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
1636 }
1637
1638 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
1639 nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
1640 return -EMSGSIZE;
1641
1642 return 0;
1643 }
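/* BATADV_ATTR_MCAST_FLAGS carries the TVLV flags as announced to the mesh,
 * while BATADV_ATTR_MCAST_FLAGS_PRIV exports the internal bridge and
 * querier state that led to them.
 */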
1644
1645 /**
1646 * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table
1647 * to a netlink socket
1648 * @msg: buffer for the message
1649 * @portid: netlink port
1650 * @cb: Control block containing additional options
1651 * @orig_node: originator to dump the multicast flags of
1652 *
1653 * Return: 0 or error code.
1654 */
1655 static int
1656 batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
1657 struct netlink_callback *cb,
1658 struct batadv_orig_node *orig_node)
1659 {
1660 void *hdr;
1661
1662 hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
1663 &batadv_netlink_family, NLM_F_MULTI,
1664 BATADV_CMD_GET_MCAST_FLAGS);
1665 if (!hdr)
1666 return -ENOBUFS;
1667
1668 genl_dump_check_consistent(cb, hdr);
1669
1670 if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
1671 orig_node->orig)) {
1672 genlmsg_cancel(msg, hdr);
1673 return -EMSGSIZE;
1674 }
1675
1676 if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1677 &orig_node->capabilities)) {
1678 if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
1679 orig_node->mcast_flags)) {
1680 genlmsg_cancel(msg, hdr);
1681 return -EMSGSIZE;
1682 }
1683 }
1684
1685 genlmsg_end(msg, hdr);
1686 return 0;
1687 }
1688
1689 /**
1690 * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
1691 * table to a netlink socket
1692 * @msg: buffer for the message
1693 * @portid: netlink port
1694 * @cb: Control block containing additional options
1695 * @hash: hash to dump
1696 * @bucket: bucket index to dump
1697 * @idx_skip: How many entries to skip
1698 *
1699 * Return: 0 or error code.
1700 */
1701 static int
1702 batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
1703 struct netlink_callback *cb,
1704 struct batadv_hashtable *hash,
1705 unsigned int bucket, long *idx_skip)
1706 {
1707 struct batadv_orig_node *orig_node;
1708 long idx = 0;
1709
1710 spin_lock_bh(&hash->list_locks[bucket]);
1711 cb->seq = atomic_read(&hash->generation) << 1 | 1;
1712
1713 hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
1714 if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
1715 &orig_node->capa_initialized))
1716 continue;
1717
1718 if (idx < *idx_skip)
1719 goto skip;
1720
1721 if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
1722 spin_unlock_bh(&hash->list_locks[bucket]);
1723 *idx_skip = idx;
1724
1725 return -EMSGSIZE;
1726 }
1727
1728 skip:
1729 idx++;
1730 }
1731 spin_unlock_bh(&hash->list_locks[bucket]);
1732
1733 return 0;
1734 }
1735
1736 /**
1737 * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
1738 * @msg: buffer for the message
1739 * @portid: netlink port
1740 * @cb: Control block containing additional options
1741 * @bat_priv: the bat priv with all the soft interface information
1742 * @bucket: current bucket to dump
1743 * @idx: index in current bucket to the next entry to dump
1744 *
1745 * Return: 0 or error code.
1746 */
1747 static int
1748 __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
1749 struct netlink_callback *cb,
1750 struct batadv_priv *bat_priv, long *bucket, long *idx)
1751 {
1752 struct batadv_hashtable *hash = bat_priv->orig_hash;
1753 long bucket_tmp = *bucket;
1754 long idx_tmp = *idx;
1755
1756 while (bucket_tmp < hash->size) {
1757 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
1758 bucket_tmp, &idx_tmp))
1759 break;
1760
1761 bucket_tmp++;
1762 idx_tmp = 0;
1763 }
1764
1765 *bucket = bucket_tmp;
1766 *idx = idx_tmp;
1767
1768 return msg->len;
1769 }
1770
1771 /**
1772 * batadv_mcast_netlink_get_primary() - get primary interface from netlink
1773 * callback
1774 * @cb: netlink callback structure
1775 * @primary_if: the primary interface pointer to return the result in
1776 *
1777 * Return: 0 or error code.
1778 */
1779 static int
1780 batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
1781 struct batadv_hard_iface **primary_if)
1782 {
1783 struct batadv_hard_iface *hard_iface = NULL;
1784 struct net *net = sock_net(cb->skb->sk);
1785 struct net_device *soft_iface;
1786 struct batadv_priv *bat_priv;
1787 int ifindex;
1788 int ret = 0;
1789
1790 ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
1791 if (!ifindex)
1792 return -EINVAL;
1793
1794 soft_iface = dev_get_by_index(net, ifindex);
1795 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
1796 ret = -ENODEV;
1797 goto out;
1798 }
1799
1800 bat_priv = netdev_priv(soft_iface);
1801
1802 hard_iface = batadv_primary_if_get_selected(bat_priv);
1803 if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
1804 ret = -ENOENT;
1805 goto out;
1806 }
1807
1808 out:
1809 if (soft_iface)
1810 dev_put(soft_iface);
1811
1812 if (!ret && primary_if)
1813 *primary_if = hard_iface;
1814 else if (hard_iface)
1815 batadv_hardif_put(hard_iface);
1816
1817 return ret;
1818 }
1819
1820 /**
1821 * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
1822 * @msg: buffer for the message
1823 * @cb: callback structure containing arguments
1824 *
1825 * Return: message length.
1826 */
1827 int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
1828 {
1829 struct batadv_hard_iface *primary_if = NULL;
1830 int portid = NETLINK_CB(cb->skb).portid;
1831 struct batadv_priv *bat_priv;
1832 long *bucket = &cb->args[0];
1833 long *idx = &cb->args[1];
1834 int ret;
1835
1836 ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
1837 if (ret)
1838 return ret;
1839
1840 bat_priv = netdev_priv(primary_if->soft_iface);
1841 ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
1842
1843 batadv_hardif_put(primary_if);
1844 return ret;
1845 }
1846
1847 /**
1848 * batadv_mcast_free() - free the multicast optimizations structures
1849 * @bat_priv: the bat priv with all the soft interface information
1850 */
1851 void batadv_mcast_free(struct batadv_priv *bat_priv)
1852 {
1853 cancel_delayed_work_sync(&bat_priv->mcast.work);
1854
1855 batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
1856 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
1857
1858 /* safely calling outside of worker, as worker was canceled above */
1859 batadv_mcast_mla_tt_retract(bat_priv, NULL);
1860 }
1861
1862 /**
1863 * batadv_mcast_purge_orig() - reset originator global mcast state modifications
1864 * @orig: the originator which is going to get purged
1865 */
1866 void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
1867 {
1868 struct batadv_priv *bat_priv = orig->bat_priv;
1869
1870 spin_lock_bh(&orig->mcast_handler_lock);
1871
1872 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
1873 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
1874 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
1875
1876 spin_unlock_bh(&orig->mcast_handler_lock);
1877 }