]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame_incremental - net/bridge/br_multicast.c
bridge: mcast: add IGMPv3 query support
[mirror_ubuntu-bionic-kernel.git] / net / bridge / br_multicast.c
... / ...
CommitLineData
1/*
2 * Bridge multicast support.
3 *
4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <linux/err.h>
14#include <linux/export.h>
15#include <linux/if_ether.h>
16#include <linux/igmp.h>
17#include <linux/jhash.h>
18#include <linux/kernel.h>
19#include <linux/log2.h>
20#include <linux/netdevice.h>
21#include <linux/netfilter_bridge.h>
22#include <linux/random.h>
23#include <linux/rculist.h>
24#include <linux/skbuff.h>
25#include <linux/slab.h>
26#include <linux/timer.h>
27#include <linux/inetdevice.h>
28#include <linux/mroute.h>
29#include <net/ip.h>
30#if IS_ENABLED(CONFIG_IPV6)
31#include <net/ipv6.h>
32#include <net/mld.h>
33#include <net/ip6_checksum.h>
34#include <net/addrconf.h>
35#endif
36
37#include "br_private.h"
38
39static void br_multicast_start_querier(struct net_bridge *br,
40 struct bridge_mcast_own_query *query);
41static void br_multicast_add_router(struct net_bridge *br,
42 struct net_bridge_port *port);
43static void br_ip4_multicast_leave_group(struct net_bridge *br,
44 struct net_bridge_port *port,
45 __be32 group,
46 __u16 vid);
47#if IS_ENABLED(CONFIG_IPV6)
48static void br_ip6_multicast_leave_group(struct net_bridge *br,
49 struct net_bridge_port *port,
50 const struct in6_addr *group,
51 __u16 vid);
52#endif
53unsigned int br_mdb_rehash_seq;
54
55static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
56{
57 if (a->proto != b->proto)
58 return 0;
59 if (a->vid != b->vid)
60 return 0;
61 switch (a->proto) {
62 case htons(ETH_P_IP):
63 return a->u.ip4 == b->u.ip4;
64#if IS_ENABLED(CONFIG_IPV6)
65 case htons(ETH_P_IPV6):
66 return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
67#endif
68 }
69 return 0;
70}
71
72static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
73 __u16 vid)
74{
75 return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
76}
77
78#if IS_ENABLED(CONFIG_IPV6)
79static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
80 const struct in6_addr *ip,
81 __u16 vid)
82{
83 return jhash_2words(ipv6_addr_hash(ip), vid,
84 mdb->secret) & (mdb->max - 1);
85}
86#endif
87
88static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
89 struct br_ip *ip)
90{
91 switch (ip->proto) {
92 case htons(ETH_P_IP):
93 return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
94#if IS_ENABLED(CONFIG_IPV6)
95 case htons(ETH_P_IPV6):
96 return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
97#endif
98 }
99 return 0;
100}
101
/* Walk one hash chain of the current table version under RCU and return
 * the mdb entry whose address matches @dst, or NULL if none is found.
 * @hash must have been computed with br_ip_hash() against this @mdb.
 */
static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}
114
115struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
116 struct br_ip *dst)
117{
118 if (!mdb)
119 return NULL;
120
121 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
122}
123
124static struct net_bridge_mdb_entry *br_mdb_ip4_get(
125 struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
126{
127 struct br_ip br_dst;
128
129 br_dst.u.ip4 = dst;
130 br_dst.proto = htons(ETH_P_IP);
131 br_dst.vid = vid;
132
133 return br_mdb_ip_get(mdb, &br_dst);
134}
135
136#if IS_ENABLED(CONFIG_IPV6)
137static struct net_bridge_mdb_entry *br_mdb_ip6_get(
138 struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
139 __u16 vid)
140{
141 struct br_ip br_dst;
142
143 br_dst.u.ip6 = *dst;
144 br_dst.proto = htons(ETH_P_IPV6);
145 br_dst.vid = vid;
146
147 return br_mdb_ip_get(mdb, &br_dst);
148}
149#endif
150
/* Look up the mdb entry for the destination of @skb (forwarding fast
 * path, called under RCU).  Returns NULL when snooping is disabled, when
 * the frame was itself classified as IGMP/MLD control traffic, or when
 * the destination protocol is neither IPv4 nor IPv6.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	/* Control traffic was already handled by the snooping code. */
	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}
181
/* RCU callback run after a rehash grace period: free the superseded
 * table that the new table still points to via ->old.
 */
static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	/* Clearing ->old marks the rehash as complete. */
	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}
192
/* Re-link every entry of @old into @new's buckets (entries carry one
 * hlist node per table version, so no allocation is needed).  When
 * @elasticity is non-zero, verify afterwards that no new chain exceeds
 * it; return -EINVAL in that case so the caller can retry with a fresh
 * hash secret.
 */
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	/* Find the longest chain in the new table. */
	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}
221
/* RCU callback: free a port group entry after readers are done with it. */
void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}
229
/* RCU callback: free an mdb entry after readers are done with it. */
static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}
237
/* Group membership timer: when it fires, the bridge device itself is no
 * longer a member; if no port is either, unlink and free the entry.
 * A pending timer means the entry was refreshed after this expiry was
 * queued, so it is left alone.
 */
static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	/* Ports still reference the group; keep the entry. */
	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}
263
/* Remove port group @pg from its mdb entry's port list, notify userspace
 * and free it via RCU.  Must be called with br->multicast_lock held.
 * If the entry ends up with no ports and no bridge membership, its timer
 * is fired immediately so br_multicast_group_expired() reaps it.
 */
static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->flags);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		/* Last reference gone: expire the group entry now. */
		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	/* @pg was not on its own group's list -- should never happen. */
	WARN_ON(1);
}
300
/* Per-port group membership timer: delete the port group unless it was
 * refreshed (timer pending), already unlinked, or marked permanent.
 */
static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}
316
/* Allocate a hash table of @max buckets and swap it in for *@mdbp.
 * Existing entries are re-linked into the new table (see br_mdb_copy());
 * the old table is freed after an RCU grace period.  A non-zero
 * @elasticity forces a fresh hash secret and bounds the allowed chain
 * length.  Called with br->multicast_lock held; allocations are atomic.
 */
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	/* Entries carry two hlist nodes; alternate which one is live. */
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	/* Free the old table once current RCU readers are done. */
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}
363
/* Build an IGMP query skb addressed to 224.0.0.1 (general query when
 * @group is 0, group-specific otherwise).  The IGMPv2 or IGMPv3 header
 * is chosen from br->multicast_igmp_version; *@igmp_type reports the
 * message type for statistics.  Returns NULL on allocation failure.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group,
						    u8 *igmp_type)
{
	struct igmpv3_query *ihv3;
	size_t igmp_hdr_size;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3)
		igmp_hdr_size = sizeof(*ihv3);
	/* +4 for the Router Alert IP option. */
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 igmp_hdr_size + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	/* 01:00:5e:00:00:01 -- mapped MAC for 224.0.0.1 (all hosts). */
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;	/* 20-byte header + 4-byte Router Alert option */
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	/* Router Alert option right after the fixed header. */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* sizeof(*iph) + 4 option bytes */

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* Max response time: LMQT for leave probes, QRI otherwise. */
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = 0;
		ihv3->resv = 0;
		ihv3->suppress = 0;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
		break;
	}

	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
456
#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb addressed to ff02::1 (general query when @grp
 * is the unspecified address, group-specific otherwise).  Fails and
 * clears br->has_ipv6_addr when no usable link-local source address is
 * available.  *@igmp_type reports the message type for statistics.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *grp,
						    u8 *igmp_type)
{
	struct sk_buff *skb;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct ethhdr *eth;
	u8 *hopopt;
	unsigned long interval;

	/* 8 bytes for the hop-by-hop extension header (Router Alert). */
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + sizeof(*mldq));
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	/* version 6, traffic class 0, flow label 0 */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + sizeof(*mldq));
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br->has_ipv6_addr = 0;
		return NULL;
	}

	br->has_ipv6_addr = 1;
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	mldq = (struct mld_msg *) icmp6_hdr(skb);

	/* Max response delay: QRI for general, LMQT for specific queries. */
	interval = ipv6_addr_any(grp) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;

	*igmp_type = ICMPV6_MGM_QUERY;
	mldq->mld_type = ICMPV6_MGM_QUERY;
	mldq->mld_code = 0;
	mldq->mld_cksum = 0;
	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
	mldq->mld_reserved = 0;
	mldq->mld_mca = *grp;

	/* checksum */
	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  sizeof(*mldq), IPPROTO_ICMPV6,
					  csum_partial(mldq,
						       sizeof(*mldq), 0));
	skb_put(skb, sizeof(*mldq));

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
544
545static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
546 struct br_ip *addr,
547 u8 *igmp_type)
548{
549 switch (addr->proto) {
550 case htons(ETH_P_IP):
551 return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
552#if IS_ENABLED(CONFIG_IPV6)
553 case htons(ETH_P_IPV6):
554 return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
555 igmp_type);
556#endif
557 }
558 return NULL;
559}
560
/* Look up @group in bucket @hash; if not found, grow or re-seed the hash
 * table as needed so the caller may insert a new entry.  Returns the
 * existing entry, NULL when insertion may proceed, or an ERR_PTR:
 * -EAGAIN after a successful rehash (caller must recompute the hash),
 * -E2BIG / rehash failure (snooping gets disabled), or -EEXIST when a
 * previous rehash has not finished yet.  Called with multicast_lock held.
 */
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	/* Chain too long: schedule a re-seed of the hash secret. */
	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		/* An old table still pending RCU free: cannot rehash yet. */
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		/* Table replaced; caller must retry with the new table. */
		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}
634
/* Find or create the mdb entry for @group.  Allocates the hash table on
 * first use and retries the hash after br_multicast_get_group() rehashes
 * (-EAGAIN).  Returns the entry or an ERR_PTR.  Called with
 * multicast_lock held.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct net_bridge_port *p,
						    struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, p, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		/* NULL: not found, fall through to allocate. */
		break;

	case -EAGAIN:
rehash:
		/* Table was (re)built: recompute hash against new table. */
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		/* Found entry or hard error; return it as-is. */
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}
683
/* Allocate and initialize a port group entry for @group on @port and
 * link it at the head of the port's mglist, with @next as its successor
 * in the mdb entry's port list.  Returns NULL on allocation failure.
 * Called with multicast_lock held.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);
	return p;
}
705
/* Record a membership report for @group: refresh the bridge-level
 * membership when @port is NULL, otherwise find or create the port group
 * (the per-group port list is kept sorted by descending pointer value)
 * and restart its membership timer.  Returns 0 or a negative errno.
 */
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	/* Report from the bridge device itself. */
	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			goto found;
		/* List is sorted; stop at the insertion point. */
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}
756
757static int br_ip4_multicast_add_group(struct net_bridge *br,
758 struct net_bridge_port *port,
759 __be32 group,
760 __u16 vid)
761{
762 struct br_ip br_group;
763
764 if (ipv4_is_local_multicast(group))
765 return 0;
766
767 br_group.u.ip4 = group;
768 br_group.proto = htons(ETH_P_IP);
769 br_group.vid = vid;
770
771 return br_multicast_add_group(br, port, &br_group);
772}
773
774#if IS_ENABLED(CONFIG_IPV6)
775static int br_ip6_multicast_add_group(struct net_bridge *br,
776 struct net_bridge_port *port,
777 const struct in6_addr *group,
778 __u16 vid)
779{
780 struct br_ip br_group;
781
782 if (ipv6_addr_is_ll_all_nodes(group))
783 return 0;
784
785 br_group.u.ip6 = *group;
786 br_group.proto = htons(ETH_P_IPV6);
787 br_group.vid = vid;
788
789 return br_multicast_add_group(br, port, &br_group);
790}
791#endif
792
/* Per-port router-presence timer: take the port off the router list
 * unless its router mode is static (disabled/permanent) or the timer
 * was refreshed in the meantime.
 */
static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);
	br_rtr_notify(br->dev, port, RTM_DELMDB);
	/* Don't allow timer refresh if the router expired */
	if (port->multicast_router == MDB_RTR_TYPE_TEMP)
		port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

out:
	spin_unlock(&br->multicast_lock);
}
814
/* Bridge-level router timer callback: intentionally empty -- expiry is
 * detected by checking timer_pending() on br->multicast_router_timer.
 */
static void br_multicast_local_router_expired(unsigned long data)
{
}
818
/* Other-querier-present timer expired: no foreign querier is active any
 * more, so resume sending our own queries (if snooping is still on).
 */
static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}
831
832static void br_ip4_multicast_querier_expired(unsigned long data)
833{
834 struct net_bridge *br = (void *)data;
835
836 br_multicast_querier_expired(br, &br->ip4_own_query);
837}
838
839#if IS_ENABLED(CONFIG_IPV6)
840static void br_ip6_multicast_querier_expired(unsigned long data)
841{
842 struct net_bridge *br = (void *)data;
843
844 br_multicast_querier_expired(br, &br->ip6_own_query);
845}
846#endif
847
/* Record our own source address as the elected querier address for the
 * address family of @ip, taken from the query skb we are about to send.
 */
static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
859
/* Build and emit one query.  With a @port, transmit it out that port via
 * the bridge output path; without one, loop it back into the local stack
 * (the bridge device itself acts as the querier).
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;
	u8 igmp_type;

	skb = br_multicast_alloc_query(br, ip, &igmp_type);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
885
/* Send a general query for the address family that @own_query belongs
 * to, unless another querier is currently active on the segment, then
 * re-arm the own-query timer (startup interval while the startup burst
 * is still in progress).  Note the #if structure: without IPv6 support
 * the else-branch disappears and non-IPv4 queries are skipped because
 * other_query stays NULL.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* Defer to a foreign querier while its presence timer runs. */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
922
923static void
924br_multicast_port_query_expired(struct net_bridge_port *port,
925 struct bridge_mcast_own_query *query)
926{
927 struct net_bridge *br = port->br;
928
929 spin_lock(&br->multicast_lock);
930 if (port->state == BR_STATE_DISABLED ||
931 port->state == BR_STATE_BLOCKING)
932 goto out;
933
934 if (query->startup_sent < br->multicast_startup_query_count)
935 query->startup_sent++;
936
937 br_multicast_send_query(port->br, port, query);
938
939out:
940 spin_unlock(&br->multicast_lock);
941}
942
943static void br_ip4_multicast_port_query_expired(unsigned long data)
944{
945 struct net_bridge_port *port = (void *)data;
946
947 br_multicast_port_query_expired(port, &port->ip4_own_query);
948}
949
950#if IS_ENABLED(CONFIG_IPV6)
951static void br_ip6_multicast_port_query_expired(unsigned long data)
952{
953 struct net_bridge_port *port = (void *)data;
954
955 br_multicast_port_query_expired(port, &port->ip6_own_query);
956}
957#endif
958
/* Initialize per-port multicast state when a port joins the bridge:
 * default router mode, timers, and per-cpu statistics.  Returns 0 or
 * -ENOMEM when the statistics allocation fails.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
977
/* Tear down per-port multicast state when a port leaves the bridge:
 * drop all remaining port groups, stop the router timer and free stats.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}
992
/* Restart an own-query cycle: reset the startup burst counter and fire
 * the timer immediately.  The try_to_del/del dance avoids re-arming a
 * timer whose callback is currently running on another CPU.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
1001
/* Enable multicast processing on @port: kick off own queries and, for a
 * permanently-configured router port, put it back on the router list.
 * Caller holds br->multicast_lock.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (br->multicast_disabled || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}
1017
/* Locked wrapper around __br_multicast_enable_port(). */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}
1026
/* Disable multicast processing on @port: drop all non-permanent port
 * groups, remove the port from the router list and stop its timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist)) {
		hlist_del_init_rcu(&port->rlist);
		br_rtr_notify(br->dev, port, RTM_DELMDB);
		/* Don't allow timer refresh if disabling */
		if (port->multicast_router == MDB_RTR_TYPE_TEMP)
			port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	}
	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
1052
1053static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1054 struct net_bridge_port *port,
1055 struct sk_buff *skb,
1056 u16 vid)
1057{
1058 struct igmpv3_report *ih;
1059 struct igmpv3_grec *grec;
1060 int i;
1061 int len;
1062 int num;
1063 int type;
1064 int err = 0;
1065 __be32 group;
1066
1067 ih = igmpv3_report_hdr(skb);
1068 num = ntohs(ih->ngrec);
1069 len = skb_transport_offset(skb) + sizeof(*ih);
1070
1071 for (i = 0; i < num; i++) {
1072 len += sizeof(*grec);
1073 if (!pskb_may_pull(skb, len))
1074 return -EINVAL;
1075
1076 grec = (void *)(skb->data + len - sizeof(*grec));
1077 group = grec->grec_mca;
1078 type = grec->grec_type;
1079
1080 len += ntohs(grec->grec_nsrcs) * 4;
1081 if (!pskb_may_pull(skb, len))
1082 return -EINVAL;
1083
1084 /* We treat this as an IGMPv2 report for now. */
1085 switch (type) {
1086 case IGMPV3_MODE_IS_INCLUDE:
1087 case IGMPV3_MODE_IS_EXCLUDE:
1088 case IGMPV3_CHANGE_TO_INCLUDE:
1089 case IGMPV3_CHANGE_TO_EXCLUDE:
1090 case IGMPV3_ALLOW_NEW_SOURCES:
1091 case IGMPV3_BLOCK_OLD_SOURCES:
1092 break;
1093
1094 default:
1095 continue;
1096 }
1097
1098 if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
1099 type == IGMPV3_MODE_IS_INCLUDE) &&
1100 ntohs(grec->grec_nsrcs) == 0) {
1101 br_ip4_multicast_leave_group(br, port, group, vid);
1102 } else {
1103 err = br_ip4_multicast_add_group(br, port, group, vid);
1104 if (err)
1105 break;
1106 }
1107 }
1108
1109 return err;
1110}
1111
1112#if IS_ENABLED(CONFIG_IPV6)
1113static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1114 struct net_bridge_port *port,
1115 struct sk_buff *skb,
1116 u16 vid)
1117{
1118 struct icmp6hdr *icmp6h;
1119 struct mld2_grec *grec;
1120 int i;
1121 int len;
1122 int num;
1123 int err = 0;
1124
1125 if (!pskb_may_pull(skb, sizeof(*icmp6h)))
1126 return -EINVAL;
1127
1128 icmp6h = icmp6_hdr(skb);
1129 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
1130 len = skb_transport_offset(skb) + sizeof(*icmp6h);
1131
1132 for (i = 0; i < num; i++) {
1133 __be16 *nsrcs, _nsrcs;
1134
1135 nsrcs = skb_header_pointer(skb,
1136 len + offsetof(struct mld2_grec,
1137 grec_nsrcs),
1138 sizeof(_nsrcs), &_nsrcs);
1139 if (!nsrcs)
1140 return -EINVAL;
1141
1142 if (!pskb_may_pull(skb,
1143 len + sizeof(*grec) +
1144 sizeof(struct in6_addr) * ntohs(*nsrcs)))
1145 return -EINVAL;
1146
1147 grec = (struct mld2_grec *)(skb->data + len);
1148 len += sizeof(*grec) +
1149 sizeof(struct in6_addr) * ntohs(*nsrcs);
1150
1151 /* We treat these as MLDv1 reports for now. */
1152 switch (grec->grec_type) {
1153 case MLD2_MODE_IS_INCLUDE:
1154 case MLD2_MODE_IS_EXCLUDE:
1155 case MLD2_CHANGE_TO_INCLUDE:
1156 case MLD2_CHANGE_TO_EXCLUDE:
1157 case MLD2_ALLOW_NEW_SOURCES:
1158 case MLD2_BLOCK_OLD_SOURCES:
1159 break;
1160
1161 default:
1162 continue;
1163 }
1164
1165 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1166 grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1167 ntohs(*nsrcs) == 0) {
1168 br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1169 vid);
1170 } else {
1171 err = br_ip6_multicast_add_group(br, port,
1172 &grec->grec_mca, vid);
1173 if (err)
1174 break;
1175 }
1176 }
1177
1178 return err;
1179}
1180#endif
1181
/* IGMP querier election: adopt @saddr as the current querier if no
 * election is in progress (no timers running), no querier is recorded
 * yet, or @saddr is numerically lower-or-equal (lowest address wins).
 * Returns true when @saddr won.  Caller holds multicast_lock.
 */
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}
1206
#if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election, IPv6 counterpart of the IPv4 version above:
 * lowest source address wins.  Returns true when @saddr won.
 * Caller holds multicast_lock.
 */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif
1230
1231static bool br_multicast_select_querier(struct net_bridge *br,
1232 struct net_bridge_port *port,
1233 struct br_ip *saddr)
1234{
1235 switch (saddr->proto) {
1236 case htons(ETH_P_IP):
1237 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
1238#if IS_ENABLED(CONFIG_IPV6)
1239 case htons(ETH_P_IPV6):
1240 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
1241#endif
1242 }
1243
1244 return false;
1245}
1246
/* Another querier was heard: (re)arm its presence timer and, on first
 * detection, remember until when reports may still be delayed.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	/* only record delay_time when the querier was not known before */
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}
1257
1258/*
1259 * Add port to router_list
1260 * list is maintained ordered by pointer value
1261 * and locked by br->multicast_lock and RCU
1262 */
1263static void br_multicast_add_router(struct net_bridge *br,
1264 struct net_bridge_port *port)
1265{
1266 struct net_bridge_port *p;
1267 struct hlist_node *slot = NULL;
1268
1269 if (!hlist_unhashed(&port->rlist))
1270 return;
1271
1272 hlist_for_each_entry(p, &br->router_list, rlist) {
1273 if ((unsigned long) port >= (unsigned long) p)
1274 break;
1275 slot = &p->rlist;
1276 }
1277
1278 if (slot)
1279 hlist_add_behind_rcu(&port->rlist, slot);
1280 else
1281 hlist_add_head_rcu(&port->rlist, &br->router_list);
1282 br_rtr_notify(br->dev, port, RTM_NEWMDB);
1283}
1284
1285static void br_multicast_mark_router(struct net_bridge *br,
1286 struct net_bridge_port *port)
1287{
1288 unsigned long now = jiffies;
1289
1290 if (!port) {
1291 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY)
1292 mod_timer(&br->multicast_router_timer,
1293 now + br->multicast_querier_interval);
1294 return;
1295 }
1296
1297 if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
1298 port->multicast_router == MDB_RTR_TYPE_PERM)
1299 return;
1300
1301 br_multicast_add_router(br, port);
1302
1303 mod_timer(&port->multicast_router_timer,
1304 now + br->multicast_querier_interval);
1305}
1306
/* Act on a received general query: if its source wins (or keeps) the
 * querier election, refresh the other-querier timer and mark the port
 * as leading towards a multicast router.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (br_multicast_select_querier(br, port, saddr)) {
		br_multicast_update_query_timer(br, query, max_delay);
		br_multicast_mark_router(br, port);
	}
}
1319
/* Handle a received IGMP query (v1/v2/v3, general or group-specific).
 * General queries take part in querier election; group-specific queries
 * lower the membership timers of the matching mdb entry and its ports.
 * @port is NULL when the query arrived on the bridge device itself.
 * The skb was already validated by ip_mc_check_igmp().
 */
static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == offset + sizeof(*ih)) {
		/* IGMPv1/v2 query (plain 8-byte header) */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1: code is always 0; treat as a general
			 * query with a fixed max response time
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= offset + sizeof(*ih3)) {
		/* IGMPv3 query */
		ih3 = igmpv3_query_hdr(skb);
		/* group-and-source specific queries are not handled */
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query: run querier election and arm the
		 * other-querier-present timer
		 */
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	/* group-specific query: lower membership timers to the
	 * last-member window so the group expires unless refreshed
	 */
	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* only ever shorten a timer, never extend it */
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
1397
1398#if IS_ENABLED(CONFIG_IPV6)
1399static int br_ip6_multicast_query(struct net_bridge *br,
1400 struct net_bridge_port *port,
1401 struct sk_buff *skb,
1402 u16 vid)
1403{
1404 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1405 struct mld_msg *mld;
1406 struct net_bridge_mdb_entry *mp;
1407 struct mld2_query *mld2q;
1408 struct net_bridge_port_group *p;
1409 struct net_bridge_port_group __rcu **pp;
1410 struct br_ip saddr;
1411 unsigned long max_delay;
1412 unsigned long now = jiffies;
1413 unsigned int offset = skb_transport_offset(skb);
1414 const struct in6_addr *group = NULL;
1415 bool is_general_query;
1416 int err = 0;
1417
1418 spin_lock(&br->multicast_lock);
1419 if (!netif_running(br->dev) ||
1420 (port && port->state == BR_STATE_DISABLED))
1421 goto out;
1422
1423 if (skb->len == offset + sizeof(*mld)) {
1424 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
1425 err = -EINVAL;
1426 goto out;
1427 }
1428 mld = (struct mld_msg *) icmp6_hdr(skb);
1429 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
1430 if (max_delay)
1431 group = &mld->mld_mca;
1432 } else {
1433 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
1434 err = -EINVAL;
1435 goto out;
1436 }
1437 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1438 if (!mld2q->mld2q_nsrcs)
1439 group = &mld2q->mld2q_mca;
1440
1441 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
1442 }
1443
1444 is_general_query = group && ipv6_addr_any(group);
1445
1446 if (is_general_query) {
1447 saddr.proto = htons(ETH_P_IPV6);
1448 saddr.u.ip6 = ip6h->saddr;
1449
1450 br_multicast_query_received(br, port, &br->ip6_other_query,
1451 &saddr, max_delay);
1452 goto out;
1453 } else if (!group) {
1454 goto out;
1455 }
1456
1457 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
1458 if (!mp)
1459 goto out;
1460
1461 max_delay *= br->multicast_last_member_count;
1462 if (mp->mglist &&
1463 (timer_pending(&mp->timer) ?
1464 time_after(mp->timer.expires, now + max_delay) :
1465 try_to_del_timer_sync(&mp->timer) >= 0))
1466 mod_timer(&mp->timer, now + max_delay);
1467
1468 for (pp = &mp->ports;
1469 (p = mlock_dereference(*pp, br)) != NULL;
1470 pp = &p->next) {
1471 if (timer_pending(&p->timer) ?
1472 time_after(p->timer.expires, now + max_delay) :
1473 try_to_del_timer_sync(&p->timer) >= 0)
1474 mod_timer(&p->timer, now + max_delay);
1475 }
1476
1477out:
1478 spin_unlock(&br->multicast_lock);
1479 return err;
1480}
1481#endif
1482
/* Common handling of an IGMP/MLD leave message for @group.
 * With fast-leave enabled on @port the port is unlinked from the group
 * immediately; otherwise the membership timers are lowered to the
 * last-member window so the entry expires soon unless a report
 * refreshes it.  @port is NULL when the bridge device itself leaves.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		/* fast leave: remove the port group right away instead of
		 * waiting for the membership timers to run down
		 */
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (p->port != port)
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags);

			/* last port gone and bridge itself not a member:
			 * let the group entry expire immediately
			 */
			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	/* another querier is active; leave the timers to its
	 * group-specific queries
	 */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		/* we are the querier: send a group-specific query and
		 * give remaining members the last-member window to answer
		 */
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
		       br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (p->port != port)
				continue;

			/* only ever shorten the timer, never extend it */
			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
	       br->multicast_last_member_interval;

	if (!port) {
		/* bridge device leave: lower only the group timer */
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	/* lower this port's membership timer, never extend it */
	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}
1590
1591static void br_ip4_multicast_leave_group(struct net_bridge *br,
1592 struct net_bridge_port *port,
1593 __be32 group,
1594 __u16 vid)
1595{
1596 struct br_ip br_group;
1597 struct bridge_mcast_own_query *own_query;
1598
1599 if (ipv4_is_local_multicast(group))
1600 return;
1601
1602 own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1603
1604 br_group.u.ip4 = group;
1605 br_group.proto = htons(ETH_P_IP);
1606 br_group.vid = vid;
1607
1608 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
1609 own_query);
1610}
1611
1612#if IS_ENABLED(CONFIG_IPV6)
1613static void br_ip6_multicast_leave_group(struct net_bridge *br,
1614 struct net_bridge_port *port,
1615 const struct in6_addr *group,
1616 __u16 vid)
1617{
1618 struct br_ip br_group;
1619 struct bridge_mcast_own_query *own_query;
1620
1621 if (ipv6_addr_is_ll_all_nodes(group))
1622 return;
1623
1624 own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1625
1626 br_group.u.ip6 = *group;
1627 br_group.proto = htons(ETH_P_IPV6);
1628 br_group.vid = vid;
1629
1630 br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
1631 own_query);
1632}
1633#endif
1634
1635static void br_multicast_err_count(const struct net_bridge *br,
1636 const struct net_bridge_port *p,
1637 __be16 proto)
1638{
1639 struct bridge_mcast_stats __percpu *stats;
1640 struct bridge_mcast_stats *pstats;
1641
1642 if (!br->multicast_stats_enabled)
1643 return;
1644
1645 if (p)
1646 stats = p->mcast_stats;
1647 else
1648 stats = br->mcast_stats;
1649 if (WARN_ON(!stats))
1650 return;
1651
1652 pstats = this_cpu_ptr(stats);
1653
1654 u64_stats_update_begin(&pstats->syncp);
1655 switch (proto) {
1656 case htons(ETH_P_IP):
1657 pstats->mstats.igmp_parse_errors++;
1658 break;
1659#if IS_ENABLED(CONFIG_IPV6)
1660 case htons(ETH_P_IPV6):
1661 pstats->mstats.mld_parse_errors++;
1662 break;
1663#endif
1664 }
1665 u64_stats_update_end(&pstats->syncp);
1666}
1667
1668static void br_multicast_pim(struct net_bridge *br,
1669 struct net_bridge_port *port,
1670 const struct sk_buff *skb)
1671{
1672 unsigned int offset = skb_transport_offset(skb);
1673 struct pimhdr *pimhdr, _pimhdr;
1674
1675 pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
1676 if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
1677 pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
1678 return;
1679
1680 br_multicast_mark_router(br, port);
1681}
1682
/* Classify and process a potential IGMP packet received on @port.
 * ip_mc_check_igmp() does the full validation; -ENOMSG means valid
 * IPv4 multicast that simply is not IGMP.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		/* non-IGMP multicast: flag for router-port-only
		 * forwarding unless it is link-local (224.0.0.x)
		 */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			/* PIM hellos mark the port as a router port */
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		}
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* membership reports go to multicast routers only */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid);
		break;
	}

	/* ip_mc_check_igmp() may have handed us a trimmed clone */
	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
1735
1736#if IS_ENABLED(CONFIG_IPV6)
/* Classify and process a potential MLD packet received on @port.
 * ipv6_mc_check_mld() does the full validation; -ENOMSG means valid
 * IPv6 multicast that simply is not MLD.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		/* non-MLD multicast: flag for router-port-only
		 * forwarding unless it is link-local all-nodes (ff02::1)
		 */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		/* membership reports go to multicast routers only */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
		break;
	}

	/* ipv6_mc_check_mld() may have handed us a trimmed clone */
	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
1784#endif
1785
1786int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1787 struct sk_buff *skb, u16 vid)
1788{
1789 int ret = 0;
1790
1791 BR_INPUT_SKB_CB(skb)->igmp = 0;
1792 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1793
1794 if (br->multicast_disabled)
1795 return 0;
1796
1797 switch (skb->protocol) {
1798 case htons(ETH_P_IP):
1799 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
1800 break;
1801#if IS_ENABLED(CONFIG_IPV6)
1802 case htons(ETH_P_IPV6):
1803 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
1804 break;
1805#endif
1806 }
1807
1808 return ret;
1809}
1810
/* Our own query timer fired: send the next (startup) query and drop
 * the remembered querier port - the election starts over.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	/* count startup queries until the configured amount was sent */
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	/* plain init is fine: multicast_lock serializes all updaters */
	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}
1823
1824static void br_ip4_multicast_query_expired(unsigned long data)
1825{
1826 struct net_bridge *br = (void *)data;
1827
1828 br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
1829}
1830
1831#if IS_ENABLED(CONFIG_IPV6)
1832static void br_ip6_multicast_query_expired(unsigned long data)
1833{
1834 struct net_bridge *br = (void *)data;
1835
1836 br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
1837}
1838#endif
1839
/* Initialize all multicast snooping state of a newly created bridge.
 * The interval defaults match the protocol defaults of RFC 2236
 * (IGMPv2): query interval 125s, query response interval 10s, other
 * querier present interval 255s, membership interval 260s.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_igmp_version = 2;
	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_querier = 0;	/* do not send queries by default */
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;	/* Last Member Query Count */
	br->multicast_startup_query_count = 2;	/* Startup Query Count */

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	/* NOTE(review): presumably optimistic until address events prove
	 * otherwise; used for MLD querier decisions - confirm in callers
	 */
	br->has_ipv6_addr = 1;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
		    (unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
		    (unsigned long)br);
#endif
}
1881
1882static void __br_multicast_open(struct net_bridge *br,
1883 struct bridge_mcast_own_query *query)
1884{
1885 query->startup_sent = 0;
1886
1887 if (br->multicast_disabled)
1888 return;
1889
1890 mod_timer(&query->timer, jiffies);
1891}
1892
/* Start own-querier operation for both address families; called when
 * the bridge device is brought up.
 */
void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}
1900
/* Stop all bridge-level multicast timers; called when the bridge
 * device goes down.  del_timer_sync() guarantees that no timer handler
 * is still running when this returns.
 */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}
1911
/* Tear down the mdb hash table when the bridge device is deleted.
 * All group entries and finally the table itself are freed via RCU.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	/* walk the currently active chain version of every bucket */
	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		/* a rehash is still in flight: drop the lock and wait for
		 * its RCU callback to clear ->old before freeing the table
		 */
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	/* NOTE(review): br_mdb_free() appears to expect ->old to point at
	 * the table being freed - confirm against br_mdb_free()
	 */
	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);

	free_percpu(br->mcast_stats);
}
1951
1952int br_multicast_set_router(struct net_bridge *br, unsigned long val)
1953{
1954 int err = -EINVAL;
1955
1956 spin_lock_bh(&br->multicast_lock);
1957
1958 switch (val) {
1959 case MDB_RTR_TYPE_DISABLED:
1960 case MDB_RTR_TYPE_PERM:
1961 del_timer(&br->multicast_router_timer);
1962 /* fall through */
1963 case MDB_RTR_TYPE_TEMP_QUERY:
1964 br->multicast_router = val;
1965 err = 0;
1966 break;
1967 }
1968
1969 spin_unlock_bh(&br->multicast_lock);
1970
1971 return err;
1972}
1973
1974static void __del_port_router(struct net_bridge_port *p)
1975{
1976 if (hlist_unhashed(&p->rlist))
1977 return;
1978 hlist_del_init_rcu(&p->rlist);
1979 br_rtr_notify(p->br->dev, p, RTM_DELMDB);
1980}
1981
/* Configure the multicast router mode of a bridge port.
 * Returns 0 on success, -EINVAL for unknown modes.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		/* take the port off the router list and stop detection */
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* port is re-added only once a query/hello is snooped */
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		/* permanent router port: no detection timer needed */
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		/* adds the port and starts the temp router timer */
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
2025
2026static void br_multicast_start_querier(struct net_bridge *br,
2027 struct bridge_mcast_own_query *query)
2028{
2029 struct net_bridge_port *port;
2030
2031 __br_multicast_open(br, query);
2032
2033 list_for_each_entry(port, &br->port_list, list) {
2034 if (port->state == BR_STATE_DISABLED ||
2035 port->state == BR_STATE_BLOCKING)
2036 continue;
2037
2038 if (query == &br->ip4_own_query)
2039 br_multicast_enable(&port->ip4_own_query);
2040#if IS_ENABLED(CONFIG_IPV6)
2041 else
2042 br_multicast_enable(&port->ip6_own_query);
2043#endif
2044 }
2045}
2046
/* Enable (@val != 0) or disable multicast snooping on the bridge.
 * Enabling rehashes the mdb and restarts queriers on all ports; the
 * setting is rolled back if the rehash cannot be done.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_port *port;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	/* no change requested */
	if (br->multicast_disabled == !val)
		goto unlock;

	br->multicast_disabled = !val;
	/* disabling requires no further work */
	if (br->multicast_disabled)
		goto unlock;

	if (!netif_running(br->dev))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	if (mdb) {
		/* an unfinished earlier rehash blocks a new one */
		if (mdb->old) {
			err = -EEXIST;
rollback:
			/* restore the previous (disabled) state */
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}
2088
/* Enable/disable acting as IGMP/MLD querier.  When enabling, assume
 * another querier might still be present for one query response
 * interval so we do not take over too eagerly.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	/* no change requested */
	if (br->multicast_querier == val)
		goto unlock;

	br->multicast_querier = val;
	/* disabling: simply let the own-query timers lapse */
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
2122
/* Change the maximum mdb hash table size.  @val must be a power of two
 * and must not be smaller than the table's current fill; a failed
 * rehash rolls the setting back.
 */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;
	u32 old;
	struct net_bridge_mdb_htable *mdb;

	spin_lock_bh(&br->multicast_lock);
	if (!is_power_of_2(val))
		goto unlock;

	mdb = mlock_dereference(br->mdb, br);
	/* NOTE(review): mdb->size appears to be the current entry count,
	 * i.e. shrinking below it is refused - confirm in br_private.h
	 */
	if (mdb && val < mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (mdb) {
		/* an unfinished earlier rehash blocks a new one */
		if (mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return err;
}
2161
2162int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
2163{
2164 /* Currently we support only version 2 and 3 */
2165 switch (val) {
2166 case 2:
2167 case 3:
2168 break;
2169 default:
2170 return -EINVAL;
2171 }
2172
2173 spin_lock_bh(&br->multicast_lock);
2174 br->multicast_igmp_version = val;
2175 spin_unlock_bh(&br->multicast_lock);
2176
2177 return 0;
2178}
2179
2180/**
2181 * br_multicast_list_adjacent - Returns snooped multicast addresses
2182 * @dev: The bridge port adjacent to which to retrieve addresses
2183 * @br_ip_list: The list to store found, snooped multicast IP addresses in
2184 *
2185 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
2186 * snooping feature on all bridge ports of dev's bridge device, excluding
2187 * the addresses from dev itself.
2188 *
2189 * Returns the number of items added to br_ip_list.
2190 *
2191 * Notes:
2192 * - br_ip_list needs to be initialized by caller
2193 * - br_ip_list might contain duplicates in the end
2194 * (needs to be taken care of by caller)
2195 * - br_ip_list needs to be freed by caller
2196 */
2197int br_multicast_list_adjacent(struct net_device *dev,
2198 struct list_head *br_ip_list)
2199{
2200 struct net_bridge *br;
2201 struct net_bridge_port *port;
2202 struct net_bridge_port_group *group;
2203 struct br_ip_list *entry;
2204 int count = 0;
2205
2206 rcu_read_lock();
2207 if (!br_ip_list || !br_port_exists(dev))
2208 goto unlock;
2209
2210 port = br_port_get_rcu(dev);
2211 if (!port || !port->br)
2212 goto unlock;
2213
2214 br = port->br;
2215
2216 list_for_each_entry_rcu(port, &br->port_list, list) {
2217 if (!port->dev || port->dev == dev)
2218 continue;
2219
2220 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
2221 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2222 if (!entry)
2223 goto unlock;
2224
2225 entry->addr = group->addr;
2226 list_add(&entry->list, br_ip_list);
2227 count++;
2228 }
2229 }
2230
2231unlock:
2232 rcu_read_unlock();
2233 return count;
2234}
2235EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
2236
2237/**
2238 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2239 * @dev: The bridge port providing the bridge on which to check for a querier
2240 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2241 *
2242 * Checks whether the given interface has a bridge on top and if so returns
2243 * true if a valid querier exists anywhere on the bridged link layer.
2244 * Otherwise returns false.
2245 */
2246bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2247{
2248 struct net_bridge *br;
2249 struct net_bridge_port *port;
2250 struct ethhdr eth;
2251 bool ret = false;
2252
2253 rcu_read_lock();
2254 if (!br_port_exists(dev))
2255 goto unlock;
2256
2257 port = br_port_get_rcu(dev);
2258 if (!port || !port->br)
2259 goto unlock;
2260
2261 br = port->br;
2262
2263 memset(&eth, 0, sizeof(eth));
2264 eth.h_proto = htons(proto);
2265
2266 ret = br_multicast_querier_exists(br, &eth);
2267
2268unlock:
2269 rcu_read_unlock();
2270 return ret;
2271}
2272EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
2273
2274/**
2275 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2276 * @dev: The bridge port adjacent to which to check for a querier
2277 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2278 *
2279 * Checks whether the given interface has a bridge on top and if so returns
2280 * true if a selected querier is behind one of the other ports of this
2281 * bridge. Otherwise returns false.
2282 */
2283bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2284{
2285 struct net_bridge *br;
2286 struct net_bridge_port *port;
2287 bool ret = false;
2288
2289 rcu_read_lock();
2290 if (!br_port_exists(dev))
2291 goto unlock;
2292
2293 port = br_port_get_rcu(dev);
2294 if (!port || !port->br)
2295 goto unlock;
2296
2297 br = port->br;
2298
2299 switch (proto) {
2300 case ETH_P_IP:
2301 if (!timer_pending(&br->ip4_other_query.timer) ||
2302 rcu_dereference(br->ip4_querier.port) == port)
2303 goto unlock;
2304 break;
2305#if IS_ENABLED(CONFIG_IPV6)
2306 case ETH_P_IPV6:
2307 if (!timer_pending(&br->ip6_other_query.timer) ||
2308 rcu_dereference(br->ip6_querier.port) == port)
2309 goto unlock;
2310 break;
2311#endif
2312 default:
2313 goto unlock;
2314 }
2315
2316 ret = true;
2317unlock:
2318 rcu_read_unlock();
2319 return ret;
2320}
2321EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
2322
/* Account one IGMP/MLD packet of @type travelling in direction @dir in
 * the per-cpu statistics @stats.  @skb is only used to derive the
 * transport payload length needed to tell query versions apart.
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* v3 queries are longer than the basic igmphdr */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* v1 queries carry a zero max resp code */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* v2 queries are longer than the basic mld_msg */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
2393
2394void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
2395 const struct sk_buff *skb, u8 type, u8 dir)
2396{
2397 struct bridge_mcast_stats __percpu *stats;
2398
2399 /* if multicast_disabled is true then igmp type can't be set */
2400 if (!type || !br->multicast_stats_enabled)
2401 return;
2402
2403 if (p)
2404 stats = p->mcast_stats;
2405 else
2406 stats = br->mcast_stats;
2407 if (WARN_ON(!stats))
2408 return;
2409
2410 br_mcast_stats_add(stats, skb, type, dir);
2411}
2412
2413int br_multicast_init_stats(struct net_bridge *br)
2414{
2415 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2416 if (!br->mcast_stats)
2417 return -ENOMEM;
2418
2419 return 0;
2420}
2421
2422static void mcast_stats_add_dir(u64 *dst, u64 *src)
2423{
2424 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
2425 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
2426}
2427
/* Aggregate the per-cpu multicast statistics into a single snapshot.
 * @br:   bridge to read from
 * @p:    bridge port whose counters to report, or NULL for the bridge itself
 * @dest: caller-supplied result structure; zeroed first, left zeroed if the
 *        per-cpu stats pointer is unexpectedly missing
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	/* per-port counters when a port is given, bridge-level otherwise */
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	/* accumulate into a local struct so @dest is never seen half-summed */
	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* u64_stats seqcount retry loop: re-copy the snapshot if a
		 * writer updated this cpu's counters while we were reading
		 */
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}