net/bridge/br_multicast.c
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
unsigned int br_mdb_rehash_seq;

static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;
	if (a->vid != b->vid)
		return 0;
	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
				__u16 vid)
{
	return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
				const struct in6_addr *ip,
				__u16 vid)
{
	return jhash_2words(ipv6_addr_hash(ip), vid,
			    mdb->secret) & (mdb->max - 1);
}
#endif

static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
			     struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
#endif
	}
	return 0;
}

static struct net_bridge_mdb_entry *__br_mdb_ip_get(
	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
{
	struct net_bridge_mdb_entry *mp;

	hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		if (br_ip_equal(&mp->addr, dst))
			return mp;
	}

	return NULL;
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
					   struct br_ip *dst)
{
	if (!mdb)
		return NULL;

	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(
	struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
	__u16 vid)
{
	struct br_ip br_dst;

	br_dst.u.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(mdb, &br_dst);
}
#endif

struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
	struct br_ip ip;

	if (br->multicast_disabled)
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.u.ip4 = ip_hdr(skb)->daddr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.u.ip6 = ipv6_hdr(skb)->daddr;
		break;
#endif
	default:
		return NULL;
	}

	return br_mdb_ip_get(mdb, &ip);
}

static void br_mdb_free(struct rcu_head *head)
{
	struct net_bridge_mdb_htable *mdb =
		container_of(head, struct net_bridge_mdb_htable, rcu);
	struct net_bridge_mdb_htable *old = mdb->old;

	mdb->old = NULL;
	kfree(old->mhash);
	kfree(old);
}

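/* Relink every entry from the old hash table into the new one. Each
 * net_bridge_mdb_entry carries two hlist nodes (hlist[0]/hlist[1]), so it
 * can be a member of both tables at once while readers still traverse the
 * old version under RCU. If an elasticity limit is given, reject the new
 * table when any chain ends up longer than that limit.
 */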
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
		       struct net_bridge_mdb_htable *old,
		       int elasticity)
{
	struct net_bridge_mdb_entry *mp;
	int maxlen;
	int len;
	int i;

	for (i = 0; i < old->max; i++)
		hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
			hlist_add_head(&mp->hlist[new->ver],
				       &new->mhash[br_ip_hash(new, &mp->addr)]);

	if (!elasticity)
		return 0;

	maxlen = 0;
	for (i = 0; i < new->max; i++) {
		len = 0;
		hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
			len++;
		if (len > maxlen)
			maxlen = len;
	}

	return maxlen > elasticity ? -EINVAL : 0;
}

void br_multicast_free_pg(struct rcu_head *head)
{
	struct net_bridge_port_group *p =
		container_of(head, struct net_bridge_port_group, rcu);

	kfree(p);
}

static void br_multicast_free_group(struct rcu_head *head)
{
	struct net_bridge_mdb_entry *mp =
		container_of(head, struct net_bridge_mdb_entry, rcu);

	kfree(mp);
}

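/* Timer callback for an mdb entry's group membership timer: clear the
 * bridge's own membership flag and, if no port groups remain either,
 * unlink the entry from the hash table and free it after an RCU grace
 * period.
 */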
static void br_multicast_group_expired(unsigned long data)
{
	struct net_bridge_mdb_entry *mp = (void *)data;
	struct net_bridge *br = mp->br;
	struct net_bridge_mdb_htable *mdb;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&mp->timer))
		goto out;

	mp->mglist = false;

	if (mp->ports)
		goto out;

	mdb = mlock_dereference(br->mdb, br);

	hlist_del_rcu(&mp->hlist[mdb->ver]);
	mdb->size--;

	call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_del_pg(struct net_bridge *br,
				struct net_bridge_port_group *pg)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &pg->addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
			      p->flags);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

		return;
	}

	WARN_ON(1);
}

static void br_multicast_port_group_expired(unsigned long data)
{
	struct net_bridge_port_group *pg = (void *)data;
	struct net_bridge *br = pg->port->br;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	br_multicast_del_pg(br, pg);

out:
	spin_unlock(&br->multicast_lock);
}

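/* Replace the mdb hash table with a new one of the given size. The new
 * table is populated with the existing entries (the hash secret is
 * re-seeded when the rehash was triggered by chain elasticity), published
 * with rcu_assign_pointer(), and the old table is freed only after an RCU
 * grace period via br_mdb_free().
 */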
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
			 int elasticity)
{
	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->max = max;
	mdb->old = old;

	mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
	if (!mdb->mhash) {
		kfree(mdb);
		return -ENOMEM;
	}

	mdb->size = old ? old->size : 0;
	mdb->ver = old ? old->ver ^ 1 : 0;

	if (!old || elasticity)
		get_random_bytes(&mdb->secret, sizeof(mdb->secret));
	else
		mdb->secret = old->secret;

	if (!old)
		goto out;

	err = br_mdb_copy(mdb, old, elasticity);
	if (err) {
		kfree(mdb->mhash);
		kfree(mdb);
		return err;
	}

	br_mdb_rehash_seq++;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	rcu_assign_pointer(*mdbp, mdb);

	return 0;
}

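/* Build an IGMP query to send on behalf of the bridge: an Ethernet frame
 * to 01:00:5e:00:00:01, an IPv4 header with TTL 1 and the Router Alert
 * option, and an IGMPv2 or IGMPv3 query header depending on the configured
 * multicast_igmp_version.
 */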
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
						    __be32 group,
						    u8 *igmp_type)
{
	struct igmpv3_query *ihv3;
	size_t igmp_hdr_size;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	struct iphdr *iph;

	igmp_hdr_size = sizeof(*ih);
	if (br->multicast_igmp_version == 3)
		igmp_hdr_size = sizeof(*ihv3);
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
						 igmp_hdr_size + 4);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_dest[0] = 1;
	eth->h_dest[1] = 0;
	eth->h_dest[2] = 0x5e;
	eth->h_dest[3] = 0;
	eth->h_dest[4] = 0;
	eth->h_dest[5] = 1;
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br->multicast_query_use_ifaddr ?
		     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (br->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? br->multicast_last_member_interval :
				    br->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? br->multicast_last_member_interval :
				      br->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = br->multicast_query_interval / HZ;
		ihv3->nsrcs = 0;
		ihv3->resv = 0;
		ihv3->suppress = 0;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
		break;
	}

	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
						    const struct in6_addr *grp,
						    u8 *igmp_type)
{
	struct mld2_query *mld2q;
	unsigned long interval;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	size_t mld_hdr_size;
	struct sk_buff *skb;
	struct ethhdr *eth;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (br->multicast_mld_version == 2)
		mld_hdr_size = sizeof(*mld2q);
	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
						 8 + mld_hdr_size);
	if (!skb)
		goto out;

	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
	if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		kfree_skb(skb);
		br->has_ipv6_addr = 0;
		return NULL;
	}

	br->has_ipv6_addr = 1;
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(grp) ?
			br->multicast_query_response_interval :
			br->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (br->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *grp;
		mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  sizeof(*mldq), IPPROTO_ICMPV6,
						  csum_partial(mldq,
							       sizeof(*mldq),
							       0));
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = 0;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = 0;
		mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *grp;
		mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						     sizeof(*mld2q),
						     IPPROTO_ICMPV6,
						     csum_partial(mld2q,
								  sizeof(*mld2q),
								  0));
		break;
	}
	skb_put(skb, mld_hdr_size);

	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif

static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
						struct br_ip *addr,
						u8 *igmp_type)
{
	switch (addr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
						    igmp_type);
#endif
	}
	return NULL;
}

static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port,
	struct br_ip *group, int hash)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	unsigned int count = 0;
	unsigned int max;
	int elasticity;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(br_ip_equal(group, &mp->addr)))
			return mp;
	}

	elasticity = 0;
	max = mdb->max;

	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			br_info(br, "Multicast hash table "
				"chain limit reached: %s\n",
				port ? port->dev->name : br->dev->name);

		elasticity = br->hash_elasticity;
	}

	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max > br->hash_max)) {
			br_warn(br, "Multicast hash table maximum of %d "
				"reached, disabling snooping: %s\n",
				br->hash_max,
				port ? port->dev->name : br->dev->name);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		if (mdb->old) {
			if (net_ratelimit())
				br_info(br, "Multicast hash table "
					"on fire: %s\n",
					port ? port->dev->name : br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			br_warn(br, "Cannot rehash multicast "
				"hash table, disabling snooping: %s, %d, %d\n",
				port ? port->dev->name : br->dev->name,
				mdb->size, err);
			goto disable;
		}

		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}

struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct net_bridge_port *p,
						    struct br_ip *group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;
	int err;

	mdb = rcu_dereference_protected(br->mdb, 1);
	if (!mdb) {
		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
		if (err)
			return ERR_PTR(err);
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, p, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		break;

	case -EAGAIN:
rehash:
		mdb = rcu_dereference_protected(br->mdb, 1);
		hash = br_ip_hash(mdb, group);
		break;

	default:
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}

struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->addr = *group;
	p->port = port;
	p->flags = flags;
	rcu_assign_pointer(p->next, next);
	hlist_add_head(&p->mglist, &port->mglist);
	setup_timer(&p->timer, br_multicast_port_group_expired,
		    (unsigned long)p);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		memset(p->eth_addr, 0xff, ETH_ALEN);

	return p;
}

static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

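/* Record a membership report: create the mdb entry if needed, then either
 * refresh the bridge's own host entry (port == NULL) or find/insert the
 * per-port group and re-arm its timer with the membership interval.
 */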
static int br_multicast_add_group(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct br_ip *group,
				  const unsigned char *src)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_multicast_new_group(br, port, group);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto err;

	if (!port) {
		mp->mglist = true;
		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, port, src))
			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, 0, src);
	if (unlikely(!p))
		goto err;
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);

found:
	mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
	err = 0;

err:
	spin_unlock(&br->multicast_lock);
	return err;
}

static int br_ip4_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src)
{
	struct br_ip br_group;

	if (ipv4_is_local_multicast(group))
		return 0;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group, src);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
				      struct net_bridge_port *port,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src)
{
	struct br_ip br_group;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	return br_multicast_add_group(br, port, &br_group, src);
}
#endif

static void br_multicast_router_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer) ||
	    hlist_unhashed(&port->rlist))
		goto out;

	hlist_del_init_rcu(&port->rlist);
	br_rtr_notify(br->dev, port, RTM_DELMDB);
	/* Don't allow timer refresh if the router expired */
	if (port->multicast_router == MDB_RTR_TYPE_TEMP)
		port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_local_router_expired(unsigned long data)
{
}

static void br_multicast_querier_expired(struct net_bridge *br,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || br->multicast_disabled)
		goto out;

	br_multicast_start_querier(br, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif

static void br_multicast_select_own_querier(struct net_bridge *br,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}

static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct br_ip *ip)
{
	struct sk_buff *skb;
	u8 igmp_type;

	skb = br_multicast_alloc_query(br, ip, &igmp_type);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);
	} else {
		br_multicast_select_own_querier(br, ip, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    !br->multicast_querier)
		return;

	memset(&br_group.u, 0, sizeof(br_group.u));

	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, &br_group);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

static void
br_multicast_port_query_expired(struct net_bridge_port *port,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->state == BR_STATE_DISABLED ||
	    port->state == BR_STATE_BLOCKING)
		goto out;

	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(port->br, port, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(unsigned long data)
{
	struct net_bridge_port *port = (void *)data;

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif

int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
		    (unsigned long)port);
	setup_timer(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, (unsigned long)port);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, (unsigned long)port);
#endif
	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}

static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (br->multicast_disabled || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist)) {
		hlist_del_init_rcu(&port->rlist);
		br_rtr_notify(br->dev, port, RTM_DELMDB);
		/* Don't allow timer refresh if disabling */
		if (port->multicast_router == MDB_RTR_TYPE_TEMP)
			port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	}
	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}

static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    ntohs(grec->grec_nsrcs) == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid, src);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	int i;
	int len;
	int num;
	int err = 0;

	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *nsrcs, _nsrcs;

		nsrcs = skb_header_pointer(skb,
					   len + offsetof(struct mld2_grec,
							  grec_nsrcs),
					   sizeof(_nsrcs), &_nsrcs);
		if (!nsrcs)
			return -EINVAL;

		if (!pskb_may_pull(skb,
				   len + sizeof(*grec) +
				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += sizeof(*grec) +
		       sizeof(struct in6_addr) * ntohs(*nsrcs);

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    ntohs(*nsrcs) == 0) {
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid, src);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}
#endif

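/* IGMP/MLD querier election: the querier with the numerically lowest
 * source address wins. Adopt the advertised address if no election is
 * currently running or if it compares lower than (or equal to) the one we
 * already know about.
 */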
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif

static bool br_multicast_select_querier(struct net_bridge *br,
					struct net_bridge_port *port,
					struct br_ip *saddr)
{
	switch (saddr->proto) {
	case htons(ETH_P_IP):
		return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
#endif
	}

	return false;
}

static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}

/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	if (!hlist_unhashed(&port->rlist))
		return;

	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
}

static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY)
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}

static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}

static int br_ip4_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	__be32 group;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (skb->len == offset + sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (skb->len >= offset + sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (skb->len == offset + sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ip6h->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif

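/* Handle a leave message: with fast-leave enabled on the port the group is
 * torn down immediately; otherwise the timers are shortened to the
 * last-member interval (after sending a group-specific query if we are the
 * querier) so the entry expires quickly unless another member reports.
 */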
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			call_rcu_bh(&p->rcu, br_multicast_free_pg);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags);

			if (!mp->ports && !mp->mglist &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (br->multicast_querier) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query, src);
}
#endif

static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br->multicast_stats_enabled)
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

static void br_multicast_pim(struct net_bridge *br,
			     struct net_bridge_port *port,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	br_multicast_mark_router(br, port);
}

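/* Snoop an IPv4 multicast packet: validate it with ip_mc_check_igmp() and
 * dispatch IGMP reports, queries and leaves to the handlers above. PIM
 * hellos mark the ingress port as a router port; other non-IGMP packets to
 * non-link-local groups are flagged mrouters_only, which limits forwarding
 * of unregistered group traffic to detected multicast router ports.
 */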
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		}
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct sk_buff *skb_trimmed = NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb, &skb_trimmed);

	if (err == -ENOMSG) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	if (skb_trimmed && skb_trimmed != skb)
		kfree_skb(skb_trimmed);

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (br->multicast_disabled)
		return 0;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(br, port, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(br, port, skb, vid);
		break;
#endif
	}

	return ret;
}

static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(unsigned long data)
{
	struct net_bridge *br = (void *)data;

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif

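/* Set the protocol default timer values (e.g. 125s query interval, 260s
 * membership interval) and initialize the per-bridge timers. The snooping
 * on/off state itself lives in br->multicast_disabled and is toggled via
 * br_multicast_toggle().
 */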
void br_multicast_init(struct net_bridge *br)
{
	br->hash_elasticity = 4;
	br->hash_max = 512;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_querier = 0;
	br->multicast_query_use_ifaddr = 0;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	br->has_ipv6_addr = 1;

	spin_lock_init(&br->multicast_lock);
	setup_timer(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	setup_timer(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip4_own_query.timer, br_ip4_multicast_query_expired,
		    (unsigned long)br);
#if IS_ENABLED(CONFIG_IPV6)
	setup_timer(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, (unsigned long)br);
	setup_timer(&br->ip6_own_query.timer, br_ip6_multicast_query_expired,
		    (unsigned long)br);
#endif
}

static void __br_multicast_open(struct net_bridge *br,
				struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (br->multicast_disabled)
		return;

	mod_timer(&query->timer, jiffies);
}

void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}

void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}

void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *n;
	u32 ver;
	int i;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);

	free_percpu(br->mcast_stats);
}

int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		del_timer(&br->multicast_router_timer);
		/* fall through */
	case MDB_RTR_TYPE_TEMP_QUERY:
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}

static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
}

int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}

static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
}

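/* Runtime toggle for multicast snooping. Enabling rehashes the existing
 * mdb table (rolling the flag back on failure) and restarts the own-query
 * state machine on the bridge and on every active port.
 */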
2114 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
2115 {
2116 struct net_bridge_mdb_htable *mdb;
2117 struct net_bridge_port *port;
2118 int err = 0;
2119
2120 spin_lock_bh(&br->multicast_lock);
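	/* @val enables snooping; the flag kept in the bridge is the inverse */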
2121 if (br->multicast_disabled == !val)
2122 goto unlock;
2123
2124 br->multicast_disabled = !val;
2125 if (br->multicast_disabled)
2126 goto unlock;
2127
2128 if (!netif_running(br->dev))
2129 goto unlock;
2130
2131 mdb = mlock_dereference(br->mdb, br);
2132 if (mdb) {
2133 if (mdb->old) {
2134 err = -EEXIST;
2135 rollback:
2136 br->multicast_disabled = !!val;
2137 goto unlock;
2138 }
2139
2140 err = br_mdb_rehash(&br->mdb, mdb->max,
2141 br->hash_elasticity);
2142 if (err)
2143 goto rollback;
2144 }
2145
2146 br_multicast_open(br);
2147 list_for_each_entry(port, &br->port_list, list)
2148 __br_multicast_enable_port(port);
2149
2150 unlock:
2151 spin_unlock_bh(&br->multicast_lock);
2152
2153 return err;
2154 }
2155
2156 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
2157 {
2158 unsigned long max_delay;
2159
2160 val = !!val;
2161
2162 spin_lock_bh(&br->multicast_lock);
2163 if (br->multicast_querier == val)
2164 goto unlock;
2165
2166 br->multicast_querier = val;
2167 if (!val)
2168 goto unlock;
2169
2170 max_delay = br->multicast_query_response_interval;
2171
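	/* Hold br_multicast_querier_exists() off for one query response
	 * interval, giving an already elected foreign querier time to be
	 * heard before our own querier state is trusted.
	 */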
2172 if (!timer_pending(&br->ip4_other_query.timer))
2173 br->ip4_other_query.delay_time = jiffies + max_delay;
2174
2175 br_multicast_start_querier(br, &br->ip4_own_query);
2176
2177 #if IS_ENABLED(CONFIG_IPV6)
2178 if (!timer_pending(&br->ip6_other_query.timer))
2179 br->ip6_other_query.delay_time = jiffies + max_delay;
2180
2181 br_multicast_start_querier(br, &br->ip6_own_query);
2182 #endif
2183
2184 unlock:
2185 spin_unlock_bh(&br->multicast_lock);
2186
2187 return 0;
2188 }
2189
2190 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
2191 {
2192 int err = -EINVAL;
2193 u32 old;
2194 struct net_bridge_mdb_htable *mdb;
2195
2196 spin_lock_bh(&br->multicast_lock);
2197 if (!is_power_of_2(val))
2198 goto unlock;
2199
2200 mdb = mlock_dereference(br->mdb, br);
2201 if (mdb && val < mdb->size)
2202 goto unlock;
2203
2204 err = 0;
2205
2206 old = br->hash_max;
2207 br->hash_max = val;
2208
2209 if (mdb) {
2210 if (mdb->old) {
2211 err = -EEXIST;
2212 rollback:
2213 br->hash_max = old;
2214 goto unlock;
2215 }
2216
2217 err = br_mdb_rehash(&br->mdb, br->hash_max,
2218 br->hash_elasticity);
2219 if (err)
2220 goto rollback;
2221 }
2222
2223 unlock:
2224 spin_unlock_bh(&br->multicast_lock);
2225
2226 return err;
2227 }
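/*
 * Illustration: only powers of two no smaller than the current table size
 * are accepted, e.g.
 *
 *	br_multicast_set_hash_max(br, 1024);	ok, rehashes if needed
 *	br_multicast_set_hash_max(br, 1000);	-EINVAL, not a power of 2
 */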
2228
2229 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
2230 {
2231 /* Currently we support only versions 2 and 3 */
2232 switch (val) {
2233 case 2:
2234 case 3:
2235 break;
2236 default:
2237 return -EINVAL;
2238 }
2239
2240 spin_lock_bh(&br->multicast_lock);
2241 br->multicast_igmp_version = val;
2242 spin_unlock_bh(&br->multicast_lock);
2243
2244 return 0;
2245 }
2246
2247 #if IS_ENABLED(CONFIG_IPV6)
2248 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
2249 {
2250 /* Currently we support only versions 1 and 2 */
2251 switch (val) {
2252 case 1:
2253 case 2:
2254 break;
2255 default:
2256 return -EINVAL;
2257 }
2258
2259 spin_lock_bh(&br->multicast_lock);
2260 br->multicast_mld_version = val;
2261 spin_unlock_bh(&br->multicast_lock);
2262
2263 return 0;
2264 }
2265 #endif
2266
2267 /**
2268 * br_multicast_list_adjacent - Returns snooped multicast addresses
2269 * @dev: The bridge port adjacent to which to retrieve addresses
2270 * @br_ip_list: The list in which to store the snooped multicast IP addresses
2271 *
2272 * Creates a list of IP addresses (struct br_ip_list) learned by the multicast
2273 * snooping feature on all bridge ports of dev's bridge device, excluding
2274 * the addresses learned on dev itself.
2275 *
2276 * Returns the number of items added to br_ip_list.
2277 *
2278 * Notes:
2279 * - br_ip_list needs to be initialized by caller
2280 * - br_ip_list might contain duplicates in the end
2281 * (needs to be taken care of by caller)
2282 * - br_ip_list needs to be freed by caller
2283 */
2284 int br_multicast_list_adjacent(struct net_device *dev,
2285 struct list_head *br_ip_list)
2286 {
2287 struct net_bridge *br;
2288 struct net_bridge_port *port;
2289 struct net_bridge_port_group *group;
2290 struct br_ip_list *entry;
2291 int count = 0;
2292
2293 rcu_read_lock();
2294 if (!br_ip_list || !br_port_exists(dev))
2295 goto unlock;
2296
2297 port = br_port_get_rcu(dev);
2298 if (!port || !port->br)
2299 goto unlock;
2300
2301 br = port->br;
2302
2303 list_for_each_entry_rcu(port, &br->port_list, list) {
2304 if (!port->dev || port->dev == dev)
2305 continue;
2306
2307 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
2308 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2309 if (!entry)
2310 goto unlock;
2311
2312 entry->addr = group->addr;
2313 list_add(&entry->list, br_ip_list);
2314 count++;
2315 }
2316 }
2317
2318 unlock:
2319 rcu_read_unlock();
2320 return count;
2321 }
2322 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
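/*
 * Sketch of a hypothetical consumer (all names illustrative): collect the
 * snooped addresses adjacent to a port, then free the list as the notes
 * above require.
 */
#if 0
static void example_dump_adjacent(struct net_device *dev)
{
	struct br_ip_list *entry, *tmp;
	LIST_HEAD(mc_list);
	int n;

	n = br_multicast_list_adjacent(dev, &mc_list);
	pr_info("%s: %d snooped group(s)\n", dev->name, n);

	list_for_each_entry_safe(entry, tmp, &mc_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}
#endif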
2323
2324 /**
2325 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2326 * @dev: The bridge port providing the bridge on which to check for a querier
2327 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2328 *
2329 * Checks whether the given interface has a bridge on top and if so returns
2330 * true if a valid querier exists anywhere on the bridged link layer.
2331 * Otherwise returns false.
2332 */
2333 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2334 {
2335 struct net_bridge *br;
2336 struct net_bridge_port *port;
2337 struct ethhdr eth;
2338 bool ret = false;
2339
2340 rcu_read_lock();
2341 if (!br_port_exists(dev))
2342 goto unlock;
2343
2344 port = br_port_get_rcu(dev);
2345 if (!port || !port->br)
2346 goto unlock;
2347
2348 br = port->br;
2349
2350 memset(&eth, 0, sizeof(eth));
2351 eth.h_proto = htons(proto);
2352
2353 ret = br_multicast_querier_exists(br, &eth);
2354
2355 unlock:
2356 rcu_read_unlock();
2357 return ret;
2358 }
2359 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
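/*
 * Hypothetical use (illustration only): without a querier, snooping state
 * is not guaranteed to be complete, so a caller might fall back to
 * flooding IPv4 multicast.
 */
#if 0
	if (!br_multicast_has_querier_anywhere(dev, ETH_P_IP))
		flood_ipv4_multicast = true;	/* illustrative variable */
#endif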
2360
2361 /**
2362 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2363 * @dev: The bridge port adjacent to which to check for a querier
2364 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2365 *
2366 * Checks whether the given interface has a bridge on top and if so returns
2367 * true if a selected querier is behind one of the other ports of this
2368 * bridge. Otherwise returns false.
2369 */
2370 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2371 {
2372 struct net_bridge *br;
2373 struct net_bridge_port *port;
2374 bool ret = false;
2375
2376 rcu_read_lock();
2377 if (!br_port_exists(dev))
2378 goto unlock;
2379
2380 port = br_port_get_rcu(dev);
2381 if (!port || !port->br)
2382 goto unlock;
2383
2384 br = port->br;
2385
2386 switch (proto) {
2387 case ETH_P_IP:
2388 if (!timer_pending(&br->ip4_other_query.timer) ||
2389 rcu_dereference(br->ip4_querier.port) == port)
2390 goto unlock;
2391 break;
2392 #if IS_ENABLED(CONFIG_IPV6)
2393 case ETH_P_IPV6:
2394 if (!timer_pending(&br->ip6_other_query.timer) ||
2395 rcu_dereference(br->ip6_querier.port) == port)
2396 goto unlock;
2397 break;
2398 #endif
2399 default:
2400 goto unlock;
2401 }
2402
2403 ret = true;
2404 unlock:
2405 rcu_read_unlock();
2406 return ret;
2407 }
2408 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
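/*
 * Contrast with br_multicast_has_querier_anywhere() above: "anywhere" also
 * counts this bridge's own querier and one behind @dev itself, while
 * "adjacent" relies on the other-querier timer alone and deliberately
 * ignores a querier whose port is @dev.
 */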
2409
2410 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
2411 const struct sk_buff *skb, u8 type, u8 dir)
2412 {
2413 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
2414 __be16 proto = skb->protocol;
2415 unsigned int t_len;
2416
2417 u64_stats_update_begin(&pstats->syncp);
2418 switch (proto) {
2419 case htons(ETH_P_IP):
2420 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
2421 switch (type) {
2422 case IGMP_HOST_MEMBERSHIP_REPORT:
2423 pstats->mstats.igmp_v1reports[dir]++;
2424 break;
2425 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2426 pstats->mstats.igmp_v2reports[dir]++;
2427 break;
2428 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2429 pstats->mstats.igmp_v3reports[dir]++;
2430 break;
2431 case IGMP_HOST_MEMBERSHIP_QUERY:
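			/* An IGMPv3 query is longer than struct igmphdr;
			 * v1 and v2 queries share its size and are told
			 * apart by the max response code (zero in v1).
			 */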
2432 if (t_len != sizeof(struct igmphdr)) {
2433 pstats->mstats.igmp_v3queries[dir]++;
2434 } else {
2435 unsigned int offset = skb_transport_offset(skb);
2436 struct igmphdr *ih, _ihdr;
2437
2438 ih = skb_header_pointer(skb, offset,
2439 sizeof(_ihdr), &_ihdr);
2440 if (!ih)
2441 break;
2442 if (!ih->code)
2443 pstats->mstats.igmp_v1queries[dir]++;
2444 else
2445 pstats->mstats.igmp_v2queries[dir]++;
2446 }
2447 break;
2448 case IGMP_HOST_LEAVE_MESSAGE:
2449 pstats->mstats.igmp_leaves[dir]++;
2450 break;
2451 }
2452 break;
2453 #if IS_ENABLED(CONFIG_IPV6)
2454 case htons(ETH_P_IPV6):
2455 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
2456 sizeof(struct ipv6hdr);
2457 t_len -= skb_network_header_len(skb);
2458 switch (type) {
2459 case ICMPV6_MGM_REPORT:
2460 pstats->mstats.mld_v1reports[dir]++;
2461 break;
2462 case ICMPV6_MLD2_REPORT:
2463 pstats->mstats.mld_v2reports[dir]++;
2464 break;
2465 case ICMPV6_MGM_QUERY:
2466 if (t_len != sizeof(struct mld_msg))
2467 pstats->mstats.mld_v2queries[dir]++;
2468 else
2469 pstats->mstats.mld_v1queries[dir]++;
2470 break;
2471 case ICMPV6_MGM_REDUCTION:
2472 pstats->mstats.mld_leaves[dir]++;
2473 break;
2474 }
2475 break;
2476 #endif /* CONFIG_IPV6 */
2477 }
2478 u64_stats_update_end(&pstats->syncp);
2479 }
2480
2481 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
2482 const struct sk_buff *skb, u8 type, u8 dir)
2483 {
2484 struct bridge_mcast_stats __percpu *stats;
2485
2486 /* if snooping is disabled then no IGMP/MLD type can have been set */
2487 if (!type || !br->multicast_stats_enabled)
2488 return;
2489
2490 if (p)
2491 stats = p->mcast_stats;
2492 else
2493 stats = br->mcast_stats;
2494 if (WARN_ON(!stats))
2495 return;
2496
2497 br_mcast_stats_add(stats, skb, type, dir);
2498 }
2499
2500 int br_multicast_init_stats(struct net_bridge *br)
2501 {
2502 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2503 if (!br->mcast_stats)
2504 return -ENOMEM;
2505
2506 return 0;
2507 }
2508
2509 static void mcast_stats_add_dir(u64 *dst, u64 *src)
2510 {
2511 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
2512 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
2513 }
2514
2515 void br_multicast_get_stats(const struct net_bridge *br,
2516 const struct net_bridge_port *p,
2517 struct br_mcast_stats *dest)
2518 {
2519 struct bridge_mcast_stats __percpu *stats;
2520 struct br_mcast_stats tdst;
2521 int i;
2522
2523 memset(dest, 0, sizeof(*dest));
2524 if (p)
2525 stats = p->mcast_stats;
2526 else
2527 stats = br->mcast_stats;
2528 if (WARN_ON(!stats))
2529 return;
2530
2531 memset(&tdst, 0, sizeof(tdst));
2532 for_each_possible_cpu(i) {
2533 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
2534 struct br_mcast_stats temp;
2535 unsigned int start;
2536
2537 do {
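		/* u64_stats: re-copy if a writer updated the counters
		 * while we were reading this CPU's slot
		 */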
2538 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2539 memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
2540 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2541
2542 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
2543 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
2544 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
2545 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
2546 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
2547 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
2548 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
2549 tdst.igmp_parse_errors += temp.igmp_parse_errors;
2550
2551 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
2552 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
2553 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
2554 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
2555 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
2556 tdst.mld_parse_errors += temp.mld_parse_errors;
2557 }
2558 memcpy(dest, &tdst, sizeof(*dest));
2559 }