/*
 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* keep pim6_mld.h open when working on this code. Most data structures are
 * commented in the header.
 *
 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
 * that this code will replace the old IGMP querier at some point.
 */
29 #include <netinet/ip6.h>
31 #include "lib/memory.h"
32 #include "lib/jhash.h"
33 #include "lib/prefix.h"
34 #include "lib/checksum.h"
35 #include "lib/thread.h"
36 #include "termtable.h"
38 #include "pimd/pim6_mld.h"
39 #include "pimd/pim6_mld_protocol.h"
40 #include "pimd/pim_memory.h"
41 #include "pimd/pim_instance.h"
42 #include "pimd/pim_iface.h"
43 #include "pimd/pim6_cmd.h"
44 #include "pimd/pim_cmd_common.h"
45 #include "pimd/pim_util.h"
46 #include "pimd/pim_tib.h"
47 #include "pimd/pimd.h"
49 #ifndef IPV6_MULTICAST_ALL
50 #define IPV6_MULTICAST_ALL 29
53 DEFINE_MTYPE_STATIC(PIMD
, GM_IFACE
, "MLD interface");
54 DEFINE_MTYPE_STATIC(PIMD
, GM_PACKET
, "MLD packet");
55 DEFINE_MTYPE_STATIC(PIMD
, GM_SUBSCRIBER
, "MLD subscriber");
56 DEFINE_MTYPE_STATIC(PIMD
, GM_STATE
, "MLD subscription state");
57 DEFINE_MTYPE_STATIC(PIMD
, GM_SG
, "MLD (S,G)");
58 DEFINE_MTYPE_STATIC(PIMD
, GM_GRP_PENDING
, "MLD group query state");
59 DEFINE_MTYPE_STATIC(PIMD
, GM_GSQ_PENDING
, "MLD group/source query aggregate");
61 static void gm_t_query(struct thread
*t
);
62 static void gm_trigger_specific(struct gm_sg
*sg
);
63 static void gm_sg_timer_start(struct gm_if
*gm_ifp
, struct gm_sg
*sg
,
64 struct timeval expire_wait
);
66 /* shorthand for log messages */
67 #define log_ifp(msg) \
68 "[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
69 #define log_pkt_src(msg) \
70 "[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
72 #define log_sg(sg, msg) \
73 "[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
74 sg->iface->ifp->name, &sg->sgaddr
76 /* clang-format off */
78 static const pim_addr gm_all_hosts
= {
80 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
84 static const pim_addr gm_all_routers
= {
86 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
87 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
90 /* MLDv1 does not allow subscriber tracking due to report suppression
91 * hence, the source address is replaced with ffff:...:ffff
93 static const pim_addr gm_dummy_untracked
= {
95 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
96 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
101 static const pim_addr gm_all_hosts
= { .s_addr
= htonl(0xe0000001), };
103 static const pim_addr gm_all_routers
= { .s_addr
= htonl(0xe0000016), };
104 static const pim_addr gm_dummy_untracked
= { .s_addr
= 0xffffffff, };
106 /* clang-format on */
108 #define IPV6_MULTICAST_SCOPE_LINK 2
110 static inline uint8_t in6_multicast_scope(const pim_addr
*addr
)
112 return addr
->s6_addr
[1] & 0xf;
115 static inline bool in6_multicast_nofwd(const pim_addr
*addr
)
117 return in6_multicast_scope(addr
) <= IPV6_MULTICAST_SCOPE_LINK
;
121 * (S,G) -> subscriber,(S,G)
124 static int gm_packet_sg_cmp(const struct gm_packet_sg
*a
,
125 const struct gm_packet_sg
*b
)
127 const struct gm_packet_state
*s_a
, *s_b
;
129 s_a
= gm_packet_sg2state(a
);
130 s_b
= gm_packet_sg2state(b
);
131 return IPV6_ADDR_CMP(&s_a
->subscriber
->addr
, &s_b
->subscriber
->addr
);
134 DECLARE_RBTREE_UNIQ(gm_packet_sg_subs
, struct gm_packet_sg
, subs_itm
,
137 static struct gm_packet_sg
*gm_packet_sg_find(struct gm_sg
*sg
,
138 enum gm_sub_sense sense
,
139 struct gm_subscriber
*sub
)
142 struct gm_packet_state hdr
;
143 struct gm_packet_sg item
;
145 /* clang-format off */
152 /* clang-format on */
155 return gm_packet_sg_subs_find(&sg
->subs
[sense
], &ref
.item
);
159 * interface -> (*,G),pending
162 static int gm_grp_pending_cmp(const struct gm_grp_pending
*a
,
163 const struct gm_grp_pending
*b
)
165 return IPV6_ADDR_CMP(&a
->grp
, &b
->grp
);
168 DECLARE_RBTREE_UNIQ(gm_grp_pends
, struct gm_grp_pending
, itm
,
172 * interface -> ([S1,S2,...],G),pending
175 static int gm_gsq_pending_cmp(const struct gm_gsq_pending
*a
,
176 const struct gm_gsq_pending
*b
)
178 if (a
->s_bit
!= b
->s_bit
)
179 return numcmp(a
->s_bit
, b
->s_bit
);
181 return IPV6_ADDR_CMP(&a
->grp
, &b
->grp
);
184 static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending
*a
)
186 uint32_t seed
= a
->s_bit
? 0x68f0eb5e : 0x156b7f19;
188 return jhash(&a
->grp
, sizeof(a
->grp
), seed
);
191 DECLARE_HASH(gm_gsq_pends
, struct gm_gsq_pending
, itm
, gm_gsq_pending_cmp
,
192 gm_gsq_pending_hash
);
198 static int gm_sg_cmp(const struct gm_sg
*a
, const struct gm_sg
*b
)
200 return pim_sgaddr_cmp(a
->sgaddr
, b
->sgaddr
);
203 DECLARE_RBTREE_UNIQ(gm_sgs
, struct gm_sg
, itm
, gm_sg_cmp
);
205 static struct gm_sg
*gm_sg_find(struct gm_if
*gm_ifp
, pim_addr grp
,
208 struct gm_sg ref
= {};
210 ref
.sgaddr
.grp
= grp
;
211 ref
.sgaddr
.src
= src
;
212 return gm_sgs_find(gm_ifp
->sgs
, &ref
);
215 static struct gm_sg
*gm_sg_make(struct gm_if
*gm_ifp
, pim_addr grp
,
218 struct gm_sg
*ret
, *prev
;
220 ret
= XCALLOC(MTYPE_GM_SG
, sizeof(*ret
));
221 ret
->sgaddr
.grp
= grp
;
222 ret
->sgaddr
.src
= src
;
224 prev
= gm_sgs_add(gm_ifp
->sgs
, ret
);
227 XFREE(MTYPE_GM_SG
, ret
);
230 monotime(&ret
->created
);
231 gm_packet_sg_subs_init(ret
->subs_positive
);
232 gm_packet_sg_subs_init(ret
->subs_negative
);
238 * interface -> packets, sorted by expiry (because add_tail insert order)
241 DECLARE_DLIST(gm_packet_expires
, struct gm_packet_state
, exp_itm
);
244 * subscriber -> packets
247 DECLARE_DLIST(gm_packets
, struct gm_packet_state
, pkt_itm
);
250 * interface -> subscriber
253 static int gm_subscriber_cmp(const struct gm_subscriber
*a
,
254 const struct gm_subscriber
*b
)
256 return IPV6_ADDR_CMP(&a
->addr
, &b
->addr
);
259 static uint32_t gm_subscriber_hash(const struct gm_subscriber
*a
)
261 return jhash(&a
->addr
, sizeof(a
->addr
), 0xd0e94ad4);
264 DECLARE_HASH(gm_subscribers
, struct gm_subscriber
, itm
, gm_subscriber_cmp
,
267 static struct gm_subscriber
*gm_subscriber_findref(struct gm_if
*gm_ifp
,
270 struct gm_subscriber ref
= {}, *ret
;
273 ret
= gm_subscribers_find(gm_ifp
->subscribers
, &ref
);
279 static struct gm_subscriber
*gm_subscriber_get(struct gm_if
*gm_ifp
,
282 struct gm_subscriber ref
= {}, *ret
;
285 ret
= gm_subscribers_find(gm_ifp
->subscribers
, &ref
);
288 ret
= XCALLOC(MTYPE_GM_SUBSCRIBER
, sizeof(*ret
));
292 monotime(&ret
->created
);
293 gm_packets_init(ret
->packets
);
295 gm_subscribers_add(gm_ifp
->subscribers
, ret
);
300 static void gm_subscriber_drop(struct gm_subscriber
**subp
)
302 struct gm_subscriber
*sub
= *subp
;
303 struct gm_if
*gm_ifp
;
315 gm_subscribers_del(gm_ifp
->subscribers
, sub
);
316 XFREE(MTYPE_GM_SUBSCRIBER
, sub
);
319 /****************************************************************************/
321 /* bundle query timer values for combined v1/v2 handling */
322 struct gm_query_timers
{
324 unsigned int max_resp_ms
;
325 unsigned int qqic_ms
;
328 struct timeval expire_wait
;
331 static void gm_expiry_calc(struct gm_query_timers
*timers
)
333 unsigned int expire
=
334 (timers
->qrv
- 1) * timers
->qqic_ms
+ timers
->max_resp_ms
;
335 ldiv_t exp_div
= ldiv(expire
, 1000);
337 timers
->expire_wait
.tv_sec
= exp_div
.quot
;
338 timers
->expire_wait
.tv_usec
= exp_div
.rem
* 1000;
339 timeradd(&timers
->expire_wait
, &timers
->fuzz
, &timers
->expire_wait
);
/* final teardown of an (S,G) entry.  Caller has already unlinked it from
 * the interface's RB-tree and the subscription trees are expected to be
 * empty at this point (NOTE(review): grounded in the pos/neg count assert
 * in gm_sg_update before this is reached).
 */
static void gm_sg_free(struct gm_sg *sg)
{
	/* t_sg_expiry is handled before this is reached */
	THREAD_OFF(sg->t_sg_query);
	gm_packet_sg_subs_fini(sg->subs_negative);
	gm_packet_sg_subs_fini(sg->subs_positive);
	XFREE(MTYPE_GM_SG, sg);
}
351 /* clang-format off */
352 static const char *const gm_states
[] = {
353 [GM_SG_NOINFO
] = "NOINFO",
354 [GM_SG_JOIN
] = "JOIN",
355 [GM_SG_JOIN_EXPIRING
] = "JOIN_EXPIRING",
356 [GM_SG_PRUNE
] = "PRUNE",
357 [GM_SG_NOPRUNE
] = "NOPRUNE",
358 [GM_SG_NOPRUNE_EXPIRING
] = "NOPRUNE_EXPIRING",
360 /* clang-format on */
362 CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported");
363 /* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
364 * joined. Whether we actually want/need to support this is a separate
365 * question - it is almost never used. In fact this is exactly what RFC5790
366 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
369 static void gm_sg_update(struct gm_sg
*sg
, bool has_expired
)
371 struct gm_if
*gm_ifp
= sg
->iface
;
372 enum gm_sg_state prev
, desired
;
374 struct gm_sg
*grp
= NULL
;
376 if (!pim_addr_is_any(sg
->sgaddr
.src
))
377 grp
= gm_sg_find(gm_ifp
, sg
->sgaddr
.grp
, PIMADDR_ANY
);
379 assert(sg
->state
!= GM_SG_PRUNE
);
381 if (gm_packet_sg_subs_count(sg
->subs_positive
)) {
382 desired
= GM_SG_JOIN
;
383 assert(!sg
->t_sg_expire
);
384 } else if ((sg
->state
== GM_SG_JOIN
||
385 sg
->state
== GM_SG_JOIN_EXPIRING
) &&
387 desired
= GM_SG_JOIN_EXPIRING
;
388 else if (!grp
|| !gm_packet_sg_subs_count(grp
->subs_positive
))
389 desired
= GM_SG_NOINFO
;
390 else if (gm_packet_sg_subs_count(grp
->subs_positive
) ==
391 gm_packet_sg_subs_count(sg
->subs_negative
)) {
392 if ((sg
->state
== GM_SG_NOPRUNE
||
393 sg
->state
== GM_SG_NOPRUNE_EXPIRING
) &&
395 desired
= GM_SG_NOPRUNE_EXPIRING
;
397 desired
= GM_SG_PRUNE
;
398 } else if (gm_packet_sg_subs_count(sg
->subs_negative
))
399 desired
= GM_SG_NOPRUNE
;
401 desired
= GM_SG_NOINFO
;
403 if (desired
!= sg
->state
&& !gm_ifp
->stopping
) {
404 if (PIM_DEBUG_GM_EVENTS
)
405 zlog_debug(log_sg(sg
, "%s => %s"), gm_states
[sg
->state
],
408 if (desired
== GM_SG_JOIN_EXPIRING
||
409 desired
== GM_SG_NOPRUNE_EXPIRING
) {
410 struct gm_query_timers timers
;
412 timers
.qrv
= gm_ifp
->cur_qrv
;
413 timers
.max_resp_ms
= gm_ifp
->cur_max_resp
;
414 timers
.qqic_ms
= gm_ifp
->cur_query_intv_trig
;
415 timers
.fuzz
= gm_ifp
->cfg_timing_fuzz
;
417 gm_expiry_calc(&timers
);
418 gm_sg_timer_start(gm_ifp
, sg
, timers
.expire_wait
);
420 THREAD_OFF(sg
->t_sg_query
);
421 sg
->n_query
= gm_ifp
->cur_lmqc
;
422 sg
->query_sbit
= false;
423 gm_trigger_specific(sg
);
429 if (in6_multicast_nofwd(&sg
->sgaddr
.grp
) || gm_ifp
->stopping
)
432 new_join
= gm_sg_state_want_join(desired
);
434 if (new_join
&& !sg
->tib_joined
) {
435 /* this will retry if join previously failed */
436 sg
->tib_joined
= tib_sg_gm_join(gm_ifp
->pim
, sg
->sgaddr
,
437 gm_ifp
->ifp
, &sg
->oil
);
440 "MLD join for %pSG%%%s not propagated into TIB",
441 &sg
->sgaddr
, gm_ifp
->ifp
->name
);
443 zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg
->sgaddr
,
446 } else if (sg
->tib_joined
&& !new_join
) {
447 tib_sg_gm_prune(gm_ifp
->pim
, sg
->sgaddr
, gm_ifp
->ifp
, &sg
->oil
);
450 sg
->tib_joined
= false;
453 if (desired
== GM_SG_NOINFO
) {
454 assertf((!sg
->t_sg_expire
&&
455 !gm_packet_sg_subs_count(sg
->subs_positive
) &&
456 !gm_packet_sg_subs_count(sg
->subs_negative
)),
457 "%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
458 &sg
->sgaddr
, gm_ifp
->ifp
->name
, has_expired
,
459 sg
->t_sg_expire
, gm_states
[prev
], gm_states
[desired
],
460 gm_packet_sg_subs_count(sg
->subs_positive
),
461 gm_packet_sg_subs_count(sg
->subs_negative
), grp
);
463 if (PIM_DEBUG_GM_TRACE
)
464 zlog_debug(log_sg(sg
, "dropping"));
466 gm_sgs_del(gm_ifp
->sgs
, sg
);
471 /****************************************************************************/
473 /* the following bunch of functions deals with transferring state from
474 * received packets into gm_packet_state. As a reminder, the querier is
475 * structured to keep all items received in one packet together, since they
476 * will share expiry timers and thus allows efficient handling.
/* free a fully-drained packet state: unlink it from both lists it sits on
 * (per-interface expiry order, per-subscriber packet list) before releasing
 * the memory.
 */
static void gm_packet_free(struct gm_packet_state *pkt)
{
	gm_packet_expires_del(pkt->iface->expires, pkt);
	gm_packets_del(pkt->subscriber->packets, pkt);
	/* drops our reference on the subscriber; this may free it */
	gm_subscriber_drop(&pkt->subscriber);
	XFREE(MTYPE_GM_STATE, pkt);
}
487 static struct gm_packet_sg
*gm_packet_sg_setup(struct gm_packet_state
*pkt
,
488 struct gm_sg
*sg
, bool is_excl
,
491 struct gm_packet_sg
*item
;
493 assert(pkt
->n_active
< pkt
->n_sg
);
495 item
= &pkt
->items
[pkt
->n_active
];
497 item
->is_excl
= is_excl
;
498 item
->is_src
= is_src
;
499 item
->offset
= pkt
->n_active
;
505 static bool gm_packet_sg_drop(struct gm_packet_sg
*item
)
507 struct gm_packet_state
*pkt
;
512 pkt
= gm_packet_sg2state(item
);
513 if (item
->sg
->most_recent
== item
)
514 item
->sg
->most_recent
= NULL
;
516 for (i
= 0; i
< item
->n_exclude
; i
++) {
517 struct gm_packet_sg
*excl_item
;
519 excl_item
= item
+ 1 + i
;
523 gm_packet_sg_subs_del(excl_item
->sg
->subs_negative
, excl_item
);
524 excl_item
->sg
= NULL
;
527 assert(pkt
->n_active
> 0);
530 if (item
->is_excl
&& item
->is_src
)
531 gm_packet_sg_subs_del(item
->sg
->subs_negative
, item
);
533 gm_packet_sg_subs_del(item
->sg
->subs_positive
, item
);
537 if (!pkt
->n_active
) {
544 static void gm_packet_drop(struct gm_packet_state
*pkt
, bool trace
)
546 for (size_t i
= 0; i
< pkt
->n_sg
; i
++) {
547 struct gm_sg
*sg
= pkt
->items
[i
].sg
;
553 if (trace
&& PIM_DEBUG_GM_TRACE
)
554 zlog_debug(log_sg(sg
, "general-dropping from %pPA"),
555 &pkt
->subscriber
->addr
);
556 deleted
= gm_packet_sg_drop(&pkt
->items
[i
]);
558 gm_sg_update(sg
, true);
564 static void gm_packet_sg_remove_sources(struct gm_if
*gm_ifp
,
565 struct gm_subscriber
*subscriber
,
566 pim_addr grp
, pim_addr
*srcs
,
567 size_t n_src
, enum gm_sub_sense sense
)
570 struct gm_packet_sg
*old_src
;
573 for (i
= 0; i
< n_src
; i
++) {
574 sg
= gm_sg_find(gm_ifp
, grp
, srcs
[i
]);
578 old_src
= gm_packet_sg_find(sg
, sense
, subscriber
);
582 gm_packet_sg_drop(old_src
);
583 gm_sg_update(sg
, false);
/* a report for this (S,G) came in: it is alive, so stop any pending expiry
 * and record that subsequent specific queries for it should carry the
 * S-bit (query_sbit is cleared again when a new query round starts in
 * gm_sg_update).
 */
static void gm_sg_expiry_cancel(struct gm_sg *sg)
{
	if (sg->t_sg_expire && PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
	THREAD_OFF(sg->t_sg_expire);
	sg->query_sbit = true;
}
595 /* first pass: process all changes resulting in removal of state:
596 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
597 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
598 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
599 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
600 * note *replacing* state is NOT considered *removing* state here
602 * everything else is thrown into pkt for creation of state in pass 2
604 static void gm_handle_v2_pass1(struct gm_packet_state
*pkt
,
605 struct mld_v2_rec_hdr
*rechdr
)
607 /* NB: pkt->subscriber can be NULL here if the subscriber was not
610 struct gm_subscriber
*subscriber
= pkt
->subscriber
;
612 struct gm_packet_sg
*old_grp
= NULL
;
613 struct gm_packet_sg
*item
;
614 size_t n_src
= ntohs(rechdr
->n_src
);
616 bool is_excl
= false;
618 grp
= gm_sg_find(pkt
->iface
, rechdr
->grp
, PIMADDR_ANY
);
619 if (grp
&& subscriber
)
620 old_grp
= gm_packet_sg_find(grp
, GM_SUB_POS
, subscriber
);
622 assert(old_grp
== NULL
|| old_grp
->is_excl
);
624 switch (rechdr
->type
) {
625 case MLD_RECTYPE_IS_EXCLUDE
:
626 case MLD_RECTYPE_CHANGE_TO_EXCLUDE
:
627 /* this always replaces or creates state */
630 grp
= gm_sg_make(pkt
->iface
, rechdr
->grp
, PIMADDR_ANY
);
632 item
= gm_packet_sg_setup(pkt
, grp
, is_excl
, false);
633 item
->n_exclude
= n_src
;
635 /* [EXCL_INCL_SG_NOTE] referenced below
637 * in theory, we should drop any S,G that the host may have
638 * previously added in INCLUDE mode. In practice, this is both
639 * incredibly rare and entirely irrelevant. It only makes any
640 * difference if an S,G that the host previously had on the
641 * INCLUDE list is now on the blocked list for EXCLUDE, which
642 * we can cover in processing the S,G list in pass2_excl().
644 * Other S,G from the host are simply left to expire
645 * "naturally" through general expiry.
649 case MLD_RECTYPE_IS_INCLUDE
:
650 case MLD_RECTYPE_CHANGE_TO_INCLUDE
:
652 /* INCLUDE has no *,G state, so old_grp here refers to
653 * previous EXCLUDE => delete it
655 gm_packet_sg_drop(old_grp
);
656 gm_sg_update(grp
, false);
657 CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
661 case MLD_RECTYPE_ALLOW_NEW_SOURCES
:
663 /* remove S,Gs from EXCLUDE, and then we're done */
664 gm_packet_sg_remove_sources(pkt
->iface
, subscriber
,
665 rechdr
->grp
, rechdr
->srcs
,
669 /* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
670 * idential to IS_INCLUDE (because the list of sources in
671 * IS_INCLUDE is not exhaustive)
675 case MLD_RECTYPE_BLOCK_OLD_SOURCES
:
677 /* this is intentionally not implemented because it
678 * would be complicated as hell. we only take the list
679 * of blocked sources from full group state records
685 gm_packet_sg_remove_sources(pkt
->iface
, subscriber
,
686 rechdr
->grp
, rechdr
->srcs
,
691 for (j
= 0; j
< n_src
; j
++) {
694 sg
= gm_sg_find(pkt
->iface
, rechdr
->grp
, rechdr
->srcs
[j
]);
696 sg
= gm_sg_make(pkt
->iface
, rechdr
->grp
,
699 gm_packet_sg_setup(pkt
, sg
, is_excl
, true);
703 /* second pass: creating/updating/refreshing state. All the items from the
704 * received packet have already been thrown into gm_packet_state.
707 static void gm_handle_v2_pass2_incl(struct gm_packet_state
*pkt
, size_t i
)
709 struct gm_packet_sg
*item
= &pkt
->items
[i
];
710 struct gm_packet_sg
*old
= NULL
;
711 struct gm_sg
*sg
= item
->sg
;
713 /* EXCLUDE state was already dropped in pass1 */
714 assert(!gm_packet_sg_find(sg
, GM_SUB_NEG
, pkt
->subscriber
));
716 old
= gm_packet_sg_find(sg
, GM_SUB_POS
, pkt
->subscriber
);
718 gm_packet_sg_drop(old
);
721 gm_packet_sg_subs_add(sg
->subs_positive
, item
);
723 sg
->most_recent
= item
;
724 gm_sg_expiry_cancel(sg
);
725 gm_sg_update(sg
, false);
728 static void gm_handle_v2_pass2_excl(struct gm_packet_state
*pkt
, size_t offs
)
730 struct gm_packet_sg
*item
= &pkt
->items
[offs
];
731 struct gm_packet_sg
*old_grp
, *item_dup
;
732 struct gm_sg
*sg_grp
= item
->sg
;
735 old_grp
= gm_packet_sg_find(sg_grp
, GM_SUB_POS
, pkt
->subscriber
);
737 for (i
= 0; i
< item
->n_exclude
; i
++) {
738 struct gm_packet_sg
*item_src
, *old_src
;
740 item_src
= &pkt
->items
[offs
+ 1 + i
];
741 old_src
= gm_packet_sg_find(item_src
->sg
, GM_SUB_NEG
,
744 gm_packet_sg_drop(old_src
);
746 /* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
747 * items left over if the host previously had INCLUDE
748 * mode going. Remove them here if we find any.
750 old_src
= gm_packet_sg_find(item_src
->sg
, GM_SUB_POS
,
753 gm_packet_sg_drop(old_src
);
756 /* the previous loop has removed the S,G entries which are
757 * still excluded after this update. So anything left on the
758 * old item was previously excluded but is now included
759 * => need to trigger update on S,G
761 for (i
= 0; i
< old_grp
->n_exclude
; i
++) {
762 struct gm_packet_sg
*old_src
;
763 struct gm_sg
*old_sg_src
;
765 old_src
= old_grp
+ 1 + i
;
766 old_sg_src
= old_src
->sg
;
770 gm_packet_sg_drop(old_src
);
771 gm_sg_update(old_sg_src
, false);
774 gm_packet_sg_drop(old_grp
);
777 item_dup
= gm_packet_sg_subs_add(sg_grp
->subs_positive
, item
);
781 sg_grp
->most_recent
= item
;
782 gm_sg_expiry_cancel(sg_grp
);
784 for (i
= 0; i
< item
->n_exclude
; i
++) {
785 struct gm_packet_sg
*item_src
;
787 item_src
= &pkt
->items
[offs
+ 1 + i
];
788 item_dup
= gm_packet_sg_subs_add(item_src
->sg
->subs_negative
,
795 gm_sg_update(item_src
->sg
, false);
799 /* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
800 * to get lower PIM churn/flapping
802 gm_sg_update(sg_grp
, false);
805 CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state");
806 /* on receiving a query, we need to update our robustness/query interval to
807 * match, so we correctly process group/source specific queries after last
811 static void gm_handle_v2_report(struct gm_if
*gm_ifp
,
812 const struct sockaddr_in6
*pkt_src
, char *data
,
815 struct mld_v2_report_hdr
*hdr
;
816 size_t i
, n_records
, max_entries
;
817 struct gm_packet_state
*pkt
;
819 if (len
< sizeof(*hdr
)) {
820 if (PIM_DEBUG_GM_PACKETS
)
821 zlog_debug(log_pkt_src(
822 "malformed MLDv2 report (truncated header)"));
823 gm_ifp
->stats
.rx_drop_malformed
++;
827 /* errors after this may at least partially process the packet */
828 gm_ifp
->stats
.rx_new_report
++;
830 hdr
= (struct mld_v2_report_hdr
*)data
;
831 data
+= sizeof(*hdr
);
834 /* can't have more *,G and S,G items than there is space for ipv6
835 * addresses, so just use this to allocate temporary buffer
837 max_entries
= len
/ sizeof(pim_addr
);
838 pkt
= XCALLOC(MTYPE_GM_STATE
,
839 offsetof(struct gm_packet_state
, items
[max_entries
]));
840 pkt
->n_sg
= max_entries
;
842 pkt
->subscriber
= gm_subscriber_findref(gm_ifp
, pkt_src
->sin6_addr
);
844 n_records
= ntohs(hdr
->n_records
);
846 /* validate & remove state in v2_pass1() */
847 for (i
= 0; i
< n_records
; i
++) {
848 struct mld_v2_rec_hdr
*rechdr
;
849 size_t n_src
, record_size
;
851 if (len
< sizeof(*rechdr
)) {
852 zlog_warn(log_pkt_src(
853 "malformed MLDv2 report (truncated record header)"));
854 gm_ifp
->stats
.rx_trunc_report
++;
858 rechdr
= (struct mld_v2_rec_hdr
*)data
;
859 data
+= sizeof(*rechdr
);
860 len
-= sizeof(*rechdr
);
862 n_src
= ntohs(rechdr
->n_src
);
863 record_size
= n_src
* sizeof(pim_addr
) + rechdr
->aux_len
* 4;
865 if (len
< record_size
) {
866 zlog_warn(log_pkt_src(
867 "malformed MLDv2 report (truncated source list)"));
868 gm_ifp
->stats
.rx_trunc_report
++;
871 if (!IN6_IS_ADDR_MULTICAST(&rechdr
->grp
)) {
874 "malformed MLDv2 report (invalid group %pI6)"),
876 gm_ifp
->stats
.rx_trunc_report
++;
883 gm_handle_v2_pass1(pkt
, rechdr
);
886 if (!pkt
->n_active
) {
887 gm_subscriber_drop(&pkt
->subscriber
);
888 XFREE(MTYPE_GM_STATE
, pkt
);
892 pkt
= XREALLOC(MTYPE_GM_STATE
, pkt
,
893 offsetof(struct gm_packet_state
, items
[pkt
->n_active
]));
894 pkt
->n_sg
= pkt
->n_active
;
897 monotime(&pkt
->received
);
898 if (!pkt
->subscriber
)
899 pkt
->subscriber
= gm_subscriber_get(gm_ifp
, pkt_src
->sin6_addr
);
900 gm_packets_add_tail(pkt
->subscriber
->packets
, pkt
);
901 gm_packet_expires_add_tail(gm_ifp
->expires
, pkt
);
903 for (i
= 0; i
< pkt
->n_sg
; i
++)
904 if (!pkt
->items
[i
].is_excl
)
905 gm_handle_v2_pass2_incl(pkt
, i
);
907 gm_handle_v2_pass2_excl(pkt
, i
);
908 i
+= pkt
->items
[i
].n_exclude
;
911 if (pkt
->n_active
== 0)
915 static void gm_handle_v1_report(struct gm_if
*gm_ifp
,
916 const struct sockaddr_in6
*pkt_src
, char *data
,
919 struct mld_v1_pkt
*hdr
;
920 struct gm_packet_state
*pkt
;
922 struct gm_packet_sg
*item
;
925 if (len
< sizeof(*hdr
)) {
926 if (PIM_DEBUG_GM_PACKETS
)
927 zlog_debug(log_pkt_src(
928 "malformed MLDv1 report (truncated)"));
929 gm_ifp
->stats
.rx_drop_malformed
++;
933 gm_ifp
->stats
.rx_old_report
++;
935 hdr
= (struct mld_v1_pkt
*)data
;
938 pkt
= XCALLOC(MTYPE_GM_STATE
,
939 offsetof(struct gm_packet_state
, items
[max_entries
]));
940 pkt
->n_sg
= max_entries
;
942 pkt
->subscriber
= gm_subscriber_findref(gm_ifp
, gm_dummy_untracked
);
944 /* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */
946 grp
= gm_sg_find(pkt
->iface
, hdr
->grp
, PIMADDR_ANY
);
948 grp
= gm_sg_make(pkt
->iface
, hdr
->grp
, PIMADDR_ANY
);
950 item
= gm_packet_sg_setup(pkt
, grp
, true, false);
952 CPP_NOTICE("set v1-seen timer on grp here");
956 /* pass2 will count n_active back up to 1. Also since a v1 report
957 * has exactly 1 group, we can skip the realloc() that v2 needs here.
959 assert(pkt
->n_active
== 1);
960 pkt
->n_sg
= pkt
->n_active
;
963 monotime(&pkt
->received
);
964 if (!pkt
->subscriber
)
965 pkt
->subscriber
= gm_subscriber_get(gm_ifp
, gm_dummy_untracked
);
966 gm_packets_add_tail(pkt
->subscriber
->packets
, pkt
);
967 gm_packet_expires_add_tail(gm_ifp
->expires
, pkt
);
969 /* pass2 covers installing state & removing old state; all the v1
970 * compat is handled at this point.
972 * Note that "old state" may be v2; subscribers will switch from v2
973 * reports to v1 reports when the querier changes from v2 to v1. So,
974 * limiting this to v1 would be wrong.
976 gm_handle_v2_pass2_excl(pkt
, 0);
978 if (pkt
->n_active
== 0)
982 static void gm_handle_v1_leave(struct gm_if
*gm_ifp
,
983 const struct sockaddr_in6
*pkt_src
, char *data
,
986 struct mld_v1_pkt
*hdr
;
987 struct gm_subscriber
*subscriber
;
989 struct gm_packet_sg
*old_grp
;
991 if (len
< sizeof(*hdr
)) {
992 if (PIM_DEBUG_GM_PACKETS
)
993 zlog_debug(log_pkt_src(
994 "malformed MLDv1 leave (truncated)"));
995 gm_ifp
->stats
.rx_drop_malformed
++;
999 gm_ifp
->stats
.rx_old_leave
++;
1001 hdr
= (struct mld_v1_pkt
*)data
;
1003 subscriber
= gm_subscriber_findref(gm_ifp
, gm_dummy_untracked
);
1007 /* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */
1009 grp
= gm_sg_find(gm_ifp
, hdr
->grp
, PIMADDR_ANY
);
1011 old_grp
= gm_packet_sg_find(grp
, GM_SUB_POS
, subscriber
);
1013 gm_packet_sg_drop(old_grp
);
1014 gm_sg_update(grp
, false);
1015 CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
1021 /* nothing more to do here, pass2 is no-op for leaves */
1022 gm_subscriber_drop(&subscriber
);
1025 /* for each general query received (or sent), a timer is started to expire
1026 * _everything_ at the appropriate time (including robustness multiplier).
1028 * So when this timer hits, all packets - with all of their items - that were
1029 * received *before* the query are aged out, and state updated accordingly.
1030 * Note that when we receive a refresh/update, the previous/old packet is
1031 * already dropped and replaced with a new one, so in normal steady-state
1032 * operation, this timer won't be doing anything.
1034 * Additionally, if a subscriber actively leaves a group, that goes through
1035 * its own path too and won't hit this. This is really only triggered when a
1036 * host straight up disappears.
1038 static void gm_t_expire(struct thread
*t
)
1040 struct gm_if
*gm_ifp
= THREAD_ARG(t
);
1041 struct gm_packet_state
*pkt
;
1043 zlog_info(log_ifp("general expiry timer"));
1045 while (gm_ifp
->n_pending
) {
1046 struct gm_general_pending
*pend
= gm_ifp
->pending
;
1047 struct timeval remain
;
1050 remain_ms
= monotime_until(&pend
->expiry
, &remain
);
1051 if (remain_ms
> 0) {
1052 if (PIM_DEBUG_GM_EVENTS
)
1054 log_ifp("next general expiry in %" PRId64
"ms"),
1057 thread_add_timer_tv(router
->master
, gm_t_expire
, gm_ifp
,
1058 &remain
, &gm_ifp
->t_expire
);
1062 while ((pkt
= gm_packet_expires_first(gm_ifp
->expires
))) {
1063 if (timercmp(&pkt
->received
, &pend
->query
, >=))
1066 if (PIM_DEBUG_GM_PACKETS
)
1067 zlog_debug(log_ifp("expire packet %p"), pkt
);
1068 gm_packet_drop(pkt
, true);
1071 gm_ifp
->n_pending
--;
1072 memmove(gm_ifp
->pending
, gm_ifp
->pending
+ 1,
1073 gm_ifp
->n_pending
* sizeof(gm_ifp
->pending
[0]));
1076 if (PIM_DEBUG_GM_EVENTS
)
1077 zlog_debug(log_ifp("next general expiry waiting for query"));
1080 /* NB: the receive handlers will also run when sending packets, since we
1081 * receive our own packets back in.
1083 static void gm_handle_q_general(struct gm_if
*gm_ifp
,
1084 struct gm_query_timers
*timers
)
1086 struct timeval now
, expiry
;
1087 struct gm_general_pending
*pend
;
1090 timeradd(&now
, &timers
->expire_wait
, &expiry
);
1092 while (gm_ifp
->n_pending
) {
1093 pend
= &gm_ifp
->pending
[gm_ifp
->n_pending
- 1];
1095 if (timercmp(&pend
->expiry
, &expiry
, <))
1098 /* if we end up here, the last item in pending[] has an expiry
1099 * later than the expiry for this query. But our query time
1100 * (now) is later than that of the item (because, well, that's
1101 * how time works.) This makes this query meaningless since
1102 * it's "supersetted" within the preexisting query
1105 if (PIM_DEBUG_GM_TRACE_DETAIL
)
1107 log_ifp("zapping supersetted general timer %pTVMu"),
1110 gm_ifp
->n_pending
--;
1111 if (!gm_ifp
->n_pending
)
1112 THREAD_OFF(gm_ifp
->t_expire
);
1115 /* people might be messing with their configs or something */
1116 if (gm_ifp
->n_pending
== array_size(gm_ifp
->pending
))
1119 pend
= &gm_ifp
->pending
[gm_ifp
->n_pending
];
1121 pend
->expiry
= expiry
;
1123 if (!gm_ifp
->n_pending
++) {
1124 if (PIM_DEBUG_GM_TRACE
)
1126 log_ifp("starting general timer @ 0: %pTVMu"),
1128 thread_add_timer_tv(router
->master
, gm_t_expire
, gm_ifp
,
1129 &timers
->expire_wait
, &gm_ifp
->t_expire
);
1130 } else if (PIM_DEBUG_GM_TRACE
)
1131 zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
1132 gm_ifp
->n_pending
, &pend
->expiry
);
1135 static void gm_t_sg_expire(struct thread
*t
)
1137 struct gm_sg
*sg
= THREAD_ARG(t
);
1138 struct gm_if
*gm_ifp
= sg
->iface
;
1139 struct gm_packet_sg
*item
;
1141 assertf(sg
->state
== GM_SG_JOIN_EXPIRING
||
1142 sg
->state
== GM_SG_NOPRUNE_EXPIRING
,
1143 "%pSG%%%s %pTHD", &sg
->sgaddr
, gm_ifp
->ifp
->name
, t
);
1145 frr_each_safe (gm_packet_sg_subs
, sg
->subs_positive
, item
)
1146 /* this will also drop EXCLUDE mode S,G lists together with
1149 gm_packet_sg_drop(item
);
1151 /* subs_negative items are only timed out together with the *,G entry
1152 * since we won't get any reports for a group-and-source query
1154 gm_sg_update(sg
, true);
1157 static bool gm_sg_check_recent(struct gm_if
*gm_ifp
, struct gm_sg
*sg
,
1160 struct gm_packet_state
*pkt
;
1162 if (!sg
->most_recent
) {
1163 struct gm_packet_state
*best_pkt
= NULL
;
1164 struct gm_packet_sg
*item
;
1166 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
1167 pkt
= gm_packet_sg2state(item
);
1170 timercmp(&pkt
->received
, &best_pkt
->received
, >)) {
1172 sg
->most_recent
= item
;
1176 if (sg
->most_recent
) {
1177 struct timeval fuzz
;
1179 pkt
= gm_packet_sg2state(sg
->most_recent
);
1181 /* this shouldn't happen on plain old real ethernet segment,
1182 * but on something like a VXLAN or VPLS it is very possible
1183 * that we get a report before the query that triggered it.
1184 * (imagine a triangle scenario with 3 datacenters, it's very
1185 * possible A->B + B->C is faster than A->C due to odd routing)
1187 * This makes a little tolerance allowance to handle that case.
1189 timeradd(&pkt
->received
, &gm_ifp
->cfg_timing_fuzz
, &fuzz
);
1191 if (timercmp(&fuzz
, &ref
, >))
1197 static void gm_sg_timer_start(struct gm_if
*gm_ifp
, struct gm_sg
*sg
,
1198 struct timeval expire_wait
)
1204 if (sg
->state
== GM_SG_PRUNE
)
1208 if (gm_sg_check_recent(gm_ifp
, sg
, now
))
1211 if (PIM_DEBUG_GM_TRACE
)
1212 zlog_debug(log_sg(sg
, "expiring in %pTVI"), &expire_wait
);
1214 if (sg
->t_sg_expire
) {
1215 struct timeval remain
;
1217 remain
= thread_timer_remain(sg
->t_sg_expire
);
1218 if (timercmp(&remain
, &expire_wait
, <=))
1221 THREAD_OFF(sg
->t_sg_expire
);
1224 thread_add_timer_tv(router
->master
, gm_t_sg_expire
, sg
, &expire_wait
,
1228 static void gm_handle_q_groupsrc(struct gm_if
*gm_ifp
,
1229 struct gm_query_timers
*timers
, pim_addr grp
,
1230 const pim_addr
*srcs
, size_t n_src
)
1235 for (i
= 0; i
< n_src
; i
++) {
1236 sg
= gm_sg_find(gm_ifp
, grp
, srcs
[i
]);
1237 gm_sg_timer_start(gm_ifp
, sg
, timers
->expire_wait
);
/* Timer callback: a group-specific query for pend->grp timed out without
 * reports renewing the group's S,G entries; drop their subscriptions.
 *
 * NOTE(review): reconstructed from fragmented source — confirm against
 * upstream pim6_mld.c.
 */
static void gm_t_grp_expire(struct thread *t)
{
	/* if we're here, that means when we received the group-specific query
	 * there was one or more active S,G for this group.  For *,G the timer
	 * in sg->t_sg_expire is running separately and gets cancelled when we
	 * receive a report, so that work is left to gm_t_sg_expire and we
	 * shouldn't worry about it here.
	 */
	struct gm_grp_pending *pend = THREAD_ARG(t);
	struct gm_if *gm_ifp = pend->iface;
	struct gm_sg *sg, *sg_start, sg_ref = {};

	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);

	/* gteq lookup - try to find *,G or S,G (S,G is > *,G)
	 * could technically be gt to skip a possible *,G
	 */
	sg_ref.sgaddr.grp = pend->grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
		struct gm_packet_sg *item;

		/* left the range of entries belonging to pend->grp */
		if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
			break;
		if (pim_addr_is_any(sg->sgaddr.src))
			/* handled by gm_t_sg_expire / sg->t_sg_expire */
			continue;
		/* a report arrived after the query - entry stays alive */
		if (gm_sg_check_recent(gm_ifp, sg, pend->query))
			continue;

		/* we may also have a group-source-specific query going on in
		 * parallel.  But if we received nothing for the *,G query,
		 * the S,G query is kinda irrelevant.
		 */
		THREAD_OFF(sg->t_sg_expire);

		frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
			/* this will also drop the EXCLUDE S,G lists */
			gm_packet_sg_drop(item);

		gm_sg_update(sg, true);
	}

	gm_grp_pends_del(gm_ifp->grp_pends, pend);
	XFREE(MTYPE_GM_GRP_PENDING, pend);
}
/* Handle a received group-specific query (no source list): start the *,G
 * expiry timer if we have a *,G entry, and - if any S,G entries exist for
 * the group - set up (or tighten) a pending group expiry record that
 * gm_t_grp_expire processes when the query times out.
 *
 * NOTE(review): reconstructed from fragmented source — confirm against
 * upstream pim6_mld.c.
 */
static void gm_handle_q_group(struct gm_if *gm_ifp,
			      struct gm_query_timers *timers, pim_addr grp)
{
	struct gm_sg *sg, sg_ref = {};
	struct gm_grp_pending *pend, pend_ref = {};

	sg_ref.sgaddr.grp = grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	/* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
	sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
		/* we have nothing at all for this group - don't waste RAM */
		return;

	if (pim_addr_is_any(sg->sgaddr.src)) {
		/* actually found *,G entry here */
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
				   &grp);
		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);

		sg = gm_sgs_next(gm_ifp->sgs, sg);
		if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
			/* no S,G for this group */
			return;
	}

	pend_ref.grp = grp;
	pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);

	if (pend) {
		struct timeval remain;

		/* only shorten a pending expiry, never lengthen it */
		remain = thread_timer_remain(pend->t_expire);
		if (timercmp(&remain, &timers->expire_wait, <=))
			return;

		THREAD_OFF(pend->t_expire);
	} else {
		pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
		pend->grp = grp;
		pend->iface = gm_ifp;
		gm_grp_pends_add(gm_ifp->grp_pends, pend);
	}

	/* remember when the query arrived, so reports received after it can
	 * be recognized as renewing the entries (gm_sg_check_recent)
	 */
	monotime(&pend->query);
	thread_add_timer_tv(router->master, gm_t_grp_expire, pend,
			    &timers->expire_wait, &pend->t_expire);

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
			   pend->t_expire);
}
/* Restart the querier state machine (startup burst of general queries),
 * but only if this system currently is the elected querier and has a
 * usable link-local address.
 *
 * NOTE(review): reconstructed from fragmented source — confirm against
 * upstream pim6_mld.c.
 */
static void gm_bump_querier(struct gm_if *gm_ifp)
{
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;

	THREAD_OFF(gm_ifp->t_query);

	/* no link-local address yet - cannot source queries */
	if (pim_addr_is_any(pim_ifp->ll_lowest))
		return;
	/* someone else is the elected querier - leave it alone */
	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
		return;

	gm_ifp->n_startup = gm_ifp->cur_qrv;

	thread_execute(router->master, gm_t_query, gm_ifp, 0);
}
/* Timer callback: the other-querier-present interval elapsed without
 * hearing the lower-addressed querier again - take over as querier and
 * send a startup burst of general queries.
 *
 * NOTE(review): reconstructed from fragmented source — confirm against
 * upstream pim6_mld.c.
 */
static void gm_t_other_querier(struct thread *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;

	zlog_info(log_ifp("other querier timer expired"));

	gm_ifp->querier = pim_ifp->ll_lowest;
	gm_ifp->n_startup = gm_ifp->cur_qrv;

	thread_execute(router->master, gm_t_query, gm_ifp, 0);
}
1375 static void gm_handle_query(struct gm_if
*gm_ifp
,
1376 const struct sockaddr_in6
*pkt_src
,
1377 pim_addr
*pkt_dst
, char *data
, size_t len
)
1379 struct mld_v2_query_hdr
*hdr
;
1380 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1381 struct gm_query_timers timers
;
1384 if (len
< sizeof(struct mld_v2_query_hdr
) &&
1385 len
!= sizeof(struct mld_v1_pkt
)) {
1386 zlog_warn(log_pkt_src("invalid query size"));
1387 gm_ifp
->stats
.rx_drop_malformed
++;
1391 hdr
= (struct mld_v2_query_hdr
*)data
;
1392 general_query
= pim_addr_is_any(hdr
->grp
);
1394 if (!general_query
&& !IN6_IS_ADDR_MULTICAST(&hdr
->grp
)) {
1395 zlog_warn(log_pkt_src(
1396 "malformed MLDv2 query (invalid group %pI6)"),
1398 gm_ifp
->stats
.rx_drop_malformed
++;
1402 if (len
>= sizeof(struct mld_v2_query_hdr
)) {
1403 size_t src_space
= ntohs(hdr
->n_src
) * sizeof(pim_addr
);
1405 if (len
< sizeof(struct mld_v2_query_hdr
) + src_space
) {
1406 zlog_warn(log_pkt_src(
1407 "malformed MLDv2 query (truncated source list)"));
1408 gm_ifp
->stats
.rx_drop_malformed
++;
1412 if (general_query
&& src_space
) {
1413 zlog_warn(log_pkt_src(
1414 "malformed MLDv2 query (general query with non-empty source list)"));
1415 gm_ifp
->stats
.rx_drop_malformed
++;
1420 /* accepting queries unicast to us (or addressed to a wrong group)
1421 * can mess up querier election as well as cause us to terminate
1422 * traffic (since after a unicast query no reports will be coming in)
1424 if (!IPV6_ADDR_SAME(pkt_dst
, &gm_all_hosts
)) {
1425 if (pim_addr_is_any(hdr
->grp
)) {
1428 "wrong destination %pPA for general query"),
1430 gm_ifp
->stats
.rx_drop_dstaddr
++;
1434 if (!IPV6_ADDR_SAME(&hdr
->grp
, pkt_dst
)) {
1435 gm_ifp
->stats
.rx_drop_dstaddr
++;
1438 "wrong destination %pPA for group specific query"),
1444 if (IPV6_ADDR_CMP(&pkt_src
->sin6_addr
, &gm_ifp
->querier
) < 0) {
1445 if (PIM_DEBUG_GM_EVENTS
)
1447 log_pkt_src("replacing elected querier %pPA"),
1450 gm_ifp
->querier
= pkt_src
->sin6_addr
;
1453 if (len
== sizeof(struct mld_v1_pkt
)) {
1454 timers
.qrv
= gm_ifp
->cur_qrv
;
1455 timers
.max_resp_ms
= hdr
->max_resp_code
;
1456 timers
.qqic_ms
= gm_ifp
->cur_query_intv
;
1458 timers
.qrv
= (hdr
->flags
& 0x7) ?: 8;
1459 timers
.max_resp_ms
= mld_max_resp_decode(hdr
->max_resp_code
);
1460 timers
.qqic_ms
= igmp_msg_decode8to16(hdr
->qqic
) * 1000;
1462 timers
.fuzz
= gm_ifp
->cfg_timing_fuzz
;
1464 gm_expiry_calc(&timers
);
1466 if (PIM_DEBUG_GM_TRACE_DETAIL
)
1468 log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
1469 timers
.qrv
, timers
.max_resp_ms
, timers
.qqic_ms
,
1470 &timers
.expire_wait
);
1472 if (IPV6_ADDR_CMP(&pkt_src
->sin6_addr
, &pim_ifp
->ll_lowest
) < 0) {
1473 unsigned int other_ms
;
1475 THREAD_OFF(gm_ifp
->t_query
);
1476 THREAD_OFF(gm_ifp
->t_other_querier
);
1478 other_ms
= timers
.qrv
* timers
.qqic_ms
+ timers
.max_resp_ms
/ 2;
1479 thread_add_timer_msec(router
->master
, gm_t_other_querier
,
1481 &gm_ifp
->t_other_querier
);
1484 if (len
== sizeof(struct mld_v1_pkt
)) {
1485 if (general_query
) {
1486 gm_handle_q_general(gm_ifp
, &timers
);
1487 gm_ifp
->stats
.rx_query_old_general
++;
1489 gm_handle_q_group(gm_ifp
, &timers
, hdr
->grp
);
1490 gm_ifp
->stats
.rx_query_old_group
++;
1495 /* v2 query - [S]uppress bit */
1496 if (hdr
->flags
& 0x8) {
1497 gm_ifp
->stats
.rx_query_new_sbit
++;
1501 if (general_query
) {
1502 gm_handle_q_general(gm_ifp
, &timers
);
1503 gm_ifp
->stats
.rx_query_new_general
++;
1504 } else if (!ntohs(hdr
->n_src
)) {
1505 gm_handle_q_group(gm_ifp
, &timers
, hdr
->grp
);
1506 gm_ifp
->stats
.rx_query_new_group
++;
1508 gm_handle_q_groupsrc(gm_ifp
, &timers
, hdr
->grp
, hdr
->srcs
,
1510 gm_ifp
->stats
.rx_query_new_groupsrc
++;
1514 static void gm_rx_process(struct gm_if
*gm_ifp
,
1515 const struct sockaddr_in6
*pkt_src
, pim_addr
*pkt_dst
,
1516 void *data
, size_t pktlen
)
1518 struct icmp6_plain_hdr
*icmp6
= data
;
1519 uint16_t pkt_csum
, ref_csum
;
1520 struct ipv6_ph ph6
= {
1521 .src
= pkt_src
->sin6_addr
,
1523 .ulpl
= htons(pktlen
),
1524 .next_hdr
= IPPROTO_ICMPV6
,
1527 pkt_csum
= icmp6
->icmp6_cksum
;
1528 icmp6
->icmp6_cksum
= 0;
1529 ref_csum
= in_cksum_with_ph6(&ph6
, data
, pktlen
);
1531 if (pkt_csum
!= ref_csum
) {
1534 "(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
1535 pkt_dst
, pkt_csum
, ref_csum
);
1536 gm_ifp
->stats
.rx_drop_csum
++;
1541 pktlen
-= sizeof(*icmp6
);
1543 switch (icmp6
->icmp6_type
) {
1544 case ICMP6_MLD_QUERY
:
1545 gm_handle_query(gm_ifp
, pkt_src
, pkt_dst
, data
, pktlen
);
1547 case ICMP6_MLD_V1_REPORT
:
1548 gm_handle_v1_report(gm_ifp
, pkt_src
, data
, pktlen
);
1550 case ICMP6_MLD_V1_DONE
:
1551 gm_handle_v1_leave(gm_ifp
, pkt_src
, data
, pktlen
);
1553 case ICMP6_MLD_V2_REPORT
:
1554 gm_handle_v2_report(gm_ifp
, pkt_src
, data
, pktlen
);
/* Check an IPv6 Hop-by-Hop options block for a Router Alert option
 * (RFC 2711) carrying the given 16-bit alert value.
 *
 * hopopts points at the raw Hop-by-Hop extension header: byte 0 is the
 * next-header field, byte 1 is the header length in 8-byte units minus 1,
 * followed by TLV-encoded options.  hopopt_len is the number of bytes
 * actually available at hopopts.
 *
 * Returns true only if a well-formed Router Alert option with exactly the
 * requested alert_type is present; malformed or truncated option data
 * terminates the scan and yields false.
 */
static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
				 uint16_t alert_type)
{
	uint8_t *pos, *end;

	/* need at least the fixed 8-byte first block */
	if (hopopt_len < 8U)
		return false;
	/* self-declared header length must fit in what we received */
	if (hopopt_len < (hopopts[1] + 1U) * 8U)
		return false;

	end = hopopts + (hopopts[1] + 1) * 8;

	/* options start after next-header + length bytes */
	for (pos = hopopts + 2; pos < end;) {
		/* Pad1 is a single byte, no length field */
		if (pos[0] == IP6OPT_PAD1) {
			pos++;
			continue;
		}

		/* need room for option type + option length */
		if (pos > end - 2)
			break;
		/* option data must not run past the header end */
		if (pos > end - 2 - pos[1])
			break;

		if (pos[0] == IP6OPT_ROUTER_ALERT && pos[1] == 2) {
			uint16_t have_type = (uint16_t)((pos[2] << 8) | pos[3]);

			if (have_type == alert_type)
				return true;
		}

		pos += 2 + pos[1];
	}
	return false;
}
1595 static void gm_t_recv(struct thread
*t
)
1597 struct pim_instance
*pim
= THREAD_ARG(t
);
1599 char buf
[CMSG_SPACE(sizeof(struct in6_pktinfo
)) +
1600 CMSG_SPACE(256) /* hop options */ +
1601 CMSG_SPACE(sizeof(int)) /* hopcount */];
1602 struct cmsghdr align
;
1604 struct cmsghdr
*cmsg
;
1605 struct in6_pktinfo
*pktinfo
= NULL
;
1606 uint8_t *hopopts
= NULL
;
1607 size_t hopopt_len
= 0;
1608 int *hoplimit
= NULL
;
1610 struct msghdr mh
[1] = {};
1611 struct iovec iov
[1];
1612 struct sockaddr_in6 pkt_src
[1] = {};
1616 thread_add_read(router
->master
, gm_t_recv
, pim
, pim
->gm_socket
,
1619 iov
->iov_base
= rxbuf
;
1620 iov
->iov_len
= sizeof(rxbuf
);
1622 mh
->msg_name
= pkt_src
;
1623 mh
->msg_namelen
= sizeof(pkt_src
);
1624 mh
->msg_control
= cmsgbuf
.buf
;
1625 mh
->msg_controllen
= sizeof(cmsgbuf
.buf
);
1627 mh
->msg_iovlen
= array_size(iov
);
1630 nread
= recvmsg(pim
->gm_socket
, mh
, MSG_PEEK
| MSG_TRUNC
);
1632 zlog_err("(VRF %s) RX error: %m", pim
->vrf
->name
);
1633 pim
->gm_rx_drop_sys
++;
1637 if ((size_t)nread
> sizeof(rxbuf
)) {
1638 iov
->iov_base
= XMALLOC(MTYPE_GM_PACKET
, nread
);
1639 iov
->iov_len
= nread
;
1641 nread
= recvmsg(pim
->gm_socket
, mh
, 0);
1643 zlog_err("(VRF %s) RX error: %m", pim
->vrf
->name
);
1644 pim
->gm_rx_drop_sys
++;
1648 struct interface
*ifp
;
1650 ifp
= if_lookup_by_index(pkt_src
->sin6_scope_id
, pim
->vrf
->vrf_id
);
1651 if (!ifp
|| !ifp
->info
)
1654 struct pim_interface
*pim_ifp
= ifp
->info
;
1655 struct gm_if
*gm_ifp
= pim_ifp
->mld
;
1660 for (cmsg
= CMSG_FIRSTHDR(mh
); cmsg
; cmsg
= CMSG_NXTHDR(mh
, cmsg
)) {
1661 if (cmsg
->cmsg_level
!= SOL_IPV6
)
1664 switch (cmsg
->cmsg_type
) {
1666 pktinfo
= (struct in6_pktinfo
*)CMSG_DATA(cmsg
);
1669 hopopts
= CMSG_DATA(cmsg
);
1670 hopopt_len
= cmsg
->cmsg_len
- sizeof(*cmsg
);
1673 hoplimit
= (int *)CMSG_DATA(cmsg
);
1678 if (!pktinfo
|| !hoplimit
) {
1680 "BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
1681 pim
->gm_rx_drop_sys
++;
1685 if (*hoplimit
!= 1) {
1686 zlog_err(log_pkt_src("packet with hop limit != 1"));
1687 /* spoofing attempt => count on srcaddr counter */
1688 gm_ifp
->stats
.rx_drop_srcaddr
++;
1692 if (!ip6_check_hopopts_ra(hopopts
, hopopt_len
, IP6_ALERT_MLD
)) {
1693 zlog_err(log_pkt_src(
1694 "packet without IPv6 Router Alert MLD option"));
1695 gm_ifp
->stats
.rx_drop_ra
++;
1699 if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src
->sin6_addr
))
1700 /* reports from :: happen in normal operation for DAD, so
1701 * don't spam log messages about this
1705 if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src
->sin6_addr
)) {
1706 zlog_warn(log_pkt_src("packet from invalid source address"));
1707 gm_ifp
->stats
.rx_drop_srcaddr
++;
1712 if (pktlen
< sizeof(struct icmp6_plain_hdr
)) {
1713 zlog_warn(log_pkt_src("truncated packet"));
1714 gm_ifp
->stats
.rx_drop_malformed
++;
1718 gm_rx_process(gm_ifp
, pkt_src
, &pktinfo
->ipi6_addr
, iov
->iov_base
,
1722 if (iov
->iov_base
!= rxbuf
)
1723 XFREE(MTYPE_GM_PACKET
, iov
->iov_base
);
1726 static void gm_send_query(struct gm_if
*gm_ifp
, pim_addr grp
,
1727 const pim_addr
*srcs
, size_t n_srcs
, bool s_bit
)
1729 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1730 struct sockaddr_in6 dstaddr
= {
1731 .sin6_family
= AF_INET6
,
1732 .sin6_scope_id
= gm_ifp
->ifp
->ifindex
,
1735 struct icmp6_plain_hdr hdr
;
1736 struct mld_v2_query_hdr v2_query
;
1738 /* clang-format off */
1740 .icmp6_type
= ICMP6_MLD_QUERY
,
1746 /* clang-format on */
1748 struct ipv6_ph ph6
= {
1749 .src
= pim_ifp
->ll_lowest
,
1750 .ulpl
= htons(sizeof(query
)),
1751 .next_hdr
= IPPROTO_ICMPV6
,
1754 char buf
[CMSG_SPACE(8) /* hop options */ +
1755 CMSG_SPACE(sizeof(struct in6_pktinfo
))];
1756 struct cmsghdr align
;
1758 struct cmsghdr
*cmh
;
1759 struct msghdr mh
[1] = {};
1760 struct iovec iov
[3];
1762 ssize_t ret
, expect_ret
;
1764 struct in6_pktinfo
*pktinfo
;
1766 if (if_is_loopback(gm_ifp
->ifp
)) {
1767 /* Linux is a bit odd with multicast on loopback */
1768 ph6
.src
= in6addr_loopback
;
1769 dstaddr
.sin6_addr
= in6addr_loopback
;
1770 } else if (pim_addr_is_any(grp
))
1771 dstaddr
.sin6_addr
= gm_all_hosts
;
1773 dstaddr
.sin6_addr
= grp
;
1775 query
.v2_query
.max_resp_code
=
1776 mld_max_resp_encode(gm_ifp
->cur_max_resp
);
1777 query
.v2_query
.flags
= (gm_ifp
->cur_qrv
< 8) ? gm_ifp
->cur_qrv
: 0;
1779 query
.v2_query
.flags
|= 0x08;
1780 query
.v2_query
.qqic
=
1781 igmp_msg_encode16to8(gm_ifp
->cur_query_intv
/ 1000);
1782 query
.v2_query
.n_src
= htons(n_srcs
);
1784 ph6
.dst
= dstaddr
.sin6_addr
;
1786 /* ph6 not included in sendmsg */
1787 iov
[0].iov_base
= &ph6
;
1788 iov
[0].iov_len
= sizeof(ph6
);
1789 iov
[1].iov_base
= &query
;
1790 if (gm_ifp
->cur_version
== GM_MLDV1
) {
1792 iov
[1].iov_len
= sizeof(query
.hdr
) + sizeof(struct mld_v1_pkt
);
1793 } else if (!n_srcs
) {
1795 iov
[1].iov_len
= sizeof(query
);
1797 iov
[1].iov_len
= sizeof(query
);
1798 iov
[2].iov_base
= (void *)srcs
;
1799 iov
[2].iov_len
= n_srcs
* sizeof(srcs
[0]);
1803 query
.hdr
.icmp6_cksum
= in_cksumv(iov
, iov_len
);
1805 if (PIM_DEBUG_GM_PACKETS
)
1807 log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
1808 &pim_ifp
->ll_lowest
, &dstaddr
.sin6_addr
, &grp
, n_srcs
);
1810 mh
->msg_name
= &dstaddr
;
1811 mh
->msg_namelen
= sizeof(dstaddr
);
1812 mh
->msg_iov
= iov
+ 1;
1813 mh
->msg_iovlen
= iov_len
- 1;
1814 mh
->msg_control
= &cmsg
;
1815 mh
->msg_controllen
= sizeof(cmsg
.buf
);
1817 cmh
= CMSG_FIRSTHDR(mh
);
1818 cmh
->cmsg_level
= IPPROTO_IPV6
;
1819 cmh
->cmsg_type
= IPV6_HOPOPTS
;
1820 cmh
->cmsg_len
= CMSG_LEN(8);
1821 dp
= CMSG_DATA(cmh
);
1822 *dp
++ = 0; /* next header */
1823 *dp
++ = 0; /* length (8-byte blocks, minus 1) */
1824 *dp
++ = IP6OPT_ROUTER_ALERT
; /* router alert */
1825 *dp
++ = 2; /* length */
1826 *dp
++ = 0; /* value (2 bytes) */
1827 *dp
++ = 0; /* value (2 bytes) (0 = MLD) */
1828 *dp
++ = 0; /* pad0 */
1829 *dp
++ = 0; /* pad0 */
1831 cmh
= CMSG_NXTHDR(mh
, cmh
);
1832 cmh
->cmsg_level
= IPPROTO_IPV6
;
1833 cmh
->cmsg_type
= IPV6_PKTINFO
;
1834 cmh
->cmsg_len
= CMSG_LEN(sizeof(struct in6_pktinfo
));
1835 pktinfo
= (struct in6_pktinfo
*)CMSG_DATA(cmh
);
1836 pktinfo
->ipi6_ifindex
= gm_ifp
->ifp
->ifindex
;
1837 pktinfo
->ipi6_addr
= gm_ifp
->cur_ll_lowest
;
1839 expect_ret
= iov
[1].iov_len
;
1841 expect_ret
+= iov
[2].iov_len
;
1843 frr_with_privs (&pimd_privs
) {
1844 ret
= sendmsg(gm_ifp
->pim
->gm_socket
, mh
, 0);
1847 if (ret
!= expect_ret
) {
1848 zlog_warn(log_ifp("failed to send query: %m"));
1849 gm_ifp
->stats
.tx_query_fail
++;
1851 if (gm_ifp
->cur_version
== GM_MLDV1
) {
1852 if (pim_addr_is_any(grp
))
1853 gm_ifp
->stats
.tx_query_old_general
++;
1855 gm_ifp
->stats
.tx_query_old_group
++;
1857 if (pim_addr_is_any(grp
))
1858 gm_ifp
->stats
.tx_query_new_general
++;
1860 gm_ifp
->stats
.tx_query_new_group
++;
1862 gm_ifp
->stats
.tx_query_new_groupsrc
++;
/* Timer callback: periodic general query transmission.  During startup
 * (n_startup > 0) queries are sent at an accelerated interval; afterwards
 * at the configured query interval.  Re-arms itself.
 *
 * NOTE(review): reconstructed from fragmented source — confirm against
 * upstream pim6_mld.c.
 */
static void gm_t_query(struct thread *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	unsigned int timer_ms = gm_ifp->cur_query_intv;

	if (gm_ifp->n_startup) {
		/* startup queries go out faster than the steady-state rate */
		timer_ms /= 4;
		gm_ifp->n_startup--;
	}

	thread_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
			      &gm_ifp->t_query);

	gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
}
/* Timer callback: fire one round of S,G / *,G specific query for sg. */
static void gm_t_sg_query(struct thread *t)
{
	struct gm_sg *sg = THREAD_ARG(t);

	gm_trigger_specific(sg);
}
/* S,G specific queries (triggered by a member leaving) get a little slack
 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
 */
/* Transmit the accumulated group-and-source specific query and free the
 * pending bundle.
 *
 * NOTE(review): reconstructed from fragmented source — confirm against
 * upstream pim6_mld.c.
 */
static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
{
	struct gm_if *gm_ifp = pend_gsq->iface;

	gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
		      pend_gsq->s_bit);

	gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
	XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
}
/* Timer callback: bundling slack elapsed - send the pending S,G query. */
static void gm_t_gsq_pend(struct thread *t)
{
	struct gm_gsq_pending *pend_gsq = THREAD_ARG(t);

	gm_send_specific(pend_gsq);
}
/* Kick off (one round of) a specific query for sg, triggered by a member
 * leaving.  *,G queries go out immediately; S,G queries are collected into
 * a per-(group, s_bit) pending bundle that is flushed after a short fuzz
 * delay (or immediately when the bundle's source array fills up).
 *
 * NOTE(review): reconstructed from fragmented source — in particular the
 * retransmit bookkeeping at the top needs confirming against upstream
 * pim6_mld.c.
 */
static void gm_trigger_specific(struct gm_sg *sg)
{
	struct gm_if *gm_ifp = sg->iface;
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct gm_gsq_pending *pend_gsq, ref = {};

	/* schedule the next retransmit round while rounds remain */
	sg->n_query--;
	if (sg->n_query)
		thread_add_timer_msec(router->master, gm_t_sg_query, sg,
				      gm_ifp->cur_query_intv_trig,
				      &sg->t_sg_query);

	/* only the elected querier sends specific queries */
	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
		return;
	if (gm_ifp->pim->gm_socket == -1)
		return;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "triggered query"));

	if (pim_addr_is_any(sg->sgaddr.src)) {
		/* *,G - no source list, send directly */
		gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
		return;
	}

	/* S,G - bundle with other sources for the same group & S-bit */
	ref.grp = sg->sgaddr.grp;
	ref.s_bit = sg->query_sbit;

	pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
	if (!pend_gsq) {
		pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
		pend_gsq->grp = sg->sgaddr.grp;
		pend_gsq->s_bit = sg->query_sbit;
		pend_gsq->iface = gm_ifp;
		gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);

		/* small slack so multiple leaves can share one query */
		thread_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
				    &gm_ifp->cfg_timing_fuzz,
				    &pend_gsq->t_send);
	}

	assert(pend_gsq->n_src < array_size(pend_gsq->srcs));

	pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
	pend_gsq->n_src++;

	if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
		/* bundle full - flush immediately, don't wait for the timer */
		THREAD_OFF(pend_gsq->t_send);
		gm_send_specific(pend_gsq);
	}
}
1964 static void gm_vrf_socket_incref(struct pim_instance
*pim
)
1966 struct vrf
*vrf
= pim
->vrf
;
1968 struct icmp6_filter filter
[1];
1970 if (pim
->gm_socket_if_count
++ && pim
->gm_socket
!= -1)
1973 ICMP6_FILTER_SETBLOCKALL(filter
);
1974 ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY
, filter
);
1975 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT
, filter
);
1976 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE
, filter
);
1977 ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT
, filter
);
1979 frr_with_privs (&pimd_privs
) {
1980 pim
->gm_socket
= vrf_socket(AF_INET6
, SOCK_RAW
, IPPROTO_ICMPV6
,
1981 vrf
->vrf_id
, vrf
->name
);
1982 if (pim
->gm_socket
< 0) {
1983 zlog_err("(VRF %s) could not create MLD socket: %m",
1988 ret
= setsockopt(pim
->gm_socket
, SOL_ICMPV6
, ICMP6_FILTER
,
1989 filter
, sizeof(filter
));
1991 zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
1995 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_RECVPKTINFO
,
1996 &intval
, sizeof(intval
));
1998 zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
2002 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_RECVHOPOPTS
,
2003 &intval
, sizeof(intval
));
2005 zlog_err("(VRF %s) failed to set IPV6_HOPOPTS: %m",
2009 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_RECVHOPLIMIT
,
2010 &intval
, sizeof(intval
));
2012 zlog_err("(VRF %s) failed to set IPV6_HOPLIMIT: %m",
2016 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_MULTICAST_LOOP
,
2017 &intval
, sizeof(intval
));
2020 "(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
2024 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_MULTICAST_HOPS
,
2025 &intval
, sizeof(intval
));
2028 "(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
2031 /* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
2032 * RX filtering in Linux. It only means "receive all groups
2033 * that something on the system has joined". To actually
2034 * receive *all* MLD packets - which is what we need -
2035 * multicast routing must be enabled on the interface. And
2036 * this only works for MLD packets specifically.
2038 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
2039 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
2041 * Also note that the code there explicitly checks for the IPv6
2042 * router alert MLD option (which is required by the RFC to be
2043 * on MLD packets.) That implies trying to support hosts which
2044 * erroneously don't add that option is just not possible.
2047 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_MULTICAST_ALL
,
2048 &intval
, sizeof(intval
));
2051 "(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
2055 thread_add_read(router
->master
, gm_t_recv
, pim
, pim
->gm_socket
,
/* Drop one interface reference on the per-VRF MLD socket; close it when
 * the last interface is gone.
 */
static void gm_vrf_socket_decref(struct pim_instance *pim)
{
	if (--pim->gm_socket_if_count)
		return;

	THREAD_OFF(pim->t_gm_recv);
	close(pim->gm_socket);
	pim->gm_socket = -1;
}
2069 static void gm_start(struct interface
*ifp
)
2071 struct pim_interface
*pim_ifp
= ifp
->info
;
2072 struct gm_if
*gm_ifp
;
2075 assert(pim_ifp
->pim
);
2076 assert(pim_ifp
->mroute_vif_index
>= 0);
2077 assert(!pim_ifp
->mld
);
2079 gm_vrf_socket_incref(pim_ifp
->pim
);
2081 gm_ifp
= XCALLOC(MTYPE_GM_IFACE
, sizeof(*gm_ifp
));
2083 pim_ifp
->mld
= gm_ifp
;
2084 gm_ifp
->pim
= pim_ifp
->pim
;
2085 monotime(&gm_ifp
->started
);
2087 zlog_info(log_ifp("starting MLD"));
2089 if (pim_ifp
->mld_version
== 1)
2090 gm_ifp
->cur_version
= GM_MLDV1
;
2092 gm_ifp
->cur_version
= GM_MLDV2
;
2094 gm_ifp
->cur_qrv
= pim_ifp
->gm_default_robustness_variable
;
2095 gm_ifp
->cur_query_intv
= pim_ifp
->gm_default_query_interval
* 1000;
2096 gm_ifp
->cur_query_intv_trig
=
2097 pim_ifp
->gm_specific_query_max_response_time_dsec
* 100;
2098 gm_ifp
->cur_max_resp
= pim_ifp
->gm_query_max_response_time_dsec
* 100;
2099 gm_ifp
->cur_lmqc
= pim_ifp
->gm_last_member_query_count
;
2101 gm_ifp
->cfg_timing_fuzz
.tv_sec
= 0;
2102 gm_ifp
->cfg_timing_fuzz
.tv_usec
= 10 * 1000;
2104 gm_sgs_init(gm_ifp
->sgs
);
2105 gm_subscribers_init(gm_ifp
->subscribers
);
2106 gm_packet_expires_init(gm_ifp
->expires
);
2107 gm_grp_pends_init(gm_ifp
->grp_pends
);
2108 gm_gsq_pends_init(gm_ifp
->gsq_pends
);
2110 frr_with_privs (&pimd_privs
) {
2111 struct ipv6_mreq mreq
;
2114 /* all-MLDv2 group */
2115 mreq
.ipv6mr_multiaddr
= gm_all_routers
;
2116 mreq
.ipv6mr_interface
= ifp
->ifindex
;
2117 ret
= setsockopt(gm_ifp
->pim
->gm_socket
, SOL_IPV6
,
2118 IPV6_JOIN_GROUP
, &mreq
, sizeof(mreq
));
2120 zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
/* Drop all MLD state on an interface: queued packet expiries, pending *,G
 * and S,G query records, all S,G entries and all subscribers.  Used on
 * interface teardown.
 *
 * NOTE(review): reconstructed from fragmented source — confirm the exact
 * per-entry free calls against upstream pim6_mld.c.
 */
void gm_group_delete(struct gm_if *gm_ifp)
{
	struct gm_sg *sg;
	struct gm_packet_state *pkt;
	struct gm_grp_pending *pend_grp;
	struct gm_gsq_pending *pend_gsq;
	struct gm_subscriber *subscriber;

	/* dropping all packets also drops their per-S,G subscriptions */
	while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
		gm_packet_drop(pkt, false);

	while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
		THREAD_OFF(pend_grp->t_expire);
		XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
	}

	while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
		THREAD_OFF(pend_gsq->t_send);
		XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
	}

	while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
		THREAD_OFF(sg->t_sg_expire);
		/* all subscriptions must be gone after the packet drops */
		assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
			&sg->sgaddr);
		assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
			&sg->sgaddr);
		XFREE(MTYPE_GM_SG, sg);
	}

	while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
		assertf(!gm_packets_count(subscriber->packets), "%pPA",
			&subscriber->addr);
		XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
	}
}
/* Stop MLD on an interface: cancel timers, leave the all-MLDv2-routers
 * group, release the per-VRF socket reference, free all MLD state and
 * detach from the pim_interface.
 *
 * NOTE(review): reconstructed from fragmented source — confirm against
 * upstream pim6_mld.c.
 */
void gm_ifp_teardown(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;

	if (!pim_ifp || !pim_ifp->mld)
		return;

	gm_ifp = pim_ifp->mld;
	/* flag suppresses further processing while we tear down */
	gm_ifp->stopping = true;
	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("MLD stop"));

	THREAD_OFF(gm_ifp->t_query);
	THREAD_OFF(gm_ifp->t_other_querier);
	THREAD_OFF(gm_ifp->t_expire);

	frr_with_privs (&pimd_privs) {
		struct ipv6_mreq mreq;
		int ret;

		/* all-MLDv2 group */
		mreq.ipv6mr_multiaddr = gm_all_routers;
		mreq.ipv6mr_interface = ifp->ifindex;
		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
				 IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
		if (ret)
			zlog_err(
				"(%s) failed to leave ff02::16 (all-MLDv2): %m",
				ifp->name);
	}

	gm_vrf_socket_decref(gm_ifp->pim);

	gm_group_delete(gm_ifp);

	gm_grp_pends_fini(gm_ifp->grp_pends);
	gm_packet_expires_fini(gm_ifp->expires);
	gm_subscribers_fini(gm_ifp->subscribers);
	gm_sgs_fini(gm_ifp->sgs);

	XFREE(MTYPE_GM_IFACE, gm_ifp);
	pim_ifp->mld = NULL;
}
/* React to a change of the interface's lowest link-local address: track it
 * as our query source, and (re-)run querier election - we become querier
 * if our new address sorts below the current querier's, lose queriership
 * if the address went away.
 *
 * NOTE(review): reconstructed from fragmented source — confirm against
 * upstream pim6_mld.c.
 */
static void gm_update_ll(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;
	bool was_querier;

	/* were we the elected querier under the previous address? */
	was_querier =
		!IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
		!pim_addr_is_any(gm_ifp->querier);

	gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
	if (was_querier)
		gm_ifp->querier = pim_ifp->ll_lowest;
	THREAD_OFF(gm_ifp->t_query);

	if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
		if (was_querier)
			zlog_info(log_ifp(
				"lost link-local address, stopping querier"));
		return;
	}

	if (was_querier)
		zlog_info(log_ifp("new link-local %pPA while querier"),
			  &gm_ifp->cur_ll_lowest);
	else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
		 pim_addr_is_any(gm_ifp->querier)) {
		zlog_info(log_ifp("new link-local %pPA, becoming querier"),
			  &gm_ifp->cur_ll_lowest);
		gm_ifp->querier = gm_ifp->cur_ll_lowest;
	} else
		return;

	/* (re)start as querier with a startup burst of general queries */
	gm_ifp->n_startup = gm_ifp->cur_qrv;
	thread_execute(router->master, gm_t_query, gm_ifp, 0);
}
2244 void gm_ifp_update(struct interface
*ifp
)
2246 struct pim_interface
*pim_ifp
= ifp
->info
;
2247 struct gm_if
*gm_ifp
;
2248 bool changed
= false;
2252 if (!if_is_operative(ifp
) || !pim_ifp
->pim
||
2253 pim_ifp
->mroute_vif_index
< 0) {
2254 gm_ifp_teardown(ifp
);
2259 * If ipv6 mld is not enabled on interface, do not start mld activites.
2261 if (!pim_ifp
->gm_enable
)
2264 if (!pim_ifp
->mld
) {
2269 gm_ifp
= pim_ifp
->mld
;
2270 if (IPV6_ADDR_CMP(&pim_ifp
->ll_lowest
, &gm_ifp
->cur_ll_lowest
))
2273 unsigned int cfg_query_intv
= pim_ifp
->gm_default_query_interval
* 1000;
2275 if (gm_ifp
->cur_query_intv
!= cfg_query_intv
) {
2276 gm_ifp
->cur_query_intv
= cfg_query_intv
;
2280 unsigned int cfg_query_intv_trig
=
2281 pim_ifp
->gm_specific_query_max_response_time_dsec
* 100;
2283 if (gm_ifp
->cur_query_intv_trig
!= cfg_query_intv_trig
) {
2284 gm_ifp
->cur_query_intv_trig
= cfg_query_intv_trig
;
2288 unsigned int cfg_max_response
=
2289 pim_ifp
->gm_query_max_response_time_dsec
* 100;
2291 if (gm_ifp
->cur_max_resp
!= cfg_max_response
)
2292 gm_ifp
->cur_max_resp
= cfg_max_response
;
2294 if (gm_ifp
->cur_lmqc
!= pim_ifp
->gm_last_member_query_count
)
2295 gm_ifp
->cur_lmqc
= pim_ifp
->gm_last_member_query_count
;
2297 enum gm_version cfg_version
;
2299 if (pim_ifp
->mld_version
== 1)
2300 cfg_version
= GM_MLDV1
;
2302 cfg_version
= GM_MLDV2
;
2303 if (gm_ifp
->cur_version
!= cfg_version
) {
2304 gm_ifp
->cur_version
= cfg_version
;
2309 if (PIM_DEBUG_GM_TRACE
)
2311 "MLD querier config changed, querying"));
2312 gm_bump_querier(gm_ifp
);
2317 * CLI (show commands only)
2320 #include "lib/command.h"
2322 #include "pimd/pim6_mld_clippy.c"
2324 static struct vrf
*gm_cmd_vrf_lookup(struct vty
*vty
, const char *vrf_str
,
2330 return vrf_lookup_by_id(VRF_DEFAULT
);
2331 if (!strcmp(vrf_str
, "all"))
2333 ret
= vrf_lookup_by_name(vrf_str
);
2337 vty_out(vty
, "%% VRF %pSQq does not exist\n", vrf_str
);
2342 static void gm_show_if_one_detail(struct vty
*vty
, struct interface
*ifp
)
2344 struct pim_interface
*pim_ifp
= (struct pim_interface
*)ifp
->info
;
2345 struct gm_if
*gm_ifp
;
2350 vty_out(vty
, "Interface %s: no PIM/MLD config\n\n", ifp
->name
);
2354 gm_ifp
= pim_ifp
->mld
;
2356 vty_out(vty
, "Interface %s: MLD not running\n\n", ifp
->name
);
2360 querier
= IPV6_ADDR_SAME(&gm_ifp
->querier
, &pim_ifp
->ll_lowest
);
2362 vty_out(vty
, "Interface %s: MLD running\n", ifp
->name
);
2363 vty_out(vty
, " Uptime: %pTVMs\n", &gm_ifp
->started
);
2364 vty_out(vty
, " MLD version: %d\n", gm_ifp
->cur_version
);
2365 vty_out(vty
, " Querier: %pPA%s\n", &gm_ifp
->querier
,
2366 querier
? " (this system)" : "");
2367 vty_out(vty
, " Query timer: %pTH\n", gm_ifp
->t_query
);
2368 vty_out(vty
, " Other querier timer: %pTH\n",
2369 gm_ifp
->t_other_querier
);
2370 vty_out(vty
, " Robustness value: %u\n", gm_ifp
->cur_qrv
);
2371 vty_out(vty
, " Query interval: %ums\n",
2372 gm_ifp
->cur_query_intv
);
2373 vty_out(vty
, " Query response timer: %ums\n", gm_ifp
->cur_max_resp
);
2374 vty_out(vty
, " Last member query intv.: %ums\n",
2375 gm_ifp
->cur_query_intv_trig
);
2376 vty_out(vty
, " %u expiry timers from general queries:\n",
2378 for (i
= 0; i
< gm_ifp
->n_pending
; i
++) {
2379 struct gm_general_pending
*p
= &gm_ifp
->pending
[i
];
2381 vty_out(vty
, " %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
2382 &p
->query
, &p
->expiry
);
2384 vty_out(vty
, " %zu expiry timers from *,G queries\n",
2385 gm_grp_pends_count(gm_ifp
->grp_pends
));
2386 vty_out(vty
, " %zu expiry timers from S,G queries\n",
2387 gm_gsq_pends_count(gm_ifp
->gsq_pends
));
2388 vty_out(vty
, " %zu total *,G/S,G from %zu hosts in %zu bundles\n",
2389 gm_sgs_count(gm_ifp
->sgs
),
2390 gm_subscribers_count(gm_ifp
->subscribers
),
2391 gm_packet_expires_count(gm_ifp
->expires
));
2395 static void gm_show_if_one(struct vty
*vty
, struct interface
*ifp
,
2398 struct pim_interface
*pim_ifp
= (struct pim_interface
*)ifp
->info
;
2399 struct gm_if
*gm_ifp
= pim_ifp
->mld
;
2404 json_object_string_add(js_if
, "state", "down");
2406 vty_out(vty
, "%-16s %5s\n", ifp
->name
, "down");
2410 querier
= IPV6_ADDR_SAME(&gm_ifp
->querier
, &pim_ifp
->ll_lowest
);
2413 json_object_string_add(js_if
, "name", ifp
->name
);
2414 json_object_string_add(js_if
, "state", "up");
2415 json_object_string_addf(js_if
, "version", "%d",
2416 gm_ifp
->cur_version
);
2417 json_object_string_addf(js_if
, "upTime", "%pTVMs",
2419 json_object_boolean_add(js_if
, "querier", querier
);
2420 json_object_string_addf(js_if
, "querierIp", "%pPA",
2423 json_object_string_addf(js_if
, "queryTimer", "%pTH",
2426 json_object_string_addf(js_if
, "otherQuerierTimer",
2428 gm_ifp
->t_other_querier
);
2429 json_object_int_add(js_if
, "timerRobustnessValue",
2431 json_object_int_add(js_if
, "lastMemberQueryCount",
2433 json_object_int_add(js_if
, "timerQueryIntervalMsec",
2434 gm_ifp
->cur_query_intv
);
2435 json_object_int_add(js_if
, "timerQueryResponseTimerMsec",
2436 gm_ifp
->cur_max_resp
);
2437 json_object_int_add(js_if
, "timerLastMemberQueryIntervalMsec",
2438 gm_ifp
->cur_query_intv_trig
);
2440 vty_out(vty
, "%-16s %-5s %d %-25pPA %-5s %11pTH %pTVMs\n",
2441 ifp
->name
, "up", gm_ifp
->cur_version
, &gm_ifp
->querier
,
2442 querier
? "query" : "other",
2443 querier
? gm_ifp
->t_query
: gm_ifp
->t_other_querier
,
2448 static void gm_show_if_vrf(struct vty
*vty
, struct vrf
*vrf
, const char *ifname
,
2449 bool detail
, json_object
*js
)
2451 struct interface
*ifp
;
2452 json_object
*js_vrf
;
2455 js_vrf
= json_object_new_object();
2456 json_object_object_add(js
, vrf
->name
, js_vrf
);
2459 FOR_ALL_INTERFACES (vrf
, ifp
) {
2460 json_object
*js_if
= NULL
;
2462 if (ifname
&& strcmp(ifp
->name
, ifname
))
2464 if (detail
&& !js
) {
2465 gm_show_if_one_detail(vty
, ifp
);
2472 js_if
= json_object_new_object();
2473 json_object_object_add(js_vrf
, ifp
->name
, js_if
);
2476 gm_show_if_one(vty
, ifp
, js_if
);
2480 static void gm_show_if(struct vty
*vty
, struct vrf
*vrf
, const char *ifname
,
2481 bool detail
, json_object
*js
)
2484 vty_out(vty
, "%-16s %-5s V %-25s %-18s %s\n", "Interface",
2485 "State", "Querier", "Timer", "Uptime");
2488 gm_show_if_vrf(vty
, vrf
, ifname
, detail
, js
);
2490 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
2491 gm_show_if_vrf(vty
, vrf
, ifname
, detail
, js
);
2494 DEFPY(gm_show_interface
,
2495 gm_show_interface_cmd
,
2496 "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME | detail$detail] [json$json]",
2500 VRF_FULL_CMD_HELP_STR
2501 "MLD interface information\n"
2506 int ret
= CMD_SUCCESS
;
2508 json_object
*js
= NULL
;
2510 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
2511 if (ret
!= CMD_SUCCESS
)
2515 js
= json_object_new_object();
2516 gm_show_if(vty
, vrf
, ifname
, !!detail
, js
);
2517 return vty_json(vty
, js
);
2520 static void gm_show_stats_one(struct vty
*vty
, struct gm_if
*gm_ifp
,
2523 struct gm_if_stats
*stats
= &gm_ifp
->stats
;
2524 /* clang-format off */
2529 } *item
, items
[] = {
2530 { "v2 reports received", "rxV2Reports", &stats
->rx_new_report
},
2531 { "v1 reports received", "rxV1Reports", &stats
->rx_old_report
},
2532 { "v1 done received", "rxV1Done", &stats
->rx_old_leave
},
2534 { "v2 *,* queries received", "rxV2QueryGeneral", &stats
->rx_query_new_general
},
2535 { "v2 *,G queries received", "rxV2QueryGroup", &stats
->rx_query_new_group
},
2536 { "v2 S,G queries received", "rxV2QueryGroupSource", &stats
->rx_query_new_groupsrc
},
2537 { "v2 S-bit queries received", "rxV2QuerySBit", &stats
->rx_query_new_sbit
},
2538 { "v1 *,* queries received", "rxV1QueryGeneral", &stats
->rx_query_old_general
},
2539 { "v1 *,G queries received", "rxV1QueryGroup", &stats
->rx_query_old_group
},
2541 { "v2 *,* queries sent", "txV2QueryGeneral", &stats
->tx_query_new_general
},
2542 { "v2 *,G queries sent", "txV2QueryGroup", &stats
->tx_query_new_group
},
2543 { "v2 S,G queries sent", "txV2QueryGroupSource", &stats
->tx_query_new_groupsrc
},
2544 { "v1 *,* queries sent", "txV1QueryGeneral", &stats
->tx_query_old_general
},
2545 { "v1 *,G queries sent", "txV1QueryGroup", &stats
->tx_query_old_group
},
2546 { "TX errors", "txErrors", &stats
->tx_query_fail
},
2548 { "RX dropped (checksum error)", "rxDropChecksum", &stats
->rx_drop_csum
},
2549 { "RX dropped (invalid source)", "rxDropSrcAddr", &stats
->rx_drop_srcaddr
},
2550 { "RX dropped (invalid dest.)", "rxDropDstAddr", &stats
->rx_drop_dstaddr
},
2551 { "RX dropped (missing alert)", "rxDropRtrAlert", &stats
->rx_drop_ra
},
2552 { "RX dropped (malformed pkt.)", "rxDropMalformed", &stats
->rx_drop_malformed
},
2553 { "RX truncated reports", "rxTruncatedRep", &stats
->rx_trunc_report
},
2555 /* clang-format on */
2557 for (item
= items
; item
< items
+ array_size(items
); item
++) {
2559 json_object_int_add(js_if
, item
->js_key
, *item
->val
);
2561 vty_out(vty
, " %-30s %" PRIu64
"\n", item
->text
,
2566 static void gm_show_stats_vrf(struct vty
*vty
, struct vrf
*vrf
,
2567 const char *ifname
, json_object
*js
)
2569 struct interface
*ifp
;
2570 json_object
*js_vrf
;
2573 js_vrf
= json_object_new_object();
2574 json_object_object_add(js
, vrf
->name
, js_vrf
);
2577 FOR_ALL_INTERFACES (vrf
, ifp
) {
2578 struct pim_interface
*pim_ifp
;
2579 struct gm_if
*gm_ifp
;
2580 json_object
*js_if
= NULL
;
2582 if (ifname
&& strcmp(ifp
->name
, ifname
))
2587 pim_ifp
= ifp
->info
;
2590 gm_ifp
= pim_ifp
->mld
;
2593 js_if
= json_object_new_object();
2594 json_object_object_add(js_vrf
, ifp
->name
, js_if
);
2596 vty_out(vty
, "Interface: %s\n", ifp
->name
);
2598 gm_show_stats_one(vty
, gm_ifp
, js_if
);
2604 DEFPY(gm_show_interface_stats
,
2605 gm_show_interface_stats_cmd
,
2606 "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
2610 VRF_FULL_CMD_HELP_STR
2616 int ret
= CMD_SUCCESS
;
2618 json_object
*js
= NULL
;
2620 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
2621 if (ret
!= CMD_SUCCESS
)
2625 js
= json_object_new_object();
2628 gm_show_stats_vrf(vty
, vrf
, ifname
, js
);
2630 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
2631 gm_show_stats_vrf(vty
, vrf
, ifname
, js
);
2632 return vty_json(vty
, js
);
2635 static void gm_show_joins_one(struct vty
*vty
, struct gm_if
*gm_ifp
,
2636 const struct prefix_ipv6
*groups
,
2637 const struct prefix_ipv6
*sources
, bool detail
,
2640 struct gm_sg
*sg
, *sg_start
;
2641 json_object
*js_group
= NULL
;
2642 pim_addr js_grpaddr
= PIMADDR_ANY
;
2643 struct gm_subscriber sub_ref
= {}, *sub_untracked
;
2646 struct gm_sg sg_ref
= {};
2648 sg_ref
.sgaddr
.grp
= pim_addr_from_prefix(groups
);
2649 sg_start
= gm_sgs_find_gteq(gm_ifp
->sgs
, &sg_ref
);
2651 sg_start
= gm_sgs_first(gm_ifp
->sgs
);
2653 sub_ref
.addr
= gm_dummy_untracked
;
2654 sub_untracked
= gm_subscribers_find(gm_ifp
->subscribers
, &sub_ref
);
2655 /* NB: sub_untracked may be NULL if no untracked joins exist */
2657 frr_each_from (gm_sgs
, gm_ifp
->sgs
, sg
, sg_start
) {
2658 struct timeval
*recent
= NULL
, *untracked
= NULL
;
2659 json_object
*js_src
;
2662 struct prefix grp_p
;
2664 pim_addr_to_prefix(&grp_p
, sg
->sgaddr
.grp
);
2665 if (!prefix_match(groups
, &grp_p
))
2670 struct prefix src_p
;
2672 pim_addr_to_prefix(&src_p
, sg
->sgaddr
.src
);
2673 if (!prefix_match(sources
, &src_p
))
2677 if (sg
->most_recent
) {
2678 struct gm_packet_state
*packet
;
2680 packet
= gm_packet_sg2state(sg
->most_recent
);
2681 recent
= &packet
->received
;
2684 if (sub_untracked
) {
2685 struct gm_packet_state
*packet
;
2686 struct gm_packet_sg
*item
;
2688 item
= gm_packet_sg_find(sg
, GM_SUB_POS
, sub_untracked
);
2690 packet
= gm_packet_sg2state(item
);
2691 untracked
= &packet
->received
;
2696 FMT_NSTD_BEGIN
; /* %.0p */
2698 "%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
2699 &sg
->sgaddr
.grp
, &sg
->sgaddr
.src
,
2700 gm_states
[sg
->state
], recent
, untracked
,
2706 struct gm_packet_sg
*item
;
2707 struct gm_packet_state
*packet
;
2709 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
2710 packet
= gm_packet_sg2state(item
);
2712 if (packet
->subscriber
== sub_untracked
)
2714 vty_out(vty
, " %-58pPA %-16s %10.0pTVMs\n",
2715 &packet
->subscriber
->addr
, "(JOIN)",
2718 frr_each (gm_packet_sg_subs
, sg
->subs_negative
, item
) {
2719 packet
= gm_packet_sg2state(item
);
2721 if (packet
->subscriber
== sub_untracked
)
2723 vty_out(vty
, " %-58pPA %-16s %10.0pTVMs\n",
2724 &packet
->subscriber
->addr
, "(PRUNE)",
2727 FMT_NSTD_END
; /* %.0p */
2732 if (!js_group
|| pim_addr_cmp(js_grpaddr
, sg
->sgaddr
.grp
)) {
2733 js_group
= json_object_new_object();
2734 json_object_object_addf(js_if
, js_group
, "%pPA",
2736 js_grpaddr
= sg
->sgaddr
.grp
;
2739 js_src
= json_object_new_object();
2740 json_object_object_addf(js_group
, js_src
, "%pPA",
2743 json_object_string_add(js_src
, "state", gm_states
[sg
->state
]);
2744 json_object_string_addf(js_src
, "created", "%pTVMs",
2746 json_object_string_addf(js_src
, "lastSeen", "%pTVMs", recent
);
2749 json_object_string_addf(js_src
, "untrackedLastSeen",
2750 "%pTVMs", untracked
);
2754 json_object
*js_subs
;
2755 struct gm_packet_sg
*item
;
2756 struct gm_packet_state
*packet
;
2758 js_subs
= json_object_new_object();
2759 json_object_object_add(js_src
, "joinedBy", js_subs
);
2760 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
2761 packet
= gm_packet_sg2state(item
);
2762 if (packet
->subscriber
== sub_untracked
)
2765 json_object
*js_sub
;
2767 js_sub
= json_object_new_object();
2768 json_object_object_addf(js_subs
, js_sub
, "%pPA",
2769 &packet
->subscriber
->addr
);
2770 json_object_string_addf(js_sub
, "lastSeen", "%pTVMs",
2774 js_subs
= json_object_new_object();
2775 json_object_object_add(js_src
, "prunedBy", js_subs
);
2776 frr_each (gm_packet_sg_subs
, sg
->subs_negative
, item
) {
2777 packet
= gm_packet_sg2state(item
);
2778 if (packet
->subscriber
== sub_untracked
)
2781 json_object
*js_sub
;
2783 js_sub
= json_object_new_object();
2784 json_object_object_addf(js_subs
, js_sub
, "%pPA",
2785 &packet
->subscriber
->addr
);
2786 json_object_string_addf(js_sub
, "lastSeen", "%pTVMs",
2792 static void gm_show_joins_vrf(struct vty
*vty
, struct vrf
*vrf
,
2794 const struct prefix_ipv6
*groups
,
2795 const struct prefix_ipv6
*sources
, bool detail
,
2798 struct interface
*ifp
;
2799 json_object
*js_vrf
;
2802 js_vrf
= json_object_new_object();
2803 json_object_object_add(js
, vrf
->name
, js_vrf
);
2806 FOR_ALL_INTERFACES (vrf
, ifp
) {
2807 struct pim_interface
*pim_ifp
;
2808 struct gm_if
*gm_ifp
;
2809 json_object
*js_if
= NULL
;
2811 if (ifname
&& strcmp(ifp
->name
, ifname
))
2816 pim_ifp
= ifp
->info
;
2819 gm_ifp
= pim_ifp
->mld
;
2822 js_if
= json_object_new_object();
2823 json_object_object_add(js_vrf
, ifp
->name
, js_if
);
2827 vty_out(vty
, "\nOn interface %s:\n", ifp
->name
);
2829 gm_show_joins_one(vty
, gm_ifp
, groups
, sources
, detail
, js_if
);
2833 DEFPY(gm_show_interface_joins
,
2834 gm_show_interface_joins_cmd
,
2835 "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
2839 VRF_FULL_CMD_HELP_STR
2840 "MLD joined groups & sources\n"
2843 "Limit output to group range\n"
2844 "Show groups covered by this prefix\n"
2845 "Limit output to source range\n"
2846 "Show sources covered by this prefix\n"
2847 "Show details, including tracked receivers\n"
2850 int ret
= CMD_SUCCESS
;
2852 json_object
*js
= NULL
;
2854 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
2855 if (ret
!= CMD_SUCCESS
)
2859 js
= json_object_new_object();
2861 vty_out(vty
, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
2862 "Source", "State", "LastSeen", "NonTrkSeen", "Created");
2865 gm_show_joins_vrf(vty
, vrf
, ifname
, groups
, sources
, !!detail
,
2868 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
2869 gm_show_joins_vrf(vty
, vrf
, ifname
, groups
, sources
,
2871 return vty_json(vty
, js
);
2874 static void gm_show_groups(struct vty
*vty
, struct vrf
*vrf
, bool uj
)
2876 struct interface
*ifp
;
2877 struct ttable
*tt
= NULL
;
2879 json_object
*json
= NULL
;
2880 json_object
*json_iface
= NULL
;
2881 json_object
*json_group
= NULL
;
2882 json_object
*json_groups
= NULL
;
2883 struct pim_instance
*pim
= vrf
->info
;
2886 json
= json_object_new_object();
2887 json_object_int_add(json
, "totalGroups", pim
->gm_group_count
);
2888 json_object_int_add(json
, "watermarkLimit",
2889 pim
->gm_watermark_limit
);
2891 /* Prepare table. */
2892 tt
= ttable_new(&ttable_styles
[TTSTYLE_BLANK
]);
2893 ttable_add_row(tt
, "Interface|Group|Version|Uptime");
2894 tt
->style
.cell
.rpad
= 2;
2895 tt
->style
.corner
= '+';
2898 vty_out(vty
, "Total MLD groups: %u\n", pim
->gm_group_count
);
2899 vty_out(vty
, "Watermark warn limit(%s): %u\n",
2900 pim
->gm_watermark_limit
? "Set" : "Not Set",
2901 pim
->gm_watermark_limit
);
2904 /* scan interfaces */
2905 FOR_ALL_INTERFACES (vrf
, ifp
) {
2907 struct pim_interface
*pim_ifp
= ifp
->info
;
2908 struct gm_if
*gm_ifp
;
2914 gm_ifp
= pim_ifp
->mld
;
2918 /* scan mld groups */
2919 frr_each (gm_sgs
, gm_ifp
->sgs
, sg
) {
2922 json_object_object_get_ex(json
, ifp
->name
,
2926 json_iface
= json_object_new_object();
2927 json_object_pim_ifp_add(json_iface
,
2929 json_object_object_add(json
, ifp
->name
,
2931 json_groups
= json_object_new_array();
2932 json_object_object_add(json_iface
,
2937 json_group
= json_object_new_object();
2938 json_object_string_addf(json_group
, "group",
2942 json_object_int_add(json_group
, "version",
2943 pim_ifp
->mld_version
);
2944 json_object_string_addf(json_group
, "uptime",
2945 "%pTVMs", &sg
->created
);
2946 json_object_array_add(json_groups
, json_group
);
2948 ttable_add_row(tt
, "%s|%pPAs|%d|%pTVMs",
2949 ifp
->name
, &sg
->sgaddr
.grp
,
2950 pim_ifp
->mld_version
,
2953 } /* scan gm groups */
2954 } /* scan interfaces */
2957 vty_json(vty
, json
);
2959 /* Dump the generated table. */
2960 table
= ttable_dump(tt
, "\n");
2961 vty_out(vty
, "%s\n", table
);
2962 XFREE(MTYPE_TMP
, table
);
2967 DEFPY(gm_show_mld_groups
,
2968 gm_show_mld_groups_cmd
,
2969 "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
2973 VRF_FULL_CMD_HELP_STR
2977 int ret
= CMD_SUCCESS
;
2980 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
2981 if (ret
!= CMD_SUCCESS
)
2985 gm_show_groups(vty
, vrf
, !!json
);
2987 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
2988 gm_show_groups(vty
, vrf
, !!json
);
2993 DEFPY(gm_debug_show
,
2995 "debug show mld interface IFNAME",
3002 struct interface
*ifp
;
3003 struct pim_interface
*pim_ifp
;
3004 struct gm_if
*gm_ifp
;
3006 ifp
= if_lookup_by_name(ifname
, VRF_DEFAULT
);
3008 vty_out(vty
, "%% no such interface: %pSQq\n", ifname
);
3012 pim_ifp
= ifp
->info
;
3014 vty_out(vty
, "%% no PIM state for interface %pSQq\n", ifname
);
3018 gm_ifp
= pim_ifp
->mld
;
3020 vty_out(vty
, "%% no MLD state for interface %pSQq\n", ifname
);
3024 vty_out(vty
, "querier: %pPA\n", &gm_ifp
->querier
);
3025 vty_out(vty
, "ll_lowest: %pPA\n\n", &pim_ifp
->ll_lowest
);
3026 vty_out(vty
, "t_query: %pTHD\n", gm_ifp
->t_query
);
3027 vty_out(vty
, "t_other_querier: %pTHD\n", gm_ifp
->t_other_querier
);
3028 vty_out(vty
, "t_expire: %pTHD\n", gm_ifp
->t_expire
);
3030 vty_out(vty
, "\nn_pending: %u\n", gm_ifp
->n_pending
);
3031 for (size_t i
= 0; i
< gm_ifp
->n_pending
; i
++) {
3032 int64_t query
, expiry
;
3034 query
= monotime_since(&gm_ifp
->pending
[i
].query
, NULL
);
3035 expiry
= monotime_until(&gm_ifp
->pending
[i
].expiry
, NULL
);
3037 vty_out(vty
, "[%zu]: query %"PRId64
"ms ago, expiry in %"PRId64
"ms\n",
3038 i
, query
/ 1000, expiry
/ 1000);
3042 struct gm_packet_state
*pkt
;
3043 struct gm_packet_sg
*item
;
3044 struct gm_subscriber
*subscriber
;
3046 vty_out(vty
, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp
->sgs
));
3047 frr_each (gm_sgs
, gm_ifp
->sgs
, sg
) {
3048 vty_out(vty
, "\t%pSG t_expire=%pTHD\n", &sg
->sgaddr
,
3051 vty_out(vty
, "\t @pos:%zu\n",
3052 gm_packet_sg_subs_count(sg
->subs_positive
));
3053 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
3054 pkt
= gm_packet_sg2state(item
);
3056 vty_out(vty
, "\t\t+%s%s [%pPAs %p] %p+%u\n",
3057 item
->is_src
? "S" : "",
3058 item
->is_excl
? "E" : "",
3059 &pkt
->subscriber
->addr
, pkt
->subscriber
, pkt
,
3062 assert(item
->sg
== sg
);
3064 vty_out(vty
, "\t @neg:%zu\n",
3065 gm_packet_sg_subs_count(sg
->subs_negative
));
3066 frr_each (gm_packet_sg_subs
, sg
->subs_negative
, item
) {
3067 pkt
= gm_packet_sg2state(item
);
3069 vty_out(vty
, "\t\t-%s%s [%pPAs %p] %p+%u\n",
3070 item
->is_src
? "S" : "",
3071 item
->is_excl
? "E" : "",
3072 &pkt
->subscriber
->addr
, pkt
->subscriber
, pkt
,
3075 assert(item
->sg
== sg
);
3079 vty_out(vty
, "\n%zu subscribers:\n",
3080 gm_subscribers_count(gm_ifp
->subscribers
));
3081 frr_each (gm_subscribers
, gm_ifp
->subscribers
, subscriber
) {
3082 vty_out(vty
, "\t%pPA %p %zu packets\n", &subscriber
->addr
,
3083 subscriber
, gm_packets_count(subscriber
->packets
));
3085 frr_each (gm_packets
, subscriber
->packets
, pkt
) {
3086 vty_out(vty
, "\t\t%p %.3fs ago %u of %u items active\n",
3088 monotime_since(&pkt
->received
, NULL
) *
3090 pkt
->n_active
, pkt
->n_sg
);
3092 for (size_t i
= 0; i
< pkt
->n_sg
; i
++) {
3093 item
= pkt
->items
+ i
;
3095 vty_out(vty
, "\t\t[%zu]", i
);
3098 vty_out(vty
, " inactive\n");
3102 vty_out(vty
, " %s%s %pSG nE=%u\n",
3103 item
->is_src
? "S" : "",
3104 item
->is_excl
? "E" : "",
3105 &item
->sg
->sgaddr
, item
->n_exclude
);
3113 DEFPY(gm_debug_iface_cfg
,
3114 gm_debug_iface_cfg_cmd
,
3117 "query-max-response-time (1-8387584)"
3121 "Multicast Listener Discovery\n"
3123 "maxresp\nmaxresp\n")
3125 VTY_DECLVAR_CONTEXT(interface
, ifp
);
3126 struct pim_interface
*pim_ifp
;
3127 struct gm_if
*gm_ifp
;
3128 bool changed
= false;
3130 pim_ifp
= ifp
->info
;
3132 vty_out(vty
, "%% no PIM state for interface %pSQq\n",
3136 gm_ifp
= pim_ifp
->mld
;
3138 vty_out(vty
, "%% no MLD state for interface %pSQq\n",
3143 if (robustness_str
&& gm_ifp
->cur_qrv
!= robustness
) {
3144 gm_ifp
->cur_qrv
= robustness
;
3147 if (query_max_response_time_str
&&
3148 gm_ifp
->cur_max_resp
!= (unsigned int)query_max_response_time
) {
3149 gm_ifp
->cur_max_resp
= query_max_response_time
;
3154 vty_out(vty
, "%% MLD querier config changed, bumping\n");
3155 gm_bump_querier(gm_ifp
);
3160 void gm_cli_init(void);
3162 void gm_cli_init(void)
3164 install_element(VIEW_NODE
, &gm_show_interface_cmd
);
3165 install_element(VIEW_NODE
, &gm_show_interface_stats_cmd
);
3166 install_element(VIEW_NODE
, &gm_show_interface_joins_cmd
);
3167 install_element(VIEW_NODE
, &gm_show_mld_groups_cmd
);
3169 install_element(VIEW_NODE
, &gm_debug_show_cmd
);
3170 install_element(INTERFACE_NODE
, &gm_debug_iface_cfg_cmd
);