1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
8 * keep pim6_mld.h open when working on this code. Most data structures are
9 * commented in the header.
11 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
12 * that this code will replace the old IGMP querier at some point.
16 #include <netinet/ip6.h>
18 #include "lib/memory.h"
19 #include "lib/jhash.h"
20 #include "lib/prefix.h"
21 #include "lib/checksum.h"
22 #include "lib/frrevent.h"
23 #include "termtable.h"
25 #include "pimd/pim6_mld.h"
26 #include "pimd/pim6_mld_protocol.h"
27 #include "pimd/pim_memory.h"
28 #include "pimd/pim_instance.h"
29 #include "pimd/pim_iface.h"
30 #include "pimd/pim6_cmd.h"
31 #include "pimd/pim_cmd_common.h"
32 #include "pimd/pim_util.h"
33 #include "pimd/pim_tib.h"
34 #include "pimd/pimd.h"
36 #ifndef IPV6_MULTICAST_ALL
37 #define IPV6_MULTICAST_ALL 29
40 DEFINE_MTYPE_STATIC(PIMD
, GM_IFACE
, "MLD interface");
41 DEFINE_MTYPE_STATIC(PIMD
, GM_PACKET
, "MLD packet");
42 DEFINE_MTYPE_STATIC(PIMD
, GM_SUBSCRIBER
, "MLD subscriber");
43 DEFINE_MTYPE_STATIC(PIMD
, GM_STATE
, "MLD subscription state");
44 DEFINE_MTYPE_STATIC(PIMD
, GM_SG
, "MLD (S,G)");
45 DEFINE_MTYPE_STATIC(PIMD
, GM_GRP_PENDING
, "MLD group query state");
46 DEFINE_MTYPE_STATIC(PIMD
, GM_GSQ_PENDING
, "MLD group/source query aggregate");
/* forward declarations: these are referenced before their definitions */
static void gm_t_query(struct event *t);
static void gm_trigger_specific(struct gm_sg *sg);
static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
			      struct timeval expire_wait);
/* shorthand for log messages; these expand to a format string plus its
 * arguments and rely on gm_ifp / pkt_src / sg being in scope at the call site
 */
#define log_ifp(msg)                                                           \
	"[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
/* NOTE(review): trailing argument reconstructed from the %pI6 specifier and
 * the pkt_src parameter of the receive handlers - confirm against upstream */
#define log_pkt_src(msg)                                                       \
	"[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name,    \
		&pkt_src->sin6_addr
#define log_sg(sg, msg)                                                        \
	"[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name,                    \
		sg->iface->ifp->name, &sg->sgaddr
63 /* clang-format off */
65 static const pim_addr gm_all_hosts
= {
67 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
71 static const pim_addr gm_all_routers
= {
73 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
77 /* MLDv1 does not allow subscriber tracking due to report suppression
78 * hence, the source address is replaced with ffff:...:ffff
80 static const pim_addr gm_dummy_untracked
= {
82 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
83 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
88 static const pim_addr gm_all_hosts
= { .s_addr
= htonl(0xe0000001), };
90 static const pim_addr gm_all_routers
= { .s_addr
= htonl(0xe0000016), };
91 static const pim_addr gm_dummy_untracked
= { .s_addr
= 0xffffffff, };
95 #define IPV6_MULTICAST_SCOPE_LINK 2
97 static inline uint8_t in6_multicast_scope(const pim_addr
*addr
)
99 return addr
->s6_addr
[1] & 0xf;
102 bool in6_multicast_nofwd(const pim_addr
*addr
)
104 return in6_multicast_scope(addr
) <= IPV6_MULTICAST_SCOPE_LINK
;
108 * (S,G) -> subscriber,(S,G)
111 static int gm_packet_sg_cmp(const struct gm_packet_sg
*a
,
112 const struct gm_packet_sg
*b
)
114 const struct gm_packet_state
*s_a
, *s_b
;
116 s_a
= gm_packet_sg2state(a
);
117 s_b
= gm_packet_sg2state(b
);
118 return IPV6_ADDR_CMP(&s_a
->subscriber
->addr
, &s_b
->subscriber
->addr
);
121 DECLARE_RBTREE_UNIQ(gm_packet_sg_subs
, struct gm_packet_sg
, subs_itm
,
124 static struct gm_packet_sg
*gm_packet_sg_find(struct gm_sg
*sg
,
125 enum gm_sub_sense sense
,
126 struct gm_subscriber
*sub
)
129 struct gm_packet_state hdr
;
130 struct gm_packet_sg item
;
132 /* clang-format off */
139 /* clang-format on */
142 return gm_packet_sg_subs_find(&sg
->subs
[sense
], &ref
.item
);
146 * interface -> (*,G),pending
149 static int gm_grp_pending_cmp(const struct gm_grp_pending
*a
,
150 const struct gm_grp_pending
*b
)
152 return IPV6_ADDR_CMP(&a
->grp
, &b
->grp
);
155 DECLARE_RBTREE_UNIQ(gm_grp_pends
, struct gm_grp_pending
, itm
,
159 * interface -> ([S1,S2,...],G),pending
162 static int gm_gsq_pending_cmp(const struct gm_gsq_pending
*a
,
163 const struct gm_gsq_pending
*b
)
165 if (a
->s_bit
!= b
->s_bit
)
166 return numcmp(a
->s_bit
, b
->s_bit
);
168 return IPV6_ADDR_CMP(&a
->grp
, &b
->grp
);
171 static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending
*a
)
173 uint32_t seed
= a
->s_bit
? 0x68f0eb5e : 0x156b7f19;
175 return jhash(&a
->grp
, sizeof(a
->grp
), seed
);
178 DECLARE_HASH(gm_gsq_pends
, struct gm_gsq_pending
, itm
, gm_gsq_pending_cmp
,
179 gm_gsq_pending_hash
);
185 int gm_sg_cmp(const struct gm_sg
*a
, const struct gm_sg
*b
)
187 return pim_sgaddr_cmp(a
->sgaddr
, b
->sgaddr
);
190 static struct gm_sg
*gm_sg_find(struct gm_if
*gm_ifp
, pim_addr grp
,
193 struct gm_sg ref
= {};
195 ref
.sgaddr
.grp
= grp
;
196 ref
.sgaddr
.src
= src
;
197 return gm_sgs_find(gm_ifp
->sgs
, &ref
);
200 static struct gm_sg
*gm_sg_make(struct gm_if
*gm_ifp
, pim_addr grp
,
203 struct gm_sg
*ret
, *prev
;
205 ret
= XCALLOC(MTYPE_GM_SG
, sizeof(*ret
));
206 ret
->sgaddr
.grp
= grp
;
207 ret
->sgaddr
.src
= src
;
209 prev
= gm_sgs_add(gm_ifp
->sgs
, ret
);
212 XFREE(MTYPE_GM_SG
, ret
);
215 monotime(&ret
->created
);
216 gm_packet_sg_subs_init(ret
->subs_positive
);
217 gm_packet_sg_subs_init(ret
->subs_negative
);
223 * interface -> packets, sorted by expiry (because add_tail insert order)
226 DECLARE_DLIST(gm_packet_expires
, struct gm_packet_state
, exp_itm
);
229 * subscriber -> packets
232 DECLARE_DLIST(gm_packets
, struct gm_packet_state
, pkt_itm
);
235 * interface -> subscriber
238 static int gm_subscriber_cmp(const struct gm_subscriber
*a
,
239 const struct gm_subscriber
*b
)
241 return IPV6_ADDR_CMP(&a
->addr
, &b
->addr
);
244 static uint32_t gm_subscriber_hash(const struct gm_subscriber
*a
)
246 return jhash(&a
->addr
, sizeof(a
->addr
), 0xd0e94ad4);
249 DECLARE_HASH(gm_subscribers
, struct gm_subscriber
, itm
, gm_subscriber_cmp
,
252 static struct gm_subscriber
*gm_subscriber_findref(struct gm_if
*gm_ifp
,
255 struct gm_subscriber ref
= {}, *ret
;
258 ret
= gm_subscribers_find(gm_ifp
->subscribers
, &ref
);
264 static struct gm_subscriber
*gm_subscriber_get(struct gm_if
*gm_ifp
,
267 struct gm_subscriber ref
= {}, *ret
;
270 ret
= gm_subscribers_find(gm_ifp
->subscribers
, &ref
);
273 ret
= XCALLOC(MTYPE_GM_SUBSCRIBER
, sizeof(*ret
));
277 monotime(&ret
->created
);
278 gm_packets_init(ret
->packets
);
280 gm_subscribers_add(gm_ifp
->subscribers
, ret
);
285 static void gm_subscriber_drop(struct gm_subscriber
**subp
)
287 struct gm_subscriber
*sub
= *subp
;
288 struct gm_if
*gm_ifp
;
300 gm_subscribers_del(gm_ifp
->subscribers
, sub
);
301 XFREE(MTYPE_GM_SUBSCRIBER
, sub
);
304 /****************************************************************************/
/* bundle query timer values for combined v1/v2 handling */
struct gm_query_timers {
	/* robustness variable (QRV, retransmit count) */
	unsigned int qrv;
	/* maximum response time, in milliseconds */
	unsigned int max_resp_ms;
	/* querier's query interval, in milliseconds */
	unsigned int qqic_ms;

	/* NOTE(review): fuzz slack fed from gm_ifp->cfg_timing_fuzz; field
	 * order reconstructed from the assignments in gm_sg_update - confirm */
	struct timeval fuzz;
	struct timeval expire_wait;
};
316 static void gm_expiry_calc(struct gm_query_timers
*timers
)
318 unsigned int expire
=
319 (timers
->qrv
- 1) * timers
->qqic_ms
+ timers
->max_resp_ms
;
320 ldiv_t exp_div
= ldiv(expire
, 1000);
322 timers
->expire_wait
.tv_sec
= exp_div
.quot
;
323 timers
->expire_wait
.tv_usec
= exp_div
.rem
* 1000;
324 timeradd(&timers
->expire_wait
, &timers
->fuzz
, &timers
->expire_wait
);
327 static void gm_sg_free(struct gm_sg
*sg
)
329 /* t_sg_expiry is handled before this is reached */
330 EVENT_OFF(sg
->t_sg_query
);
331 gm_packet_sg_subs_fini(sg
->subs_negative
);
332 gm_packet_sg_subs_fini(sg
->subs_positive
);
333 XFREE(MTYPE_GM_SG
, sg
);
336 /* clang-format off */
337 static const char *const gm_states
[] = {
338 [GM_SG_NOINFO
] = "NOINFO",
339 [GM_SG_JOIN
] = "JOIN",
340 [GM_SG_JOIN_EXPIRING
] = "JOIN_EXPIRING",
341 [GM_SG_PRUNE
] = "PRUNE",
342 [GM_SG_NOPRUNE
] = "NOPRUNE",
343 [GM_SG_NOPRUNE_EXPIRING
] = "NOPRUNE_EXPIRING",
345 /* clang-format on */
347 CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported");
348 /* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
349 * joined. Whether we actually want/need to support this is a separate
350 * question - it is almost never used. In fact this is exactly what RFC5790
351 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
354 static void gm_sg_update(struct gm_sg
*sg
, bool has_expired
)
356 struct gm_if
*gm_ifp
= sg
->iface
;
357 enum gm_sg_state prev
, desired
;
359 struct gm_sg
*grp
= NULL
;
361 if (!pim_addr_is_any(sg
->sgaddr
.src
))
362 grp
= gm_sg_find(gm_ifp
, sg
->sgaddr
.grp
, PIMADDR_ANY
);
364 assert(sg
->state
!= GM_SG_PRUNE
);
366 if (gm_packet_sg_subs_count(sg
->subs_positive
)) {
367 desired
= GM_SG_JOIN
;
368 assert(!sg
->t_sg_expire
);
369 } else if ((sg
->state
== GM_SG_JOIN
||
370 sg
->state
== GM_SG_JOIN_EXPIRING
) &&
372 desired
= GM_SG_JOIN_EXPIRING
;
373 else if (!grp
|| !gm_packet_sg_subs_count(grp
->subs_positive
))
374 desired
= GM_SG_NOINFO
;
375 else if (gm_packet_sg_subs_count(grp
->subs_positive
) ==
376 gm_packet_sg_subs_count(sg
->subs_negative
)) {
377 if ((sg
->state
== GM_SG_NOPRUNE
||
378 sg
->state
== GM_SG_NOPRUNE_EXPIRING
) &&
380 desired
= GM_SG_NOPRUNE_EXPIRING
;
382 desired
= GM_SG_PRUNE
;
383 } else if (gm_packet_sg_subs_count(sg
->subs_negative
))
384 desired
= GM_SG_NOPRUNE
;
386 desired
= GM_SG_NOINFO
;
388 if (desired
!= sg
->state
&& !gm_ifp
->stopping
) {
389 if (PIM_DEBUG_GM_EVENTS
)
390 zlog_debug(log_sg(sg
, "%s => %s"), gm_states
[sg
->state
],
393 if (desired
== GM_SG_JOIN_EXPIRING
||
394 desired
== GM_SG_NOPRUNE_EXPIRING
) {
395 struct gm_query_timers timers
;
397 timers
.qrv
= gm_ifp
->cur_qrv
;
398 timers
.max_resp_ms
= gm_ifp
->cur_max_resp
;
399 timers
.qqic_ms
= gm_ifp
->cur_query_intv_trig
;
400 timers
.fuzz
= gm_ifp
->cfg_timing_fuzz
;
402 gm_expiry_calc(&timers
);
403 gm_sg_timer_start(gm_ifp
, sg
, timers
.expire_wait
);
405 EVENT_OFF(sg
->t_sg_query
);
406 sg
->n_query
= gm_ifp
->cur_lmqc
;
407 sg
->query_sbit
= false;
408 gm_trigger_specific(sg
);
414 if (in6_multicast_nofwd(&sg
->sgaddr
.grp
) || gm_ifp
->stopping
)
417 new_join
= gm_sg_state_want_join(desired
);
419 if (new_join
&& !sg
->tib_joined
) {
420 /* this will retry if join previously failed */
421 sg
->tib_joined
= tib_sg_gm_join(gm_ifp
->pim
, sg
->sgaddr
,
422 gm_ifp
->ifp
, &sg
->oil
);
425 "MLD join for %pSG%%%s not propagated into TIB",
426 &sg
->sgaddr
, gm_ifp
->ifp
->name
);
428 zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg
->sgaddr
,
431 } else if (sg
->tib_joined
&& !new_join
) {
432 tib_sg_gm_prune(gm_ifp
->pim
, sg
->sgaddr
, gm_ifp
->ifp
, &sg
->oil
);
435 sg
->tib_joined
= false;
438 if (desired
== GM_SG_NOINFO
) {
439 /* multiple paths can lead to the last state going away;
440 * t_sg_expire can still be running if we're arriving from
444 EVENT_OFF(sg
->t_sg_expire
);
446 assertf((!sg
->t_sg_expire
&&
447 !gm_packet_sg_subs_count(sg
->subs_positive
) &&
448 !gm_packet_sg_subs_count(sg
->subs_negative
)),
449 "%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
450 &sg
->sgaddr
, gm_ifp
->ifp
->name
, has_expired
,
451 sg
->t_sg_expire
, gm_states
[prev
], gm_states
[desired
],
452 gm_packet_sg_subs_count(sg
->subs_positive
),
453 gm_packet_sg_subs_count(sg
->subs_negative
), grp
);
455 if (PIM_DEBUG_GM_TRACE
)
456 zlog_debug(log_sg(sg
, "dropping"));
458 gm_sgs_del(gm_ifp
->sgs
, sg
);
463 /****************************************************************************/
465 /* the following bunch of functions deals with transferring state from
466 * received packets into gm_packet_state. As a reminder, the querier is
467 * structured to keep all items received in one packet together, since they
468 * will share expiry timers and thus allows efficient handling.
471 static void gm_packet_free(struct gm_packet_state
*pkt
)
473 gm_packet_expires_del(pkt
->iface
->expires
, pkt
);
474 gm_packets_del(pkt
->subscriber
->packets
, pkt
);
475 gm_subscriber_drop(&pkt
->subscriber
);
476 XFREE(MTYPE_GM_STATE
, pkt
);
479 static struct gm_packet_sg
*gm_packet_sg_setup(struct gm_packet_state
*pkt
,
480 struct gm_sg
*sg
, bool is_excl
,
483 struct gm_packet_sg
*item
;
485 assert(pkt
->n_active
< pkt
->n_sg
);
487 item
= &pkt
->items
[pkt
->n_active
];
489 item
->is_excl
= is_excl
;
490 item
->is_src
= is_src
;
491 item
->offset
= pkt
->n_active
;
497 static bool gm_packet_sg_drop(struct gm_packet_sg
*item
)
499 struct gm_packet_state
*pkt
;
504 pkt
= gm_packet_sg2state(item
);
505 if (item
->sg
->most_recent
== item
)
506 item
->sg
->most_recent
= NULL
;
508 for (i
= 0; i
< item
->n_exclude
; i
++) {
509 struct gm_packet_sg
*excl_item
;
511 excl_item
= item
+ 1 + i
;
515 gm_packet_sg_subs_del(excl_item
->sg
->subs_negative
, excl_item
);
516 excl_item
->sg
= NULL
;
519 assert(pkt
->n_active
> 0);
522 if (item
->is_excl
&& item
->is_src
)
523 gm_packet_sg_subs_del(item
->sg
->subs_negative
, item
);
525 gm_packet_sg_subs_del(item
->sg
->subs_positive
, item
);
529 if (!pkt
->n_active
) {
536 static void gm_packet_drop(struct gm_packet_state
*pkt
, bool trace
)
538 for (size_t i
= 0; i
< pkt
->n_sg
; i
++) {
539 struct gm_sg
*sg
= pkt
->items
[i
].sg
;
545 if (trace
&& PIM_DEBUG_GM_TRACE
)
546 zlog_debug(log_sg(sg
, "general-dropping from %pPA"),
547 &pkt
->subscriber
->addr
);
548 deleted
= gm_packet_sg_drop(&pkt
->items
[i
]);
550 gm_sg_update(sg
, true);
556 static void gm_packet_sg_remove_sources(struct gm_if
*gm_ifp
,
557 struct gm_subscriber
*subscriber
,
558 pim_addr grp
, pim_addr
*srcs
,
559 size_t n_src
, enum gm_sub_sense sense
)
562 struct gm_packet_sg
*old_src
;
565 for (i
= 0; i
< n_src
; i
++) {
566 sg
= gm_sg_find(gm_ifp
, grp
, srcs
[i
]);
570 old_src
= gm_packet_sg_find(sg
, sense
, subscriber
);
574 gm_packet_sg_drop(old_src
);
575 gm_sg_update(sg
, false);
579 static void gm_sg_expiry_cancel(struct gm_sg
*sg
)
581 if (sg
->t_sg_expire
&& PIM_DEBUG_GM_TRACE
)
582 zlog_debug(log_sg(sg
, "alive, cancelling expiry timer"));
583 EVENT_OFF(sg
->t_sg_expire
);
584 sg
->query_sbit
= true;
587 /* first pass: process all changes resulting in removal of state:
588 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
589 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
590 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
591 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
592 * note *replacing* state is NOT considered *removing* state here
594 * everything else is thrown into pkt for creation of state in pass 2
596 static void gm_handle_v2_pass1(struct gm_packet_state
*pkt
,
597 struct mld_v2_rec_hdr
*rechdr
)
599 /* NB: pkt->subscriber can be NULL here if the subscriber was not
602 struct gm_subscriber
*subscriber
= pkt
->subscriber
;
604 struct gm_packet_sg
*old_grp
= NULL
;
605 struct gm_packet_sg
*item
;
606 size_t n_src
= ntohs(rechdr
->n_src
);
608 bool is_excl
= false;
610 grp
= gm_sg_find(pkt
->iface
, rechdr
->grp
, PIMADDR_ANY
);
611 if (grp
&& subscriber
)
612 old_grp
= gm_packet_sg_find(grp
, GM_SUB_POS
, subscriber
);
614 assert(old_grp
== NULL
|| old_grp
->is_excl
);
616 switch (rechdr
->type
) {
617 case MLD_RECTYPE_IS_EXCLUDE
:
618 case MLD_RECTYPE_CHANGE_TO_EXCLUDE
:
619 /* this always replaces or creates state */
622 grp
= gm_sg_make(pkt
->iface
, rechdr
->grp
, PIMADDR_ANY
);
624 item
= gm_packet_sg_setup(pkt
, grp
, is_excl
, false);
625 item
->n_exclude
= n_src
;
627 /* [EXCL_INCL_SG_NOTE] referenced below
629 * in theory, we should drop any S,G that the host may have
630 * previously added in INCLUDE mode. In practice, this is both
631 * incredibly rare and entirely irrelevant. It only makes any
632 * difference if an S,G that the host previously had on the
633 * INCLUDE list is now on the blocked list for EXCLUDE, which
634 * we can cover in processing the S,G list in pass2_excl().
636 * Other S,G from the host are simply left to expire
637 * "naturally" through general expiry.
641 case MLD_RECTYPE_IS_INCLUDE
:
642 case MLD_RECTYPE_CHANGE_TO_INCLUDE
:
644 /* INCLUDE has no *,G state, so old_grp here refers to
645 * previous EXCLUDE => delete it
647 gm_packet_sg_drop(old_grp
);
648 gm_sg_update(grp
, false);
649 CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
653 case MLD_RECTYPE_ALLOW_NEW_SOURCES
:
655 /* remove S,Gs from EXCLUDE, and then we're done */
656 gm_packet_sg_remove_sources(pkt
->iface
, subscriber
,
657 rechdr
->grp
, rechdr
->srcs
,
661 /* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
662 * idential to IS_INCLUDE (because the list of sources in
663 * IS_INCLUDE is not exhaustive)
667 case MLD_RECTYPE_BLOCK_OLD_SOURCES
:
669 /* this is intentionally not implemented because it
670 * would be complicated as hell. we only take the list
671 * of blocked sources from full group state records
677 gm_packet_sg_remove_sources(pkt
->iface
, subscriber
,
678 rechdr
->grp
, rechdr
->srcs
,
683 for (j
= 0; j
< n_src
; j
++) {
686 sg
= gm_sg_find(pkt
->iface
, rechdr
->grp
, rechdr
->srcs
[j
]);
688 sg
= gm_sg_make(pkt
->iface
, rechdr
->grp
,
691 gm_packet_sg_setup(pkt
, sg
, is_excl
, true);
695 /* second pass: creating/updating/refreshing state. All the items from the
696 * received packet have already been thrown into gm_packet_state.
699 static void gm_handle_v2_pass2_incl(struct gm_packet_state
*pkt
, size_t i
)
701 struct gm_packet_sg
*item
= &pkt
->items
[i
];
702 struct gm_packet_sg
*old
= NULL
;
703 struct gm_sg
*sg
= item
->sg
;
705 /* EXCLUDE state was already dropped in pass1 */
706 assert(!gm_packet_sg_find(sg
, GM_SUB_NEG
, pkt
->subscriber
));
708 old
= gm_packet_sg_find(sg
, GM_SUB_POS
, pkt
->subscriber
);
710 gm_packet_sg_drop(old
);
713 gm_packet_sg_subs_add(sg
->subs_positive
, item
);
715 sg
->most_recent
= item
;
716 gm_sg_expiry_cancel(sg
);
717 gm_sg_update(sg
, false);
720 static void gm_handle_v2_pass2_excl(struct gm_packet_state
*pkt
, size_t offs
)
722 struct gm_packet_sg
*item
= &pkt
->items
[offs
];
723 struct gm_packet_sg
*old_grp
, *item_dup
;
724 struct gm_sg
*sg_grp
= item
->sg
;
727 old_grp
= gm_packet_sg_find(sg_grp
, GM_SUB_POS
, pkt
->subscriber
);
729 for (i
= 0; i
< item
->n_exclude
; i
++) {
730 struct gm_packet_sg
*item_src
, *old_src
;
732 item_src
= &pkt
->items
[offs
+ 1 + i
];
733 old_src
= gm_packet_sg_find(item_src
->sg
, GM_SUB_NEG
,
736 gm_packet_sg_drop(old_src
);
738 /* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
739 * items left over if the host previously had INCLUDE
740 * mode going. Remove them here if we find any.
742 old_src
= gm_packet_sg_find(item_src
->sg
, GM_SUB_POS
,
745 gm_packet_sg_drop(old_src
);
748 /* the previous loop has removed the S,G entries which are
749 * still excluded after this update. So anything left on the
750 * old item was previously excluded but is now included
751 * => need to trigger update on S,G
753 for (i
= 0; i
< old_grp
->n_exclude
; i
++) {
754 struct gm_packet_sg
*old_src
;
755 struct gm_sg
*old_sg_src
;
757 old_src
= old_grp
+ 1 + i
;
758 old_sg_src
= old_src
->sg
;
762 gm_packet_sg_drop(old_src
);
763 gm_sg_update(old_sg_src
, false);
766 gm_packet_sg_drop(old_grp
);
769 item_dup
= gm_packet_sg_subs_add(sg_grp
->subs_positive
, item
);
773 sg_grp
->most_recent
= item
;
774 gm_sg_expiry_cancel(sg_grp
);
776 for (i
= 0; i
< item
->n_exclude
; i
++) {
777 struct gm_packet_sg
*item_src
;
779 item_src
= &pkt
->items
[offs
+ 1 + i
];
780 item_dup
= gm_packet_sg_subs_add(item_src
->sg
->subs_negative
,
787 gm_sg_update(item_src
->sg
, false);
791 /* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
792 * to get lower PIM churn/flapping
794 gm_sg_update(sg_grp
, false);
797 CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state");
798 /* on receiving a query, we need to update our robustness/query interval to
799 * match, so we correctly process group/source specific queries after last
803 static void gm_handle_v2_report(struct gm_if
*gm_ifp
,
804 const struct sockaddr_in6
*pkt_src
, char *data
,
807 struct mld_v2_report_hdr
*hdr
;
808 size_t i
, n_records
, max_entries
;
809 struct gm_packet_state
*pkt
;
811 if (len
< sizeof(*hdr
)) {
812 if (PIM_DEBUG_GM_PACKETS
)
813 zlog_debug(log_pkt_src(
814 "malformed MLDv2 report (truncated header)"));
815 gm_ifp
->stats
.rx_drop_malformed
++;
819 /* errors after this may at least partially process the packet */
820 gm_ifp
->stats
.rx_new_report
++;
822 hdr
= (struct mld_v2_report_hdr
*)data
;
823 data
+= sizeof(*hdr
);
826 /* can't have more *,G and S,G items than there is space for ipv6
827 * addresses, so just use this to allocate temporary buffer
829 max_entries
= len
/ sizeof(pim_addr
);
830 pkt
= XCALLOC(MTYPE_GM_STATE
,
831 offsetof(struct gm_packet_state
, items
[max_entries
]));
832 pkt
->n_sg
= max_entries
;
834 pkt
->subscriber
= gm_subscriber_findref(gm_ifp
, pkt_src
->sin6_addr
);
836 n_records
= ntohs(hdr
->n_records
);
838 /* validate & remove state in v2_pass1() */
839 for (i
= 0; i
< n_records
; i
++) {
840 struct mld_v2_rec_hdr
*rechdr
;
841 size_t n_src
, record_size
;
843 if (len
< sizeof(*rechdr
)) {
844 zlog_warn(log_pkt_src(
845 "malformed MLDv2 report (truncated record header)"));
846 gm_ifp
->stats
.rx_trunc_report
++;
850 rechdr
= (struct mld_v2_rec_hdr
*)data
;
851 data
+= sizeof(*rechdr
);
852 len
-= sizeof(*rechdr
);
854 n_src
= ntohs(rechdr
->n_src
);
855 record_size
= n_src
* sizeof(pim_addr
) + rechdr
->aux_len
* 4;
857 if (len
< record_size
) {
858 zlog_warn(log_pkt_src(
859 "malformed MLDv2 report (truncated source list)"));
860 gm_ifp
->stats
.rx_trunc_report
++;
863 if (!IN6_IS_ADDR_MULTICAST(&rechdr
->grp
)) {
866 "malformed MLDv2 report (invalid group %pI6)"),
868 gm_ifp
->stats
.rx_trunc_report
++;
875 gm_handle_v2_pass1(pkt
, rechdr
);
878 if (!pkt
->n_active
) {
879 gm_subscriber_drop(&pkt
->subscriber
);
880 XFREE(MTYPE_GM_STATE
, pkt
);
884 pkt
= XREALLOC(MTYPE_GM_STATE
, pkt
,
885 offsetof(struct gm_packet_state
, items
[pkt
->n_active
]));
886 pkt
->n_sg
= pkt
->n_active
;
889 monotime(&pkt
->received
);
890 if (!pkt
->subscriber
)
891 pkt
->subscriber
= gm_subscriber_get(gm_ifp
, pkt_src
->sin6_addr
);
892 gm_packets_add_tail(pkt
->subscriber
->packets
, pkt
);
893 gm_packet_expires_add_tail(gm_ifp
->expires
, pkt
);
895 for (i
= 0; i
< pkt
->n_sg
; i
++)
896 if (!pkt
->items
[i
].is_excl
)
897 gm_handle_v2_pass2_incl(pkt
, i
);
899 gm_handle_v2_pass2_excl(pkt
, i
);
900 i
+= pkt
->items
[i
].n_exclude
;
903 if (pkt
->n_active
== 0)
907 static void gm_handle_v1_report(struct gm_if
*gm_ifp
,
908 const struct sockaddr_in6
*pkt_src
, char *data
,
911 struct mld_v1_pkt
*hdr
;
912 struct gm_packet_state
*pkt
;
914 struct gm_packet_sg
*item
;
917 if (len
< sizeof(*hdr
)) {
918 if (PIM_DEBUG_GM_PACKETS
)
919 zlog_debug(log_pkt_src(
920 "malformed MLDv1 report (truncated)"));
921 gm_ifp
->stats
.rx_drop_malformed
++;
925 gm_ifp
->stats
.rx_old_report
++;
927 hdr
= (struct mld_v1_pkt
*)data
;
930 pkt
= XCALLOC(MTYPE_GM_STATE
,
931 offsetof(struct gm_packet_state
, items
[max_entries
]));
932 pkt
->n_sg
= max_entries
;
934 pkt
->subscriber
= gm_subscriber_findref(gm_ifp
, gm_dummy_untracked
);
936 /* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */
938 grp
= gm_sg_find(pkt
->iface
, hdr
->grp
, PIMADDR_ANY
);
940 grp
= gm_sg_make(pkt
->iface
, hdr
->grp
, PIMADDR_ANY
);
942 item
= gm_packet_sg_setup(pkt
, grp
, true, false);
944 CPP_NOTICE("set v1-seen timer on grp here");
948 /* pass2 will count n_active back up to 1. Also since a v1 report
949 * has exactly 1 group, we can skip the realloc() that v2 needs here.
951 assert(pkt
->n_active
== 1);
952 pkt
->n_sg
= pkt
->n_active
;
955 monotime(&pkt
->received
);
956 if (!pkt
->subscriber
)
957 pkt
->subscriber
= gm_subscriber_get(gm_ifp
, gm_dummy_untracked
);
958 gm_packets_add_tail(pkt
->subscriber
->packets
, pkt
);
959 gm_packet_expires_add_tail(gm_ifp
->expires
, pkt
);
961 /* pass2 covers installing state & removing old state; all the v1
962 * compat is handled at this point.
964 * Note that "old state" may be v2; subscribers will switch from v2
965 * reports to v1 reports when the querier changes from v2 to v1. So,
966 * limiting this to v1 would be wrong.
968 gm_handle_v2_pass2_excl(pkt
, 0);
970 if (pkt
->n_active
== 0)
974 static void gm_handle_v1_leave(struct gm_if
*gm_ifp
,
975 const struct sockaddr_in6
*pkt_src
, char *data
,
978 struct mld_v1_pkt
*hdr
;
979 struct gm_subscriber
*subscriber
;
981 struct gm_packet_sg
*old_grp
;
983 if (len
< sizeof(*hdr
)) {
984 if (PIM_DEBUG_GM_PACKETS
)
985 zlog_debug(log_pkt_src(
986 "malformed MLDv1 leave (truncated)"));
987 gm_ifp
->stats
.rx_drop_malformed
++;
991 gm_ifp
->stats
.rx_old_leave
++;
993 hdr
= (struct mld_v1_pkt
*)data
;
995 subscriber
= gm_subscriber_findref(gm_ifp
, gm_dummy_untracked
);
999 /* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */
1001 grp
= gm_sg_find(gm_ifp
, hdr
->grp
, PIMADDR_ANY
);
1003 old_grp
= gm_packet_sg_find(grp
, GM_SUB_POS
, subscriber
);
1005 gm_packet_sg_drop(old_grp
);
1006 gm_sg_update(grp
, false);
1007 CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
1013 /* nothing more to do here, pass2 is no-op for leaves */
1014 gm_subscriber_drop(&subscriber
);
1017 /* for each general query received (or sent), a timer is started to expire
1018 * _everything_ at the appropriate time (including robustness multiplier).
1020 * So when this timer hits, all packets - with all of their items - that were
1021 * received *before* the query are aged out, and state updated accordingly.
1022 * Note that when we receive a refresh/update, the previous/old packet is
1023 * already dropped and replaced with a new one, so in normal steady-state
1024 * operation, this timer won't be doing anything.
1026 * Additionally, if a subscriber actively leaves a group, that goes through
1027 * its own path too and won't hit this. This is really only triggered when a
1028 * host straight up disappears.
1030 static void gm_t_expire(struct event
*t
)
1032 struct gm_if
*gm_ifp
= EVENT_ARG(t
);
1033 struct gm_packet_state
*pkt
;
1035 zlog_info(log_ifp("general expiry timer"));
1037 while (gm_ifp
->n_pending
) {
1038 struct gm_general_pending
*pend
= gm_ifp
->pending
;
1039 struct timeval remain
;
1042 remain_ms
= monotime_until(&pend
->expiry
, &remain
);
1043 if (remain_ms
> 0) {
1044 if (PIM_DEBUG_GM_EVENTS
)
1046 log_ifp("next general expiry in %" PRId64
"ms"),
1049 event_add_timer_tv(router
->master
, gm_t_expire
, gm_ifp
,
1050 &remain
, &gm_ifp
->t_expire
);
1054 while ((pkt
= gm_packet_expires_first(gm_ifp
->expires
))) {
1055 if (timercmp(&pkt
->received
, &pend
->query
, >=))
1058 if (PIM_DEBUG_GM_PACKETS
)
1059 zlog_debug(log_ifp("expire packet %p"), pkt
);
1060 gm_packet_drop(pkt
, true);
1063 gm_ifp
->n_pending
--;
1064 memmove(gm_ifp
->pending
, gm_ifp
->pending
+ 1,
1065 gm_ifp
->n_pending
* sizeof(gm_ifp
->pending
[0]));
1068 if (PIM_DEBUG_GM_EVENTS
)
1069 zlog_debug(log_ifp("next general expiry waiting for query"));
1072 /* NB: the receive handlers will also run when sending packets, since we
1073 * receive our own packets back in.
1075 static void gm_handle_q_general(struct gm_if
*gm_ifp
,
1076 struct gm_query_timers
*timers
)
1078 struct timeval now
, expiry
;
1079 struct gm_general_pending
*pend
;
1082 timeradd(&now
, &timers
->expire_wait
, &expiry
);
1084 while (gm_ifp
->n_pending
) {
1085 pend
= &gm_ifp
->pending
[gm_ifp
->n_pending
- 1];
1087 if (timercmp(&pend
->expiry
, &expiry
, <))
1090 /* if we end up here, the last item in pending[] has an expiry
1091 * later than the expiry for this query. But our query time
1092 * (now) is later than that of the item (because, well, that's
1093 * how time works.) This makes this query meaningless since
1094 * it's "supersetted" within the preexisting query
1097 if (PIM_DEBUG_GM_TRACE_DETAIL
)
1099 log_ifp("zapping supersetted general timer %pTVMu"),
1102 gm_ifp
->n_pending
--;
1103 if (!gm_ifp
->n_pending
)
1104 EVENT_OFF(gm_ifp
->t_expire
);
1107 /* people might be messing with their configs or something */
1108 if (gm_ifp
->n_pending
== array_size(gm_ifp
->pending
))
1111 pend
= &gm_ifp
->pending
[gm_ifp
->n_pending
];
1113 pend
->expiry
= expiry
;
1115 if (!gm_ifp
->n_pending
++) {
1116 if (PIM_DEBUG_GM_TRACE
)
1118 log_ifp("starting general timer @ 0: %pTVMu"),
1120 event_add_timer_tv(router
->master
, gm_t_expire
, gm_ifp
,
1121 &timers
->expire_wait
, &gm_ifp
->t_expire
);
1122 } else if (PIM_DEBUG_GM_TRACE
)
1123 zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
1124 gm_ifp
->n_pending
, &pend
->expiry
);
1127 static void gm_t_sg_expire(struct event
*t
)
1129 struct gm_sg
*sg
= EVENT_ARG(t
);
1130 struct gm_if
*gm_ifp
= sg
->iface
;
1131 struct gm_packet_sg
*item
;
1133 assertf(sg
->state
== GM_SG_JOIN_EXPIRING
||
1134 sg
->state
== GM_SG_NOPRUNE_EXPIRING
,
1135 "%pSG%%%s %pTHD", &sg
->sgaddr
, gm_ifp
->ifp
->name
, t
);
1137 frr_each_safe (gm_packet_sg_subs
, sg
->subs_positive
, item
)
1138 /* this will also drop EXCLUDE mode S,G lists together with
1141 gm_packet_sg_drop(item
);
1143 /* subs_negative items are only timed out together with the *,G entry
1144 * since we won't get any reports for a group-and-source query
1146 gm_sg_update(sg
, true);
1149 static bool gm_sg_check_recent(struct gm_if
*gm_ifp
, struct gm_sg
*sg
,
1152 struct gm_packet_state
*pkt
;
1154 if (!sg
->most_recent
) {
1155 struct gm_packet_state
*best_pkt
= NULL
;
1156 struct gm_packet_sg
*item
;
1158 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
1159 pkt
= gm_packet_sg2state(item
);
1162 timercmp(&pkt
->received
, &best_pkt
->received
, >)) {
1164 sg
->most_recent
= item
;
1168 if (sg
->most_recent
) {
1169 struct timeval fuzz
;
1171 pkt
= gm_packet_sg2state(sg
->most_recent
);
1173 /* this shouldn't happen on plain old real ethernet segment,
1174 * but on something like a VXLAN or VPLS it is very possible
1175 * that we get a report before the query that triggered it.
1176 * (imagine a triangle scenario with 3 datacenters, it's very
1177 * possible A->B + B->C is faster than A->C due to odd routing)
1179 * This makes a little tolerance allowance to handle that case.
1181 timeradd(&pkt
->received
, &gm_ifp
->cfg_timing_fuzz
, &fuzz
);
1183 if (timercmp(&fuzz
, &ref
, >))
1189 static void gm_sg_timer_start(struct gm_if
*gm_ifp
, struct gm_sg
*sg
,
1190 struct timeval expire_wait
)
1196 if (sg
->state
== GM_SG_PRUNE
)
1200 if (gm_sg_check_recent(gm_ifp
, sg
, now
))
1203 if (PIM_DEBUG_GM_TRACE
)
1204 zlog_debug(log_sg(sg
, "expiring in %pTVI"), &expire_wait
);
1206 if (sg
->t_sg_expire
) {
1207 struct timeval remain
;
1209 remain
= event_timer_remain(sg
->t_sg_expire
);
1210 if (timercmp(&remain
, &expire_wait
, <=))
1213 EVENT_OFF(sg
->t_sg_expire
);
1216 event_add_timer_tv(router
->master
, gm_t_sg_expire
, sg
, &expire_wait
,
1220 static void gm_handle_q_groupsrc(struct gm_if
*gm_ifp
,
1221 struct gm_query_timers
*timers
, pim_addr grp
,
1222 const pim_addr
*srcs
, size_t n_src
)
1227 for (i
= 0; i
< n_src
; i
++) {
1228 sg
= gm_sg_find(gm_ifp
, grp
, srcs
[i
]);
1229 gm_sg_timer_start(gm_ifp
, sg
, timers
->expire_wait
);
1233 static void gm_t_grp_expire(struct event
*t
)
1235 /* if we're here, that means when we received the group-specific query
1236 * there was one or more active S,G for this group. For *,G the timer
1237 * in sg->t_sg_expire is running separately and gets cancelled when we
1238 * receive a report, so that work is left to gm_t_sg_expire and we
1239 * shouldn't worry about it here.
1241 struct gm_grp_pending
*pend
= EVENT_ARG(t
);
1242 struct gm_if
*gm_ifp
= pend
->iface
;
1243 struct gm_sg
*sg
, *sg_start
, sg_ref
= {};
1245 if (PIM_DEBUG_GM_EVENTS
)
1246 zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend
->grp
);
1248 /* gteq lookup - try to find *,G or S,G (S,G is > *,G)
1249 * could technically be gt to skip a possible *,G
1251 sg_ref
.sgaddr
.grp
= pend
->grp
;
1252 sg_ref
.sgaddr
.src
= PIMADDR_ANY
;
1253 sg_start
= gm_sgs_find_gteq(gm_ifp
->sgs
, &sg_ref
);
1255 frr_each_from (gm_sgs
, gm_ifp
->sgs
, sg
, sg_start
) {
1256 struct gm_packet_sg
*item
;
1258 if (pim_addr_cmp(sg
->sgaddr
.grp
, pend
->grp
))
1260 if (pim_addr_is_any(sg
->sgaddr
.src
))
1261 /* handled by gm_t_sg_expire / sg->t_sg_expire */
1263 if (gm_sg_check_recent(gm_ifp
, sg
, pend
->query
))
1266 /* we may also have a group-source-specific query going on in
1267 * parallel. But if we received nothing for the *,G query,
1268 * the S,G query is kinda irrelevant.
1270 EVENT_OFF(sg
->t_sg_expire
);
1272 frr_each_safe (gm_packet_sg_subs
, sg
->subs_positive
, item
)
1273 /* this will also drop the EXCLUDE S,G lists */
1274 gm_packet_sg_drop(item
);
1276 gm_sg_update(sg
, true);
1279 gm_grp_pends_del(gm_ifp
->grp_pends
, pend
);
1280 XFREE(MTYPE_GM_GRP_PENDING
, pend
);
1283 static void gm_handle_q_group(struct gm_if
*gm_ifp
,
1284 struct gm_query_timers
*timers
, pim_addr grp
)
1286 struct gm_sg
*sg
, sg_ref
= {};
1287 struct gm_grp_pending
*pend
, pend_ref
= {};
1289 sg_ref
.sgaddr
.grp
= grp
;
1290 sg_ref
.sgaddr
.src
= PIMADDR_ANY
;
1291 /* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
1292 sg
= gm_sgs_find_gteq(gm_ifp
->sgs
, &sg_ref
);
1294 if (!sg
|| pim_addr_cmp(sg
->sgaddr
.grp
, grp
))
1295 /* we have nothing at all for this group - don't waste RAM */
1298 if (pim_addr_is_any(sg
->sgaddr
.src
)) {
1299 /* actually found *,G entry here */
1300 if (PIM_DEBUG_GM_TRACE
)
1301 zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
1303 gm_sg_timer_start(gm_ifp
, sg
, timers
->expire_wait
);
1305 sg
= gm_sgs_next(gm_ifp
->sgs
, sg
);
1306 if (!sg
|| pim_addr_cmp(sg
->sgaddr
.grp
, grp
))
1307 /* no S,G for this group */
1312 pend
= gm_grp_pends_find(gm_ifp
->grp_pends
, &pend_ref
);
1315 struct timeval remain
;
1317 remain
= event_timer_remain(pend
->t_expire
);
1318 if (timercmp(&remain
, &timers
->expire_wait
, <=))
1321 EVENT_OFF(pend
->t_expire
);
1323 pend
= XCALLOC(MTYPE_GM_GRP_PENDING
, sizeof(*pend
));
1325 pend
->iface
= gm_ifp
;
1326 gm_grp_pends_add(gm_ifp
->grp_pends
, pend
);
1329 monotime(&pend
->query
);
1330 event_add_timer_tv(router
->master
, gm_t_grp_expire
, pend
,
1331 &timers
->expire_wait
, &pend
->t_expire
);
1333 if (PIM_DEBUG_GM_TRACE
)
1334 zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp
,
1338 static void gm_bump_querier(struct gm_if
*gm_ifp
)
1340 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1342 EVENT_OFF(gm_ifp
->t_query
);
1344 if (pim_addr_is_any(pim_ifp
->ll_lowest
))
1346 if (!IPV6_ADDR_SAME(&gm_ifp
->querier
, &pim_ifp
->ll_lowest
))
1349 gm_ifp
->n_startup
= gm_ifp
->cur_qrv
;
1351 event_execute(router
->master
, gm_t_query
, gm_ifp
, 0);
1354 static void gm_t_other_querier(struct event
*t
)
1356 struct gm_if
*gm_ifp
= EVENT_ARG(t
);
1357 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1359 zlog_info(log_ifp("other querier timer expired"));
1361 gm_ifp
->querier
= pim_ifp
->ll_lowest
;
1362 gm_ifp
->n_startup
= gm_ifp
->cur_qrv
;
1364 event_execute(router
->master
, gm_t_query
, gm_ifp
, 0);
1367 static void gm_handle_query(struct gm_if
*gm_ifp
,
1368 const struct sockaddr_in6
*pkt_src
,
1369 pim_addr
*pkt_dst
, char *data
, size_t len
)
1371 struct mld_v2_query_hdr
*hdr
;
1372 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1373 struct gm_query_timers timers
;
1376 if (len
< sizeof(struct mld_v2_query_hdr
) &&
1377 len
!= sizeof(struct mld_v1_pkt
)) {
1378 zlog_warn(log_pkt_src("invalid query size"));
1379 gm_ifp
->stats
.rx_drop_malformed
++;
1383 hdr
= (struct mld_v2_query_hdr
*)data
;
1384 general_query
= pim_addr_is_any(hdr
->grp
);
1386 if (!general_query
&& !IN6_IS_ADDR_MULTICAST(&hdr
->grp
)) {
1387 zlog_warn(log_pkt_src(
1388 "malformed MLDv2 query (invalid group %pI6)"),
1390 gm_ifp
->stats
.rx_drop_malformed
++;
1394 if (len
>= sizeof(struct mld_v2_query_hdr
)) {
1395 size_t src_space
= ntohs(hdr
->n_src
) * sizeof(pim_addr
);
1397 if (len
< sizeof(struct mld_v2_query_hdr
) + src_space
) {
1398 zlog_warn(log_pkt_src(
1399 "malformed MLDv2 query (truncated source list)"));
1400 gm_ifp
->stats
.rx_drop_malformed
++;
1404 if (general_query
&& src_space
) {
1405 zlog_warn(log_pkt_src(
1406 "malformed MLDv2 query (general query with non-empty source list)"));
1407 gm_ifp
->stats
.rx_drop_malformed
++;
1412 /* accepting queries unicast to us (or addressed to a wrong group)
1413 * can mess up querier election as well as cause us to terminate
1414 * traffic (since after a unicast query no reports will be coming in)
1416 if (!IPV6_ADDR_SAME(pkt_dst
, &gm_all_hosts
)) {
1417 if (pim_addr_is_any(hdr
->grp
)) {
1420 "wrong destination %pPA for general query"),
1422 gm_ifp
->stats
.rx_drop_dstaddr
++;
1426 if (!IPV6_ADDR_SAME(&hdr
->grp
, pkt_dst
)) {
1427 gm_ifp
->stats
.rx_drop_dstaddr
++;
1430 "wrong destination %pPA for group specific query"),
1436 if (IPV6_ADDR_CMP(&pkt_src
->sin6_addr
, &gm_ifp
->querier
) < 0) {
1437 if (PIM_DEBUG_GM_EVENTS
)
1439 log_pkt_src("replacing elected querier %pPA"),
1442 gm_ifp
->querier
= pkt_src
->sin6_addr
;
1445 if (len
== sizeof(struct mld_v1_pkt
)) {
1446 timers
.qrv
= gm_ifp
->cur_qrv
;
1447 timers
.max_resp_ms
= hdr
->max_resp_code
;
1448 timers
.qqic_ms
= gm_ifp
->cur_query_intv
;
1450 timers
.qrv
= (hdr
->flags
& 0x7) ?: 8;
1451 timers
.max_resp_ms
= mld_max_resp_decode(hdr
->max_resp_code
);
1452 timers
.qqic_ms
= igmp_msg_decode8to16(hdr
->qqic
) * 1000;
1454 timers
.fuzz
= gm_ifp
->cfg_timing_fuzz
;
1456 gm_expiry_calc(&timers
);
1458 if (PIM_DEBUG_GM_TRACE_DETAIL
)
1460 log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
1461 timers
.qrv
, timers
.max_resp_ms
, timers
.qqic_ms
,
1462 &timers
.expire_wait
);
1464 if (IPV6_ADDR_CMP(&pkt_src
->sin6_addr
, &pim_ifp
->ll_lowest
) < 0) {
1465 unsigned int other_ms
;
1467 EVENT_OFF(gm_ifp
->t_query
);
1468 EVENT_OFF(gm_ifp
->t_other_querier
);
1470 other_ms
= timers
.qrv
* timers
.qqic_ms
+ timers
.max_resp_ms
/ 2;
1471 event_add_timer_msec(router
->master
, gm_t_other_querier
, gm_ifp
,
1472 other_ms
, &gm_ifp
->t_other_querier
);
1475 if (len
== sizeof(struct mld_v1_pkt
)) {
1476 if (general_query
) {
1477 gm_handle_q_general(gm_ifp
, &timers
);
1478 gm_ifp
->stats
.rx_query_old_general
++;
1480 gm_handle_q_group(gm_ifp
, &timers
, hdr
->grp
);
1481 gm_ifp
->stats
.rx_query_old_group
++;
1486 /* v2 query - [S]uppress bit */
1487 if (hdr
->flags
& 0x8) {
1488 gm_ifp
->stats
.rx_query_new_sbit
++;
1492 if (general_query
) {
1493 gm_handle_q_general(gm_ifp
, &timers
);
1494 gm_ifp
->stats
.rx_query_new_general
++;
1495 } else if (!ntohs(hdr
->n_src
)) {
1496 gm_handle_q_group(gm_ifp
, &timers
, hdr
->grp
);
1497 gm_ifp
->stats
.rx_query_new_group
++;
1499 gm_handle_q_groupsrc(gm_ifp
, &timers
, hdr
->grp
, hdr
->srcs
,
1501 gm_ifp
->stats
.rx_query_new_groupsrc
++;
1505 static void gm_rx_process(struct gm_if
*gm_ifp
,
1506 const struct sockaddr_in6
*pkt_src
, pim_addr
*pkt_dst
,
1507 void *data
, size_t pktlen
)
1509 struct icmp6_plain_hdr
*icmp6
= data
;
1510 uint16_t pkt_csum
, ref_csum
;
1511 struct ipv6_ph ph6
= {
1512 .src
= pkt_src
->sin6_addr
,
1514 .ulpl
= htons(pktlen
),
1515 .next_hdr
= IPPROTO_ICMPV6
,
1518 pkt_csum
= icmp6
->icmp6_cksum
;
1519 icmp6
->icmp6_cksum
= 0;
1520 ref_csum
= in_cksum_with_ph6(&ph6
, data
, pktlen
);
1522 if (pkt_csum
!= ref_csum
) {
1525 "(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
1526 pkt_dst
, pkt_csum
, ref_csum
);
1527 gm_ifp
->stats
.rx_drop_csum
++;
1532 pktlen
-= sizeof(*icmp6
);
1534 switch (icmp6
->icmp6_type
) {
1535 case ICMP6_MLD_QUERY
:
1536 gm_handle_query(gm_ifp
, pkt_src
, pkt_dst
, data
, pktlen
);
1538 case ICMP6_MLD_V1_REPORT
:
1539 gm_handle_v1_report(gm_ifp
, pkt_src
, data
, pktlen
);
1541 case ICMP6_MLD_V1_DONE
:
1542 gm_handle_v1_leave(gm_ifp
, pkt_src
, data
, pktlen
);
1544 case ICMP6_MLD_V2_REPORT
:
1545 gm_handle_v2_report(gm_ifp
, pkt_src
, data
, pktlen
);
/* Scan an IPv6 Hop-by-Hop options block for a Router Alert option carrying
 * alert_type.  Returns true when found, false on absence or malformed data.
 * hopopts points at the raw HBH header (next-header byte, length byte in
 * 8-octet units minus 1, then TLV options).
 */
static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
				 uint16_t alert_type)
{
	uint8_t *hopopt_end;

	if (hopopt_len < 8)
		return false;
	/* length byte counts 8-octet units, excluding the first */
	if (hopopt_len < (hopopts[1] + 1U) * 8U)
		return false;

	hopopt_end = hopopts + (hopopts[1] + 1) * 8;
	hopopts += 2; /* skip next-header + length */

	while (hopopts < hopopt_end) {
		if (hopopts[0] == IP6OPT_PAD1) {
			hopopts++;
			continue;
		}

		/* need space for type + length, then the option body */
		if (hopopts > hopopt_end - 2)
			break;
		if (hopopts > hopopt_end - 2 - hopopts[1])
			break;

		if (hopopts[0] == IP6OPT_ROUTER_ALERT && hopopts[1] == 2) {
			uint16_t have_type = (hopopts[2] << 8) | hopopts[3];

			if (have_type == alert_type)
				return true;
		}

		hopopts += 2 + hopopts[1];
	}
	return false;
}
1586 static void gm_t_recv(struct event
*t
)
1588 struct pim_instance
*pim
= EVENT_ARG(t
);
1590 char buf
[CMSG_SPACE(sizeof(struct in6_pktinfo
)) +
1591 CMSG_SPACE(256) /* hop options */ +
1592 CMSG_SPACE(sizeof(int)) /* hopcount */];
1593 struct cmsghdr align
;
1595 struct cmsghdr
*cmsg
;
1596 struct in6_pktinfo
*pktinfo
= NULL
;
1597 uint8_t *hopopts
= NULL
;
1598 size_t hopopt_len
= 0;
1599 int *hoplimit
= NULL
;
1601 struct msghdr mh
[1] = {};
1602 struct iovec iov
[1];
1603 struct sockaddr_in6 pkt_src
[1] = {};
1607 event_add_read(router
->master
, gm_t_recv
, pim
, pim
->gm_socket
,
1610 iov
->iov_base
= rxbuf
;
1611 iov
->iov_len
= sizeof(rxbuf
);
1613 mh
->msg_name
= pkt_src
;
1614 mh
->msg_namelen
= sizeof(pkt_src
);
1615 mh
->msg_control
= cmsgbuf
.buf
;
1616 mh
->msg_controllen
= sizeof(cmsgbuf
.buf
);
1618 mh
->msg_iovlen
= array_size(iov
);
1621 nread
= recvmsg(pim
->gm_socket
, mh
, MSG_PEEK
| MSG_TRUNC
);
1623 zlog_err("(VRF %s) RX error: %m", pim
->vrf
->name
);
1624 pim
->gm_rx_drop_sys
++;
1628 if ((size_t)nread
> sizeof(rxbuf
)) {
1629 iov
->iov_base
= XMALLOC(MTYPE_GM_PACKET
, nread
);
1630 iov
->iov_len
= nread
;
1632 nread
= recvmsg(pim
->gm_socket
, mh
, 0);
1634 zlog_err("(VRF %s) RX error: %m", pim
->vrf
->name
);
1635 pim
->gm_rx_drop_sys
++;
1639 struct interface
*ifp
;
1641 ifp
= if_lookup_by_index(pkt_src
->sin6_scope_id
, pim
->vrf
->vrf_id
);
1642 if (!ifp
|| !ifp
->info
)
1645 struct pim_interface
*pim_ifp
= ifp
->info
;
1646 struct gm_if
*gm_ifp
= pim_ifp
->mld
;
1651 for (cmsg
= CMSG_FIRSTHDR(mh
); cmsg
; cmsg
= CMSG_NXTHDR(mh
, cmsg
)) {
1652 if (cmsg
->cmsg_level
!= SOL_IPV6
)
1655 switch (cmsg
->cmsg_type
) {
1657 pktinfo
= (struct in6_pktinfo
*)CMSG_DATA(cmsg
);
1660 hopopts
= CMSG_DATA(cmsg
);
1661 hopopt_len
= cmsg
->cmsg_len
- sizeof(*cmsg
);
1664 hoplimit
= (int *)CMSG_DATA(cmsg
);
1669 if (!pktinfo
|| !hoplimit
) {
1671 "BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
1672 pim
->gm_rx_drop_sys
++;
1676 if (*hoplimit
!= 1) {
1677 zlog_err(log_pkt_src("packet with hop limit != 1"));
1678 /* spoofing attempt => count on srcaddr counter */
1679 gm_ifp
->stats
.rx_drop_srcaddr
++;
1683 if (!ip6_check_hopopts_ra(hopopts
, hopopt_len
, IP6_ALERT_MLD
)) {
1684 zlog_err(log_pkt_src(
1685 "packet without IPv6 Router Alert MLD option"));
1686 gm_ifp
->stats
.rx_drop_ra
++;
1690 if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src
->sin6_addr
))
1691 /* reports from :: happen in normal operation for DAD, so
1692 * don't spam log messages about this
1696 if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src
->sin6_addr
)) {
1697 zlog_warn(log_pkt_src("packet from invalid source address"));
1698 gm_ifp
->stats
.rx_drop_srcaddr
++;
1703 if (pktlen
< sizeof(struct icmp6_plain_hdr
)) {
1704 zlog_warn(log_pkt_src("truncated packet"));
1705 gm_ifp
->stats
.rx_drop_malformed
++;
1709 gm_rx_process(gm_ifp
, pkt_src
, &pktinfo
->ipi6_addr
, iov
->iov_base
,
1713 if (iov
->iov_base
!= rxbuf
)
1714 XFREE(MTYPE_GM_PACKET
, iov
->iov_base
);
1717 static void gm_send_query(struct gm_if
*gm_ifp
, pim_addr grp
,
1718 const pim_addr
*srcs
, size_t n_srcs
, bool s_bit
)
1720 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1721 struct sockaddr_in6 dstaddr
= {
1722 .sin6_family
= AF_INET6
,
1723 .sin6_scope_id
= gm_ifp
->ifp
->ifindex
,
1726 struct icmp6_plain_hdr hdr
;
1727 struct mld_v2_query_hdr v2_query
;
1729 /* clang-format off */
1731 .icmp6_type
= ICMP6_MLD_QUERY
,
1737 /* clang-format on */
1739 struct ipv6_ph ph6
= {
1740 .src
= pim_ifp
->ll_lowest
,
1741 .ulpl
= htons(sizeof(query
)),
1742 .next_hdr
= IPPROTO_ICMPV6
,
1745 char buf
[CMSG_SPACE(8) /* hop options */ +
1746 CMSG_SPACE(sizeof(struct in6_pktinfo
))];
1747 struct cmsghdr align
;
1749 struct cmsghdr
*cmh
;
1750 struct msghdr mh
[1] = {};
1751 struct iovec iov
[3];
1753 ssize_t ret
, expect_ret
;
1755 struct in6_pktinfo
*pktinfo
;
1757 if (if_is_loopback(gm_ifp
->ifp
)) {
1758 /* Linux is a bit odd with multicast on loopback */
1759 ph6
.src
= in6addr_loopback
;
1760 dstaddr
.sin6_addr
= in6addr_loopback
;
1761 } else if (pim_addr_is_any(grp
))
1762 dstaddr
.sin6_addr
= gm_all_hosts
;
1764 dstaddr
.sin6_addr
= grp
;
1766 query
.v2_query
.max_resp_code
=
1767 mld_max_resp_encode(gm_ifp
->cur_max_resp
);
1768 query
.v2_query
.flags
= (gm_ifp
->cur_qrv
< 8) ? gm_ifp
->cur_qrv
: 0;
1770 query
.v2_query
.flags
|= 0x08;
1771 query
.v2_query
.qqic
=
1772 igmp_msg_encode16to8(gm_ifp
->cur_query_intv
/ 1000);
1773 query
.v2_query
.n_src
= htons(n_srcs
);
1775 ph6
.dst
= dstaddr
.sin6_addr
;
1777 /* ph6 not included in sendmsg */
1778 iov
[0].iov_base
= &ph6
;
1779 iov
[0].iov_len
= sizeof(ph6
);
1780 iov
[1].iov_base
= &query
;
1781 if (gm_ifp
->cur_version
== GM_MLDV1
) {
1783 iov
[1].iov_len
= sizeof(query
.hdr
) + sizeof(struct mld_v1_pkt
);
1784 } else if (!n_srcs
) {
1786 iov
[1].iov_len
= sizeof(query
);
1788 iov
[1].iov_len
= sizeof(query
);
1789 iov
[2].iov_base
= (void *)srcs
;
1790 iov
[2].iov_len
= n_srcs
* sizeof(srcs
[0]);
1794 query
.hdr
.icmp6_cksum
= in_cksumv(iov
, iov_len
);
1796 if (PIM_DEBUG_GM_PACKETS
)
1798 log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
1799 &pim_ifp
->ll_lowest
, &dstaddr
.sin6_addr
, &grp
, n_srcs
);
1801 mh
->msg_name
= &dstaddr
;
1802 mh
->msg_namelen
= sizeof(dstaddr
);
1803 mh
->msg_iov
= iov
+ 1;
1804 mh
->msg_iovlen
= iov_len
- 1;
1805 mh
->msg_control
= &cmsg
;
1806 mh
->msg_controllen
= sizeof(cmsg
.buf
);
1808 cmh
= CMSG_FIRSTHDR(mh
);
1809 cmh
->cmsg_level
= IPPROTO_IPV6
;
1810 cmh
->cmsg_type
= IPV6_HOPOPTS
;
1811 cmh
->cmsg_len
= CMSG_LEN(8);
1812 dp
= CMSG_DATA(cmh
);
1813 *dp
++ = 0; /* next header */
1814 *dp
++ = 0; /* length (8-byte blocks, minus 1) */
1815 *dp
++ = IP6OPT_ROUTER_ALERT
; /* router alert */
1816 *dp
++ = 2; /* length */
1817 *dp
++ = 0; /* value (2 bytes) */
1818 *dp
++ = 0; /* value (2 bytes) (0 = MLD) */
1819 *dp
++ = 0; /* pad0 */
1820 *dp
++ = 0; /* pad0 */
1822 cmh
= CMSG_NXTHDR(mh
, cmh
);
1823 cmh
->cmsg_level
= IPPROTO_IPV6
;
1824 cmh
->cmsg_type
= IPV6_PKTINFO
;
1825 cmh
->cmsg_len
= CMSG_LEN(sizeof(struct in6_pktinfo
));
1826 pktinfo
= (struct in6_pktinfo
*)CMSG_DATA(cmh
);
1827 pktinfo
->ipi6_ifindex
= gm_ifp
->ifp
->ifindex
;
1828 pktinfo
->ipi6_addr
= gm_ifp
->cur_ll_lowest
;
1830 expect_ret
= iov
[1].iov_len
;
1832 expect_ret
+= iov
[2].iov_len
;
1834 frr_with_privs (&pimd_privs
) {
1835 ret
= sendmsg(gm_ifp
->pim
->gm_socket
, mh
, 0);
1838 if (ret
!= expect_ret
) {
1839 zlog_warn(log_ifp("failed to send query: %m"));
1840 gm_ifp
->stats
.tx_query_fail
++;
1842 if (gm_ifp
->cur_version
== GM_MLDV1
) {
1843 if (pim_addr_is_any(grp
))
1844 gm_ifp
->stats
.tx_query_old_general
++;
1846 gm_ifp
->stats
.tx_query_old_group
++;
1848 if (pim_addr_is_any(grp
))
1849 gm_ifp
->stats
.tx_query_new_general
++;
1851 gm_ifp
->stats
.tx_query_new_group
++;
1853 gm_ifp
->stats
.tx_query_new_groupsrc
++;
1858 static void gm_t_query(struct event
*t
)
1860 struct gm_if
*gm_ifp
= EVENT_ARG(t
);
1861 unsigned int timer_ms
= gm_ifp
->cur_query_intv
;
1863 if (gm_ifp
->n_startup
) {
1865 gm_ifp
->n_startup
--;
1868 event_add_timer_msec(router
->master
, gm_t_query
, gm_ifp
, timer_ms
,
1871 gm_send_query(gm_ifp
, PIMADDR_ANY
, NULL
, 0, false);
/* Timer trampoline: fire the next triggered (S,G)/(*,G) specific query. */
static void gm_t_sg_query(struct event *t)
{
	struct gm_sg *sg = EVENT_ARG(t);

	gm_trigger_specific(sg);
}
1881 /* S,G specific queries (triggered by a member leaving) get a little slack
1882 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
1884 static void gm_send_specific(struct gm_gsq_pending
*pend_gsq
)
1886 struct gm_if
*gm_ifp
= pend_gsq
->iface
;
1888 gm_send_query(gm_ifp
, pend_gsq
->grp
, pend_gsq
->srcs
, pend_gsq
->n_src
,
1891 gm_gsq_pends_del(gm_ifp
->gsq_pends
, pend_gsq
);
1892 XFREE(MTYPE_GM_GSQ_PENDING
, pend_gsq
);
/* Timer trampoline: flush a bundled group-source specific query. */
static void gm_t_gsq_pend(struct event *t)
{
	struct gm_gsq_pending *pend_gsq = EVENT_ARG(t);

	gm_send_specific(pend_gsq);
}
1902 static void gm_trigger_specific(struct gm_sg
*sg
)
1904 struct gm_if
*gm_ifp
= sg
->iface
;
1905 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1906 struct gm_gsq_pending
*pend_gsq
, ref
= {};
1910 event_add_timer_msec(router
->master
, gm_t_sg_query
, sg
,
1911 gm_ifp
->cur_query_intv_trig
,
1914 if (!IPV6_ADDR_SAME(&gm_ifp
->querier
, &pim_ifp
->ll_lowest
))
1916 if (gm_ifp
->pim
->gm_socket
== -1)
1919 if (PIM_DEBUG_GM_TRACE
)
1920 zlog_debug(log_sg(sg
, "triggered query"));
1922 if (pim_addr_is_any(sg
->sgaddr
.src
)) {
1923 gm_send_query(gm_ifp
, sg
->sgaddr
.grp
, NULL
, 0, sg
->query_sbit
);
1927 ref
.grp
= sg
->sgaddr
.grp
;
1928 ref
.s_bit
= sg
->query_sbit
;
1930 pend_gsq
= gm_gsq_pends_find(gm_ifp
->gsq_pends
, &ref
);
1932 pend_gsq
= XCALLOC(MTYPE_GM_GSQ_PENDING
, sizeof(*pend_gsq
));
1933 pend_gsq
->grp
= sg
->sgaddr
.grp
;
1934 pend_gsq
->s_bit
= sg
->query_sbit
;
1935 pend_gsq
->iface
= gm_ifp
;
1936 gm_gsq_pends_add(gm_ifp
->gsq_pends
, pend_gsq
);
1938 event_add_timer_tv(router
->master
, gm_t_gsq_pend
, pend_gsq
,
1939 &gm_ifp
->cfg_timing_fuzz
, &pend_gsq
->t_send
);
1942 assert(pend_gsq
->n_src
< array_size(pend_gsq
->srcs
));
1944 pend_gsq
->srcs
[pend_gsq
->n_src
] = sg
->sgaddr
.src
;
1947 if (pend_gsq
->n_src
== array_size(pend_gsq
->srcs
)) {
1948 EVENT_OFF(pend_gsq
->t_send
);
1949 gm_send_specific(pend_gsq
);
1954 static void gm_vrf_socket_incref(struct pim_instance
*pim
)
1956 struct vrf
*vrf
= pim
->vrf
;
1958 struct icmp6_filter filter
[1];
1960 if (pim
->gm_socket_if_count
++ && pim
->gm_socket
!= -1)
1963 ICMP6_FILTER_SETBLOCKALL(filter
);
1964 ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY
, filter
);
1965 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT
, filter
);
1966 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE
, filter
);
1967 ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT
, filter
);
1969 frr_with_privs (&pimd_privs
) {
1970 pim
->gm_socket
= vrf_socket(AF_INET6
, SOCK_RAW
, IPPROTO_ICMPV6
,
1971 vrf
->vrf_id
, vrf
->name
);
1972 if (pim
->gm_socket
< 0) {
1973 zlog_err("(VRF %s) could not create MLD socket: %m",
1978 ret
= setsockopt(pim
->gm_socket
, SOL_ICMPV6
, ICMP6_FILTER
,
1979 filter
, sizeof(filter
));
1981 zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
1985 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_RECVPKTINFO
,
1986 &intval
, sizeof(intval
));
1988 zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
1992 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_RECVHOPOPTS
,
1993 &intval
, sizeof(intval
));
1995 zlog_err("(VRF %s) failed to set IPV6_HOPOPTS: %m",
1999 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_RECVHOPLIMIT
,
2000 &intval
, sizeof(intval
));
2002 zlog_err("(VRF %s) failed to set IPV6_HOPLIMIT: %m",
2006 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_MULTICAST_LOOP
,
2007 &intval
, sizeof(intval
));
2010 "(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
2014 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_MULTICAST_HOPS
,
2015 &intval
, sizeof(intval
));
2018 "(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
2021 /* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
2022 * RX filtering in Linux. It only means "receive all groups
2023 * that something on the system has joined". To actually
2024 * receive *all* MLD packets - which is what we need -
2025 * multicast routing must be enabled on the interface. And
2026 * this only works for MLD packets specifically.
2028 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
2029 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
2031 * Also note that the code there explicitly checks for the IPv6
2032 * router alert MLD option (which is required by the RFC to be
2033 * on MLD packets.) That implies trying to support hosts which
2034 * erroneously don't add that option is just not possible.
2037 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_MULTICAST_ALL
,
2038 &intval
, sizeof(intval
));
2041 "(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
2045 event_add_read(router
->master
, gm_t_recv
, pim
, pim
->gm_socket
,
2049 static void gm_vrf_socket_decref(struct pim_instance
*pim
)
2051 if (--pim
->gm_socket_if_count
)
2054 EVENT_OFF(pim
->t_gm_recv
);
2055 close(pim
->gm_socket
);
2056 pim
->gm_socket
= -1;
2059 static void gm_start(struct interface
*ifp
)
2061 struct pim_interface
*pim_ifp
= ifp
->info
;
2062 struct gm_if
*gm_ifp
;
2065 assert(pim_ifp
->pim
);
2066 assert(pim_ifp
->mroute_vif_index
>= 0);
2067 assert(!pim_ifp
->mld
);
2069 gm_vrf_socket_incref(pim_ifp
->pim
);
2071 gm_ifp
= XCALLOC(MTYPE_GM_IFACE
, sizeof(*gm_ifp
));
2073 pim_ifp
->mld
= gm_ifp
;
2074 gm_ifp
->pim
= pim_ifp
->pim
;
2075 monotime(&gm_ifp
->started
);
2077 zlog_info(log_ifp("starting MLD"));
2079 if (pim_ifp
->mld_version
== 1)
2080 gm_ifp
->cur_version
= GM_MLDV1
;
2082 gm_ifp
->cur_version
= GM_MLDV2
;
2084 gm_ifp
->cur_qrv
= pim_ifp
->gm_default_robustness_variable
;
2085 gm_ifp
->cur_query_intv
= pim_ifp
->gm_default_query_interval
* 1000;
2086 gm_ifp
->cur_query_intv_trig
=
2087 pim_ifp
->gm_specific_query_max_response_time_dsec
* 100;
2088 gm_ifp
->cur_max_resp
= pim_ifp
->gm_query_max_response_time_dsec
* 100;
2089 gm_ifp
->cur_lmqc
= pim_ifp
->gm_last_member_query_count
;
2091 gm_ifp
->cfg_timing_fuzz
.tv_sec
= 0;
2092 gm_ifp
->cfg_timing_fuzz
.tv_usec
= 10 * 1000;
2094 gm_sgs_init(gm_ifp
->sgs
);
2095 gm_subscribers_init(gm_ifp
->subscribers
);
2096 gm_packet_expires_init(gm_ifp
->expires
);
2097 gm_grp_pends_init(gm_ifp
->grp_pends
);
2098 gm_gsq_pends_init(gm_ifp
->gsq_pends
);
2100 frr_with_privs (&pimd_privs
) {
2101 struct ipv6_mreq mreq
;
2104 /* all-MLDv2 group */
2105 mreq
.ipv6mr_multiaddr
= gm_all_routers
;
2106 mreq
.ipv6mr_interface
= ifp
->ifindex
;
2107 ret
= setsockopt(gm_ifp
->pim
->gm_socket
, SOL_IPV6
,
2108 IPV6_JOIN_GROUP
, &mreq
, sizeof(mreq
));
2110 zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
2115 void gm_group_delete(struct gm_if
*gm_ifp
)
2118 struct gm_packet_state
*pkt
;
2119 struct gm_grp_pending
*pend_grp
;
2120 struct gm_gsq_pending
*pend_gsq
;
2121 struct gm_subscriber
*subscriber
;
2123 while ((pkt
= gm_packet_expires_first(gm_ifp
->expires
)))
2124 gm_packet_drop(pkt
, false);
2126 while ((pend_grp
= gm_grp_pends_pop(gm_ifp
->grp_pends
))) {
2127 EVENT_OFF(pend_grp
->t_expire
);
2128 XFREE(MTYPE_GM_GRP_PENDING
, pend_grp
);
2131 while ((pend_gsq
= gm_gsq_pends_pop(gm_ifp
->gsq_pends
))) {
2132 EVENT_OFF(pend_gsq
->t_send
);
2133 XFREE(MTYPE_GM_GSQ_PENDING
, pend_gsq
);
2136 while ((sg
= gm_sgs_pop(gm_ifp
->sgs
))) {
2137 EVENT_OFF(sg
->t_sg_expire
);
2138 assertf(!gm_packet_sg_subs_count(sg
->subs_negative
), "%pSG",
2140 assertf(!gm_packet_sg_subs_count(sg
->subs_positive
), "%pSG",
2145 while ((subscriber
= gm_subscribers_pop(gm_ifp
->subscribers
))) {
2146 assertf(!gm_packets_count(subscriber
->packets
), "%pPA",
2148 XFREE(MTYPE_GM_SUBSCRIBER
, subscriber
);
2152 void gm_ifp_teardown(struct interface
*ifp
)
2154 struct pim_interface
*pim_ifp
= ifp
->info
;
2155 struct gm_if
*gm_ifp
;
2157 if (!pim_ifp
|| !pim_ifp
->mld
)
2160 gm_ifp
= pim_ifp
->mld
;
2161 gm_ifp
->stopping
= true;
2162 if (PIM_DEBUG_GM_EVENTS
)
2163 zlog_debug(log_ifp("MLD stop"));
2165 EVENT_OFF(gm_ifp
->t_query
);
2166 EVENT_OFF(gm_ifp
->t_other_querier
);
2167 EVENT_OFF(gm_ifp
->t_expire
);
2169 frr_with_privs (&pimd_privs
) {
2170 struct ipv6_mreq mreq
;
2173 /* all-MLDv2 group */
2174 mreq
.ipv6mr_multiaddr
= gm_all_routers
;
2175 mreq
.ipv6mr_interface
= ifp
->ifindex
;
2176 ret
= setsockopt(gm_ifp
->pim
->gm_socket
, SOL_IPV6
,
2177 IPV6_LEAVE_GROUP
, &mreq
, sizeof(mreq
));
2180 "(%s) failed to leave ff02::16 (all-MLDv2): %m",
2184 gm_vrf_socket_decref(gm_ifp
->pim
);
2186 gm_group_delete(gm_ifp
);
2188 gm_grp_pends_fini(gm_ifp
->grp_pends
);
2189 gm_packet_expires_fini(gm_ifp
->expires
);
2190 gm_subscribers_fini(gm_ifp
->subscribers
);
2191 gm_sgs_fini(gm_ifp
->sgs
);
2193 XFREE(MTYPE_GM_IFACE
, gm_ifp
);
2194 pim_ifp
->mld
= NULL
;
2197 static void gm_update_ll(struct interface
*ifp
)
2199 struct pim_interface
*pim_ifp
= ifp
->info
;
2200 struct gm_if
*gm_ifp
= pim_ifp
->mld
;
2204 !IPV6_ADDR_CMP(&gm_ifp
->cur_ll_lowest
, &gm_ifp
->querier
) &&
2205 !pim_addr_is_any(gm_ifp
->querier
);
2207 gm_ifp
->cur_ll_lowest
= pim_ifp
->ll_lowest
;
2209 gm_ifp
->querier
= pim_ifp
->ll_lowest
;
2210 EVENT_OFF(gm_ifp
->t_query
);
2212 if (pim_addr_is_any(gm_ifp
->cur_ll_lowest
)) {
2215 "lost link-local address, stopping querier"));
2220 zlog_info(log_ifp("new link-local %pPA while querier"),
2221 &gm_ifp
->cur_ll_lowest
);
2222 else if (IPV6_ADDR_CMP(&gm_ifp
->cur_ll_lowest
, &gm_ifp
->querier
) < 0 ||
2223 pim_addr_is_any(gm_ifp
->querier
)) {
2224 zlog_info(log_ifp("new link-local %pPA, becoming querier"),
2225 &gm_ifp
->cur_ll_lowest
);
2226 gm_ifp
->querier
= gm_ifp
->cur_ll_lowest
;
2230 gm_ifp
->n_startup
= gm_ifp
->cur_qrv
;
2231 event_execute(router
->master
, gm_t_query
, gm_ifp
, 0);
2234 void gm_ifp_update(struct interface
*ifp
)
2236 struct pim_interface
*pim_ifp
= ifp
->info
;
2237 struct gm_if
*gm_ifp
;
2238 bool changed
= false;
2242 if (!if_is_operative(ifp
) || !pim_ifp
->pim
||
2243 pim_ifp
->mroute_vif_index
< 0) {
2244 gm_ifp_teardown(ifp
);
2249 * If ipv6 mld is not enabled on interface, do not start mld activites.
2251 if (!pim_ifp
->gm_enable
)
2254 if (!pim_ifp
->mld
) {
2259 gm_ifp
= pim_ifp
->mld
;
2260 if (IPV6_ADDR_CMP(&pim_ifp
->ll_lowest
, &gm_ifp
->cur_ll_lowest
))
2263 unsigned int cfg_query_intv
= pim_ifp
->gm_default_query_interval
* 1000;
2265 if (gm_ifp
->cur_query_intv
!= cfg_query_intv
) {
2266 gm_ifp
->cur_query_intv
= cfg_query_intv
;
2270 unsigned int cfg_query_intv_trig
=
2271 pim_ifp
->gm_specific_query_max_response_time_dsec
* 100;
2273 if (gm_ifp
->cur_query_intv_trig
!= cfg_query_intv_trig
) {
2274 gm_ifp
->cur_query_intv_trig
= cfg_query_intv_trig
;
2278 unsigned int cfg_max_response
=
2279 pim_ifp
->gm_query_max_response_time_dsec
* 100;
2281 if (gm_ifp
->cur_max_resp
!= cfg_max_response
)
2282 gm_ifp
->cur_max_resp
= cfg_max_response
;
2284 if (gm_ifp
->cur_lmqc
!= pim_ifp
->gm_last_member_query_count
)
2285 gm_ifp
->cur_lmqc
= pim_ifp
->gm_last_member_query_count
;
2287 enum gm_version cfg_version
;
2289 if (pim_ifp
->mld_version
== 1)
2290 cfg_version
= GM_MLDV1
;
2292 cfg_version
= GM_MLDV2
;
2293 if (gm_ifp
->cur_version
!= cfg_version
) {
2294 gm_ifp
->cur_version
= cfg_version
;
2299 if (PIM_DEBUG_GM_TRACE
)
2301 "MLD querier config changed, querying"));
2302 gm_bump_querier(gm_ifp
);
2307 * CLI (show commands only)
2310 #include "lib/command.h"
2312 #include "pimd/pim6_mld_clippy.c"
2314 static struct vrf
*gm_cmd_vrf_lookup(struct vty
*vty
, const char *vrf_str
,
2320 return vrf_lookup_by_id(VRF_DEFAULT
);
2321 if (!strcmp(vrf_str
, "all"))
2323 ret
= vrf_lookup_by_name(vrf_str
);
2327 vty_out(vty
, "%% VRF %pSQq does not exist\n", vrf_str
);
2332 static void gm_show_if_one_detail(struct vty
*vty
, struct interface
*ifp
)
2334 struct pim_interface
*pim_ifp
= (struct pim_interface
*)ifp
->info
;
2335 struct gm_if
*gm_ifp
;
2340 vty_out(vty
, "Interface %s: no PIM/MLD config\n\n", ifp
->name
);
2344 gm_ifp
= pim_ifp
->mld
;
2346 vty_out(vty
, "Interface %s: MLD not running\n\n", ifp
->name
);
2350 querier
= IPV6_ADDR_SAME(&gm_ifp
->querier
, &pim_ifp
->ll_lowest
);
2352 vty_out(vty
, "Interface %s: MLD running\n", ifp
->name
);
2353 vty_out(vty
, " Uptime: %pTVMs\n", &gm_ifp
->started
);
2354 vty_out(vty
, " MLD version: %d\n", gm_ifp
->cur_version
);
2355 vty_out(vty
, " Querier: %pPA%s\n", &gm_ifp
->querier
,
2356 querier
? " (this system)" : "");
2357 vty_out(vty
, " Query timer: %pTH\n", gm_ifp
->t_query
);
2358 vty_out(vty
, " Other querier timer: %pTH\n",
2359 gm_ifp
->t_other_querier
);
2360 vty_out(vty
, " Robustness value: %u\n", gm_ifp
->cur_qrv
);
2361 vty_out(vty
, " Query interval: %ums\n",
2362 gm_ifp
->cur_query_intv
);
2363 vty_out(vty
, " Query response timer: %ums\n", gm_ifp
->cur_max_resp
);
2364 vty_out(vty
, " Last member query intv.: %ums\n",
2365 gm_ifp
->cur_query_intv_trig
);
2366 vty_out(vty
, " %u expiry timers from general queries:\n",
2368 for (i
= 0; i
< gm_ifp
->n_pending
; i
++) {
2369 struct gm_general_pending
*p
= &gm_ifp
->pending
[i
];
2371 vty_out(vty
, " %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
2372 &p
->query
, &p
->expiry
);
2374 vty_out(vty
, " %zu expiry timers from *,G queries\n",
2375 gm_grp_pends_count(gm_ifp
->grp_pends
));
2376 vty_out(vty
, " %zu expiry timers from S,G queries\n",
2377 gm_gsq_pends_count(gm_ifp
->gsq_pends
));
2378 vty_out(vty
, " %zu total *,G/S,G from %zu hosts in %zu bundles\n",
2379 gm_sgs_count(gm_ifp
->sgs
),
2380 gm_subscribers_count(gm_ifp
->subscribers
),
2381 gm_packet_expires_count(gm_ifp
->expires
));
2385 static void gm_show_if_one(struct vty
*vty
, struct interface
*ifp
,
2386 json_object
*js_if
, struct ttable
*tt
)
2388 struct pim_interface
*pim_ifp
= (struct pim_interface
*)ifp
->info
;
2389 struct gm_if
*gm_ifp
= pim_ifp
->mld
;
2392 querier
= IPV6_ADDR_SAME(&gm_ifp
->querier
, &pim_ifp
->ll_lowest
);
2395 json_object_string_add(js_if
, "name", ifp
->name
);
2396 json_object_string_addf(js_if
, "address", "%pPA",
2397 &pim_ifp
->primary_address
);
2398 json_object_string_add(js_if
, "state", "up");
2399 json_object_string_addf(js_if
, "version", "%d",
2400 gm_ifp
->cur_version
);
2401 json_object_string_addf(js_if
, "upTime", "%pTVMs",
2403 json_object_boolean_add(js_if
, "querier", querier
);
2404 json_object_string_addf(js_if
, "querierIp", "%pPA",
2407 json_object_string_addf(js_if
, "queryTimer", "%pTH",
2410 json_object_string_addf(js_if
, "otherQuerierTimer",
2412 gm_ifp
->t_other_querier
);
2413 json_object_int_add(js_if
, "timerRobustnessValue",
2415 json_object_int_add(js_if
, "lastMemberQueryCount",
2417 json_object_int_add(js_if
, "timerQueryIntervalMsec",
2418 gm_ifp
->cur_query_intv
);
2419 json_object_int_add(js_if
, "timerQueryResponseTimerMsec",
2420 gm_ifp
->cur_max_resp
);
2421 json_object_int_add(js_if
, "timerLastMemberQueryIntervalMsec",
2422 gm_ifp
->cur_query_intv_trig
);
2424 ttable_add_row(tt
, "%s|%s|%pPAs|%d|%s|%pPAs|%pTH|%pTVMs",
2425 ifp
->name
, "up", &pim_ifp
->primary_address
,
2426 gm_ifp
->cur_version
, querier
? "local" : "other",
2427 &gm_ifp
->querier
, gm_ifp
->t_query
,
2432 static void gm_show_if_vrf(struct vty
*vty
, struct vrf
*vrf
, const char *ifname
,
2433 bool detail
, json_object
*js
)
2435 struct interface
*ifp
;
2436 json_object
*js_vrf
= NULL
;
2437 struct pim_interface
*pim_ifp
;
2438 struct ttable
*tt
= NULL
;
2442 js_vrf
= json_object_new_object();
2443 json_object_object_add(js
, vrf
->name
, js_vrf
);
2446 if (!js
&& !detail
) {
2447 /* Prepare table. */
2448 tt
= ttable_new(&ttable_styles
[TTSTYLE_BLANK
]);
2451 "Interface|State|Address|V|Querier|QuerierIp|Query Timer|Uptime");
2452 tt
->style
.cell
.rpad
= 2;
2453 tt
->style
.corner
= '+';
2457 FOR_ALL_INTERFACES (vrf
, ifp
) {
2458 json_object
*js_if
= NULL
;
2460 if (ifname
&& strcmp(ifp
->name
, ifname
))
2462 if (detail
&& !js
) {
2463 gm_show_if_one_detail(vty
, ifp
);
2467 pim_ifp
= ifp
->info
;
2469 if (!pim_ifp
|| !pim_ifp
->mld
)
2473 js_if
= json_object_new_object();
2475 * If we have js as true and detail as false
2476 * and if Coverity thinks that js_if is NULL
2477 * because of a failed call to new then
2478 * when we call gm_show_if_one below
2479 * the tt can be deref'ed and as such
2480 * FRR will crash. But since we know
2481 * that json_object_new_object never fails
2482 * then let's tell Coverity that this assumption
2483 * is true. I'm not worried about fast path
2487 json_object_object_add(js_vrf
, ifp
->name
, js_if
);
2490 gm_show_if_one(vty
, ifp
, js_if
, tt
);
2493 /* Dump the generated table. */
2494 if (!js
&& !detail
) {
2495 table
= ttable_dump(tt
, "\n");
2496 vty_out(vty
, "%s\n", table
);
2497 XFREE(MTYPE_TMP
, table
);
2502 static void gm_show_if(struct vty
*vty
, struct vrf
*vrf
, const char *ifname
,
2503 bool detail
, json_object
*js
)
2506 gm_show_if_vrf(vty
, vrf
, ifname
, detail
, js
);
2508 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
2509 gm_show_if_vrf(vty
, vrf
, ifname
, detail
, js
);
2512 DEFPY(gm_show_interface
,
2513 gm_show_interface_cmd
,
2514 "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME | detail$detail] [json$json]",
2518 VRF_FULL_CMD_HELP_STR
2519 "MLD interface information\n"
2524 int ret
= CMD_SUCCESS
;
2526 json_object
*js
= NULL
;
2528 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
2529 if (ret
!= CMD_SUCCESS
)
2533 js
= json_object_new_object();
2534 gm_show_if(vty
, vrf
, ifname
, !!detail
, js
);
2535 return vty_json(vty
, js
);
2538 static void gm_show_stats_one(struct vty
*vty
, struct gm_if
*gm_ifp
,
2541 struct gm_if_stats
*stats
= &gm_ifp
->stats
;
2542 /* clang-format off */
2547 } *item
, items
[] = {
2548 { "v2 reports received", "rxV2Reports", &stats
->rx_new_report
},
2549 { "v1 reports received", "rxV1Reports", &stats
->rx_old_report
},
2550 { "v1 done received", "rxV1Done", &stats
->rx_old_leave
},
2552 { "v2 *,* queries received", "rxV2QueryGeneral", &stats
->rx_query_new_general
},
2553 { "v2 *,G queries received", "rxV2QueryGroup", &stats
->rx_query_new_group
},
2554 { "v2 S,G queries received", "rxV2QueryGroupSource", &stats
->rx_query_new_groupsrc
},
2555 { "v2 S-bit queries received", "rxV2QuerySBit", &stats
->rx_query_new_sbit
},
2556 { "v1 *,* queries received", "rxV1QueryGeneral", &stats
->rx_query_old_general
},
2557 { "v1 *,G queries received", "rxV1QueryGroup", &stats
->rx_query_old_group
},
2559 { "v2 *,* queries sent", "txV2QueryGeneral", &stats
->tx_query_new_general
},
2560 { "v2 *,G queries sent", "txV2QueryGroup", &stats
->tx_query_new_group
},
2561 { "v2 S,G queries sent", "txV2QueryGroupSource", &stats
->tx_query_new_groupsrc
},
2562 { "v1 *,* queries sent", "txV1QueryGeneral", &stats
->tx_query_old_general
},
2563 { "v1 *,G queries sent", "txV1QueryGroup", &stats
->tx_query_old_group
},
2564 { "TX errors", "txErrors", &stats
->tx_query_fail
},
2566 { "RX dropped (checksum error)", "rxDropChecksum", &stats
->rx_drop_csum
},
2567 { "RX dropped (invalid source)", "rxDropSrcAddr", &stats
->rx_drop_srcaddr
},
2568 { "RX dropped (invalid dest.)", "rxDropDstAddr", &stats
->rx_drop_dstaddr
},
2569 { "RX dropped (missing alert)", "rxDropRtrAlert", &stats
->rx_drop_ra
},
2570 { "RX dropped (malformed pkt.)", "rxDropMalformed", &stats
->rx_drop_malformed
},
2571 { "RX truncated reports", "rxTruncatedRep", &stats
->rx_trunc_report
},
2573 /* clang-format on */
2575 for (item
= items
; item
< items
+ array_size(items
); item
++) {
2577 json_object_int_add(js_if
, item
->js_key
, *item
->val
);
2579 vty_out(vty
, " %-30s %" PRIu64
"\n", item
->text
,
2584 static void gm_show_stats_vrf(struct vty
*vty
, struct vrf
*vrf
,
2585 const char *ifname
, json_object
*js
)
2587 struct interface
*ifp
;
2588 json_object
*js_vrf
;
2591 js_vrf
= json_object_new_object();
2592 json_object_object_add(js
, vrf
->name
, js_vrf
);
2595 FOR_ALL_INTERFACES (vrf
, ifp
) {
2596 struct pim_interface
*pim_ifp
;
2597 struct gm_if
*gm_ifp
;
2598 json_object
*js_if
= NULL
;
2600 if (ifname
&& strcmp(ifp
->name
, ifname
))
2605 pim_ifp
= ifp
->info
;
2608 gm_ifp
= pim_ifp
->mld
;
2611 js_if
= json_object_new_object();
2612 json_object_object_add(js_vrf
, ifp
->name
, js_if
);
2614 vty_out(vty
, "Interface: %s\n", ifp
->name
);
2616 gm_show_stats_one(vty
, gm_ifp
, js_if
);
2622 DEFPY(gm_show_interface_stats
,
2623 gm_show_interface_stats_cmd
,
2624 "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
2628 VRF_FULL_CMD_HELP_STR
2634 int ret
= CMD_SUCCESS
;
2636 json_object
*js
= NULL
;
2638 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
2639 if (ret
!= CMD_SUCCESS
)
2643 js
= json_object_new_object();
2646 gm_show_stats_vrf(vty
, vrf
, ifname
, js
);
2648 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
2649 gm_show_stats_vrf(vty
, vrf
, ifname
, js
);
2650 return vty_json(vty
, js
);
2653 static void gm_show_joins_one(struct vty
*vty
, struct gm_if
*gm_ifp
,
2654 const struct prefix_ipv6
*groups
,
2655 const struct prefix_ipv6
*sources
, bool detail
,
2658 struct gm_sg
*sg
, *sg_start
;
2659 json_object
*js_group
= NULL
;
2660 pim_addr js_grpaddr
= PIMADDR_ANY
;
2661 struct gm_subscriber sub_ref
= {}, *sub_untracked
;
2664 struct gm_sg sg_ref
= {};
2666 sg_ref
.sgaddr
.grp
= pim_addr_from_prefix(groups
);
2667 sg_start
= gm_sgs_find_gteq(gm_ifp
->sgs
, &sg_ref
);
2669 sg_start
= gm_sgs_first(gm_ifp
->sgs
);
2671 sub_ref
.addr
= gm_dummy_untracked
;
2672 sub_untracked
= gm_subscribers_find(gm_ifp
->subscribers
, &sub_ref
);
2673 /* NB: sub_untracked may be NULL if no untracked joins exist */
2675 frr_each_from (gm_sgs
, gm_ifp
->sgs
, sg
, sg_start
) {
2676 struct timeval
*recent
= NULL
, *untracked
= NULL
;
2677 json_object
*js_src
;
2680 struct prefix grp_p
;
2682 pim_addr_to_prefix(&grp_p
, sg
->sgaddr
.grp
);
2683 if (!prefix_match(groups
, &grp_p
))
2688 struct prefix src_p
;
2690 pim_addr_to_prefix(&src_p
, sg
->sgaddr
.src
);
2691 if (!prefix_match(sources
, &src_p
))
2695 if (sg
->most_recent
) {
2696 struct gm_packet_state
*packet
;
2698 packet
= gm_packet_sg2state(sg
->most_recent
);
2699 recent
= &packet
->received
;
2702 if (sub_untracked
) {
2703 struct gm_packet_state
*packet
;
2704 struct gm_packet_sg
*item
;
2706 item
= gm_packet_sg_find(sg
, GM_SUB_POS
, sub_untracked
);
2708 packet
= gm_packet_sg2state(item
);
2709 untracked
= &packet
->received
;
2714 FMT_NSTD_BEGIN
; /* %.0p */
2716 "%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
2717 &sg
->sgaddr
.grp
, &sg
->sgaddr
.src
,
2718 gm_states
[sg
->state
], recent
, untracked
,
2724 struct gm_packet_sg
*item
;
2725 struct gm_packet_state
*packet
;
2727 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
2728 packet
= gm_packet_sg2state(item
);
2730 if (packet
->subscriber
== sub_untracked
)
2732 vty_out(vty
, " %-58pPA %-16s %10.0pTVMs\n",
2733 &packet
->subscriber
->addr
, "(JOIN)",
2736 frr_each (gm_packet_sg_subs
, sg
->subs_negative
, item
) {
2737 packet
= gm_packet_sg2state(item
);
2739 if (packet
->subscriber
== sub_untracked
)
2741 vty_out(vty
, " %-58pPA %-16s %10.0pTVMs\n",
2742 &packet
->subscriber
->addr
, "(PRUNE)",
2745 FMT_NSTD_END
; /* %.0p */
2750 if (!js_group
|| pim_addr_cmp(js_grpaddr
, sg
->sgaddr
.grp
)) {
2751 js_group
= json_object_new_object();
2752 json_object_object_addf(js_if
, js_group
, "%pPA",
2754 js_grpaddr
= sg
->sgaddr
.grp
;
2757 js_src
= json_object_new_object();
2758 json_object_object_addf(js_group
, js_src
, "%pPAs",
2761 json_object_string_add(js_src
, "state", gm_states
[sg
->state
]);
2762 json_object_string_addf(js_src
, "created", "%pTVMs",
2764 json_object_string_addf(js_src
, "lastSeen", "%pTVMs", recent
);
2767 json_object_string_addf(js_src
, "untrackedLastSeen",
2768 "%pTVMs", untracked
);
2772 json_object
*js_subs
;
2773 struct gm_packet_sg
*item
;
2774 struct gm_packet_state
*packet
;
2776 js_subs
= json_object_new_object();
2777 json_object_object_add(js_src
, "joinedBy", js_subs
);
2778 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
2779 packet
= gm_packet_sg2state(item
);
2780 if (packet
->subscriber
== sub_untracked
)
2783 json_object
*js_sub
;
2785 js_sub
= json_object_new_object();
2786 json_object_object_addf(js_subs
, js_sub
, "%pPA",
2787 &packet
->subscriber
->addr
);
2788 json_object_string_addf(js_sub
, "lastSeen", "%pTVMs",
2792 js_subs
= json_object_new_object();
2793 json_object_object_add(js_src
, "prunedBy", js_subs
);
2794 frr_each (gm_packet_sg_subs
, sg
->subs_negative
, item
) {
2795 packet
= gm_packet_sg2state(item
);
2796 if (packet
->subscriber
== sub_untracked
)
2799 json_object
*js_sub
;
2801 js_sub
= json_object_new_object();
2802 json_object_object_addf(js_subs
, js_sub
, "%pPA",
2803 &packet
->subscriber
->addr
);
2804 json_object_string_addf(js_sub
, "lastSeen", "%pTVMs",
2810 static void gm_show_joins_vrf(struct vty
*vty
, struct vrf
*vrf
,
2812 const struct prefix_ipv6
*groups
,
2813 const struct prefix_ipv6
*sources
, bool detail
,
2816 struct interface
*ifp
;
2817 json_object
*js_vrf
;
2820 js_vrf
= json_object_new_object();
2821 json_object_string_add(js_vrf
, "vrf", vrf
->name
);
2822 json_object_object_add(js
, vrf
->name
, js_vrf
);
2825 FOR_ALL_INTERFACES (vrf
, ifp
) {
2826 struct pim_interface
*pim_ifp
;
2827 struct gm_if
*gm_ifp
;
2828 json_object
*js_if
= NULL
;
2830 if (ifname
&& strcmp(ifp
->name
, ifname
))
2835 pim_ifp
= ifp
->info
;
2838 gm_ifp
= pim_ifp
->mld
;
2841 js_if
= json_object_new_object();
2842 json_object_object_add(js_vrf
, ifp
->name
, js_if
);
2846 vty_out(vty
, "\nOn interface %s:\n", ifp
->name
);
2848 gm_show_joins_one(vty
, gm_ifp
, groups
, sources
, detail
, js_if
);
2852 DEFPY(gm_show_interface_joins
,
2853 gm_show_interface_joins_cmd
,
2854 "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
2858 VRF_FULL_CMD_HELP_STR
2859 "MLD joined groups & sources\n"
2862 "Limit output to group range\n"
2863 "Show groups covered by this prefix\n"
2864 "Limit output to source range\n"
2865 "Show sources covered by this prefix\n"
2866 "Show details, including tracked receivers\n"
2869 int ret
= CMD_SUCCESS
;
2871 json_object
*js
= NULL
;
2873 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
2874 if (ret
!= CMD_SUCCESS
)
2878 js
= json_object_new_object();
2880 vty_out(vty
, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
2881 "Source", "State", "LastSeen", "NonTrkSeen", "Created");
2884 gm_show_joins_vrf(vty
, vrf
, ifname
, groups
, sources
, !!detail
,
2887 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
2888 gm_show_joins_vrf(vty
, vrf
, ifname
, groups
, sources
,
2890 return vty_json(vty
, js
);
2893 static void gm_show_groups(struct vty
*vty
, struct vrf
*vrf
, bool uj
)
2895 struct interface
*ifp
;
2896 struct ttable
*tt
= NULL
;
2898 json_object
*json
= NULL
;
2899 json_object
*json_iface
= NULL
;
2900 json_object
*json_group
= NULL
;
2901 json_object
*json_groups
= NULL
;
2902 struct pim_instance
*pim
= vrf
->info
;
2905 json
= json_object_new_object();
2906 json_object_int_add(json
, "totalGroups", pim
->gm_group_count
);
2907 json_object_int_add(json
, "watermarkLimit",
2908 pim
->gm_watermark_limit
);
2910 /* Prepare table. */
2911 tt
= ttable_new(&ttable_styles
[TTSTYLE_BLANK
]);
2912 ttable_add_row(tt
, "Interface|Group|Version|Uptime");
2913 tt
->style
.cell
.rpad
= 2;
2914 tt
->style
.corner
= '+';
2917 vty_out(vty
, "Total MLD groups: %u\n", pim
->gm_group_count
);
2918 vty_out(vty
, "Watermark warn limit(%s): %u\n",
2919 pim
->gm_watermark_limit
? "Set" : "Not Set",
2920 pim
->gm_watermark_limit
);
2923 /* scan interfaces */
2924 FOR_ALL_INTERFACES (vrf
, ifp
) {
2926 struct pim_interface
*pim_ifp
= ifp
->info
;
2927 struct gm_if
*gm_ifp
;
2933 gm_ifp
= pim_ifp
->mld
;
2937 /* scan mld groups */
2938 frr_each (gm_sgs
, gm_ifp
->sgs
, sg
) {
2941 json_object_object_get_ex(json
, ifp
->name
,
2945 json_iface
= json_object_new_object();
2946 json_object_pim_ifp_add(json_iface
,
2948 json_object_object_add(json
, ifp
->name
,
2950 json_groups
= json_object_new_array();
2951 json_object_object_add(json_iface
,
2956 json_group
= json_object_new_object();
2957 json_object_string_addf(json_group
, "group",
2961 json_object_int_add(json_group
, "version",
2962 pim_ifp
->mld_version
);
2963 json_object_string_addf(json_group
, "uptime",
2964 "%pTVMs", &sg
->created
);
2965 json_object_array_add(json_groups
, json_group
);
2967 ttable_add_row(tt
, "%s|%pPAs|%d|%pTVMs",
2968 ifp
->name
, &sg
->sgaddr
.grp
,
2969 pim_ifp
->mld_version
,
2972 } /* scan gm groups */
2973 } /* scan interfaces */
2976 vty_json(vty
, json
);
2978 /* Dump the generated table. */
2979 table
= ttable_dump(tt
, "\n");
2980 vty_out(vty
, "%s\n", table
);
2981 XFREE(MTYPE_TMP
, table
);
2986 DEFPY(gm_show_mld_groups
,
2987 gm_show_mld_groups_cmd
,
2988 "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
2992 VRF_FULL_CMD_HELP_STR
2996 int ret
= CMD_SUCCESS
;
2999 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
3000 if (ret
!= CMD_SUCCESS
)
3004 gm_show_groups(vty
, vrf
, !!json
);
3006 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
3007 gm_show_groups(vty
, vrf
, !!json
);
3012 DEFPY(gm_debug_show
,
3014 "debug show mld interface IFNAME",
3021 struct interface
*ifp
;
3022 struct pim_interface
*pim_ifp
;
3023 struct gm_if
*gm_ifp
;
3025 ifp
= if_lookup_by_name(ifname
, VRF_DEFAULT
);
3027 vty_out(vty
, "%% no such interface: %pSQq\n", ifname
);
3031 pim_ifp
= ifp
->info
;
3033 vty_out(vty
, "%% no PIM state for interface %pSQq\n", ifname
);
3037 gm_ifp
= pim_ifp
->mld
;
3039 vty_out(vty
, "%% no MLD state for interface %pSQq\n", ifname
);
3043 vty_out(vty
, "querier: %pPA\n", &gm_ifp
->querier
);
3044 vty_out(vty
, "ll_lowest: %pPA\n\n", &pim_ifp
->ll_lowest
);
3045 vty_out(vty
, "t_query: %pTHD\n", gm_ifp
->t_query
);
3046 vty_out(vty
, "t_other_querier: %pTHD\n", gm_ifp
->t_other_querier
);
3047 vty_out(vty
, "t_expire: %pTHD\n", gm_ifp
->t_expire
);
3049 vty_out(vty
, "\nn_pending: %u\n", gm_ifp
->n_pending
);
3050 for (size_t i
= 0; i
< gm_ifp
->n_pending
; i
++) {
3051 int64_t query
, expiry
;
3053 query
= monotime_since(&gm_ifp
->pending
[i
].query
, NULL
);
3054 expiry
= monotime_until(&gm_ifp
->pending
[i
].expiry
, NULL
);
3056 vty_out(vty
, "[%zu]: query %"PRId64
"ms ago, expiry in %"PRId64
"ms\n",
3057 i
, query
/ 1000, expiry
/ 1000);
3061 struct gm_packet_state
*pkt
;
3062 struct gm_packet_sg
*item
;
3063 struct gm_subscriber
*subscriber
;
3065 vty_out(vty
, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp
->sgs
));
3066 frr_each (gm_sgs
, gm_ifp
->sgs
, sg
) {
3067 vty_out(vty
, "\t%pSG t_expire=%pTHD\n", &sg
->sgaddr
,
3070 vty_out(vty
, "\t @pos:%zu\n",
3071 gm_packet_sg_subs_count(sg
->subs_positive
));
3072 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
3073 pkt
= gm_packet_sg2state(item
);
3075 vty_out(vty
, "\t\t+%s%s [%pPAs %p] %p+%u\n",
3076 item
->is_src
? "S" : "",
3077 item
->is_excl
? "E" : "",
3078 &pkt
->subscriber
->addr
, pkt
->subscriber
, pkt
,
3081 assert(item
->sg
== sg
);
3083 vty_out(vty
, "\t @neg:%zu\n",
3084 gm_packet_sg_subs_count(sg
->subs_negative
));
3085 frr_each (gm_packet_sg_subs
, sg
->subs_negative
, item
) {
3086 pkt
= gm_packet_sg2state(item
);
3088 vty_out(vty
, "\t\t-%s%s [%pPAs %p] %p+%u\n",
3089 item
->is_src
? "S" : "",
3090 item
->is_excl
? "E" : "",
3091 &pkt
->subscriber
->addr
, pkt
->subscriber
, pkt
,
3094 assert(item
->sg
== sg
);
3098 vty_out(vty
, "\n%zu subscribers:\n",
3099 gm_subscribers_count(gm_ifp
->subscribers
));
3100 frr_each (gm_subscribers
, gm_ifp
->subscribers
, subscriber
) {
3101 vty_out(vty
, "\t%pPA %p %zu packets\n", &subscriber
->addr
,
3102 subscriber
, gm_packets_count(subscriber
->packets
));
3104 frr_each (gm_packets
, subscriber
->packets
, pkt
) {
3105 vty_out(vty
, "\t\t%p %.3fs ago %u of %u items active\n",
3107 monotime_since(&pkt
->received
, NULL
) *
3109 pkt
->n_active
, pkt
->n_sg
);
3111 for (size_t i
= 0; i
< pkt
->n_sg
; i
++) {
3112 item
= pkt
->items
+ i
;
3114 vty_out(vty
, "\t\t[%zu]", i
);
3117 vty_out(vty
, " inactive\n");
3121 vty_out(vty
, " %s%s %pSG nE=%u\n",
3122 item
->is_src
? "S" : "",
3123 item
->is_excl
? "E" : "",
3124 &item
->sg
->sgaddr
, item
->n_exclude
);
3132 DEFPY(gm_debug_iface_cfg
,
3133 gm_debug_iface_cfg_cmd
,
3136 "query-max-response-time (1-8387584)"
3140 "Multicast Listener Discovery\n"
3142 "maxresp\nmaxresp\n")
3144 VTY_DECLVAR_CONTEXT(interface
, ifp
);
3145 struct pim_interface
*pim_ifp
;
3146 struct gm_if
*gm_ifp
;
3147 bool changed
= false;
3149 pim_ifp
= ifp
->info
;
3151 vty_out(vty
, "%% no PIM state for interface %pSQq\n",
3155 gm_ifp
= pim_ifp
->mld
;
3157 vty_out(vty
, "%% no MLD state for interface %pSQq\n",
3162 if (robustness_str
&& gm_ifp
->cur_qrv
!= robustness
) {
3163 gm_ifp
->cur_qrv
= robustness
;
3166 if (query_max_response_time_str
&&
3167 gm_ifp
->cur_max_resp
!= (unsigned int)query_max_response_time
) {
3168 gm_ifp
->cur_max_resp
= query_max_response_time
;
3173 vty_out(vty
, "%% MLD querier config changed, bumping\n");
3174 gm_bump_querier(gm_ifp
);
3179 void gm_cli_init(void);
3181 void gm_cli_init(void)
3183 install_element(VIEW_NODE
, &gm_show_interface_cmd
);
3184 install_element(VIEW_NODE
, &gm_show_interface_stats_cmd
);
3185 install_element(VIEW_NODE
, &gm_show_interface_joins_cmd
);
3186 install_element(VIEW_NODE
, &gm_show_mld_groups_cmd
);
3188 install_element(VIEW_NODE
, &gm_debug_show_cmd
);
3189 install_element(INTERFACE_NODE
, &gm_debug_iface_cfg_cmd
);