1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
8 * keep pim6_mld.h open when working on this code. Most data structures are
9 * commented in the header.
11 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
12 * that this code will replace the old IGMP querier at some point.
16 #include <netinet/ip6.h>
18 #include "lib/memory.h"
19 #include "lib/jhash.h"
20 #include "lib/prefix.h"
21 #include "lib/checksum.h"
22 #include "lib/frrevent.h"
23 #include "termtable.h"
25 #include "pimd/pim6_mld.h"
26 #include "pimd/pim6_mld_protocol.h"
27 #include "pimd/pim_memory.h"
28 #include "pimd/pim_instance.h"
29 #include "pimd/pim_iface.h"
30 #include "pimd/pim6_cmd.h"
31 #include "pimd/pim_cmd_common.h"
32 #include "pimd/pim_util.h"
33 #include "pimd/pim_tib.h"
34 #include "pimd/pimd.h"
36 #ifndef IPV6_MULTICAST_ALL
37 #define IPV6_MULTICAST_ALL 29
40 DEFINE_MTYPE_STATIC(PIMD
, GM_IFACE
, "MLD interface");
41 DEFINE_MTYPE_STATIC(PIMD
, GM_PACKET
, "MLD packet");
42 DEFINE_MTYPE_STATIC(PIMD
, GM_SUBSCRIBER
, "MLD subscriber");
43 DEFINE_MTYPE_STATIC(PIMD
, GM_STATE
, "MLD subscription state");
44 DEFINE_MTYPE_STATIC(PIMD
, GM_SG
, "MLD (S,G)");
45 DEFINE_MTYPE_STATIC(PIMD
, GM_GRP_PENDING
, "MLD group query state");
46 DEFINE_MTYPE_STATIC(PIMD
, GM_GSQ_PENDING
, "MLD group/source query aggregate");
/* forward declarations for functions referenced before their definition */
static void gm_t_query(struct event *t);
static void gm_trigger_specific(struct gm_sg *sg);
static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
			      struct timeval expire_wait);
/* shorthand for log messages; these expand to a format string plus its
 * leading arguments and rely on a local gm_ifp / pkt_src / sg variable
 * being in scope at the call site
 */
#define log_ifp(msg) \
	"[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
#define log_pkt_src(msg) \
	"[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
		&pkt_src->sin6_addr
#define log_sg(sg, msg) \
	"[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
		sg->iface->ifp->name, &sg->sgaddr
63 /* clang-format off */
65 static const pim_addr gm_all_hosts
= {
67 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
71 static const pim_addr gm_all_routers
= {
73 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
77 /* MLDv1 does not allow subscriber tracking due to report suppression
78 * hence, the source address is replaced with ffff:...:ffff
80 static const pim_addr gm_dummy_untracked
= {
82 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
83 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
88 static const pim_addr gm_all_hosts
= { .s_addr
= htonl(0xe0000001), };
90 static const pim_addr gm_all_routers
= { .s_addr
= htonl(0xe0000016), };
91 static const pim_addr gm_dummy_untracked
= { .s_addr
= 0xffffffff, };
95 #define IPV6_MULTICAST_SCOPE_LINK 2
97 static inline uint8_t in6_multicast_scope(const pim_addr
*addr
)
99 return addr
->s6_addr
[1] & 0xf;
102 bool in6_multicast_nofwd(const pim_addr
*addr
)
104 return in6_multicast_scope(addr
) <= IPV6_MULTICAST_SCOPE_LINK
;
108 * (S,G) -> subscriber,(S,G)
111 static int gm_packet_sg_cmp(const struct gm_packet_sg
*a
,
112 const struct gm_packet_sg
*b
)
114 const struct gm_packet_state
*s_a
, *s_b
;
116 s_a
= gm_packet_sg2state(a
);
117 s_b
= gm_packet_sg2state(b
);
118 return IPV6_ADDR_CMP(&s_a
->subscriber
->addr
, &s_b
->subscriber
->addr
);
121 DECLARE_RBTREE_UNIQ(gm_packet_sg_subs
, struct gm_packet_sg
, subs_itm
,
124 static struct gm_packet_sg
*gm_packet_sg_find(struct gm_sg
*sg
,
125 enum gm_sub_sense sense
,
126 struct gm_subscriber
*sub
)
129 struct gm_packet_state hdr
;
130 struct gm_packet_sg item
;
132 /* clang-format off */
139 /* clang-format on */
142 return gm_packet_sg_subs_find(&sg
->subs
[sense
], &ref
.item
);
146 * interface -> (*,G),pending
149 static int gm_grp_pending_cmp(const struct gm_grp_pending
*a
,
150 const struct gm_grp_pending
*b
)
152 return IPV6_ADDR_CMP(&a
->grp
, &b
->grp
);
155 DECLARE_RBTREE_UNIQ(gm_grp_pends
, struct gm_grp_pending
, itm
,
159 * interface -> ([S1,S2,...],G),pending
162 static int gm_gsq_pending_cmp(const struct gm_gsq_pending
*a
,
163 const struct gm_gsq_pending
*b
)
165 if (a
->s_bit
!= b
->s_bit
)
166 return numcmp(a
->s_bit
, b
->s_bit
);
168 return IPV6_ADDR_CMP(&a
->grp
, &b
->grp
);
171 static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending
*a
)
173 uint32_t seed
= a
->s_bit
? 0x68f0eb5e : 0x156b7f19;
175 return jhash(&a
->grp
, sizeof(a
->grp
), seed
);
178 DECLARE_HASH(gm_gsq_pends
, struct gm_gsq_pending
, itm
, gm_gsq_pending_cmp
,
179 gm_gsq_pending_hash
);
185 int gm_sg_cmp(const struct gm_sg
*a
, const struct gm_sg
*b
)
187 return pim_sgaddr_cmp(a
->sgaddr
, b
->sgaddr
);
190 static struct gm_sg
*gm_sg_find(struct gm_if
*gm_ifp
, pim_addr grp
,
193 struct gm_sg ref
= {};
195 ref
.sgaddr
.grp
= grp
;
196 ref
.sgaddr
.src
= src
;
197 return gm_sgs_find(gm_ifp
->sgs
, &ref
);
200 static struct gm_sg
*gm_sg_make(struct gm_if
*gm_ifp
, pim_addr grp
,
203 struct gm_sg
*ret
, *prev
;
205 ret
= XCALLOC(MTYPE_GM_SG
, sizeof(*ret
));
206 ret
->sgaddr
.grp
= grp
;
207 ret
->sgaddr
.src
= src
;
209 prev
= gm_sgs_add(gm_ifp
->sgs
, ret
);
212 XFREE(MTYPE_GM_SG
, ret
);
215 monotime(&ret
->created
);
216 gm_packet_sg_subs_init(ret
->subs_positive
);
217 gm_packet_sg_subs_init(ret
->subs_negative
);
223 * interface -> packets, sorted by expiry (because add_tail insert order)
226 DECLARE_DLIST(gm_packet_expires
, struct gm_packet_state
, exp_itm
);
229 * subscriber -> packets
232 DECLARE_DLIST(gm_packets
, struct gm_packet_state
, pkt_itm
);
235 * interface -> subscriber
238 static int gm_subscriber_cmp(const struct gm_subscriber
*a
,
239 const struct gm_subscriber
*b
)
241 return IPV6_ADDR_CMP(&a
->addr
, &b
->addr
);
244 static uint32_t gm_subscriber_hash(const struct gm_subscriber
*a
)
246 return jhash(&a
->addr
, sizeof(a
->addr
), 0xd0e94ad4);
249 DECLARE_HASH(gm_subscribers
, struct gm_subscriber
, itm
, gm_subscriber_cmp
,
252 static struct gm_subscriber
*gm_subscriber_findref(struct gm_if
*gm_ifp
,
255 struct gm_subscriber ref
= {}, *ret
;
258 ret
= gm_subscribers_find(gm_ifp
->subscribers
, &ref
);
264 static struct gm_subscriber
*gm_subscriber_get(struct gm_if
*gm_ifp
,
267 struct gm_subscriber ref
= {}, *ret
;
270 ret
= gm_subscribers_find(gm_ifp
->subscribers
, &ref
);
273 ret
= XCALLOC(MTYPE_GM_SUBSCRIBER
, sizeof(*ret
));
277 monotime(&ret
->created
);
278 gm_packets_init(ret
->packets
);
280 gm_subscribers_add(gm_ifp
->subscribers
, ret
);
285 static void gm_subscriber_drop(struct gm_subscriber
**subp
)
287 struct gm_subscriber
*sub
= *subp
;
288 struct gm_if
*gm_ifp
;
300 gm_subscribers_del(gm_ifp
->subscribers
, sub
);
301 XFREE(MTYPE_GM_SUBSCRIBER
, sub
);
304 /****************************************************************************/
306 /* bundle query timer values for combined v1/v2 handling */
307 struct gm_query_timers
{
309 unsigned int max_resp_ms
;
310 unsigned int qqic_ms
;
313 struct timeval expire_wait
;
316 static void gm_expiry_calc(struct gm_query_timers
*timers
)
318 unsigned int expire
=
319 (timers
->qrv
- 1) * timers
->qqic_ms
+ timers
->max_resp_ms
;
320 ldiv_t exp_div
= ldiv(expire
, 1000);
322 timers
->expire_wait
.tv_sec
= exp_div
.quot
;
323 timers
->expire_wait
.tv_usec
= exp_div
.rem
* 1000;
324 timeradd(&timers
->expire_wait
, &timers
->fuzz
, &timers
->expire_wait
);
327 static void gm_sg_free(struct gm_sg
*sg
)
329 /* t_sg_expiry is handled before this is reached */
330 EVENT_OFF(sg
->t_sg_query
);
331 gm_packet_sg_subs_fini(sg
->subs_negative
);
332 gm_packet_sg_subs_fini(sg
->subs_positive
);
333 XFREE(MTYPE_GM_SG
, sg
);
336 /* clang-format off */
337 static const char *const gm_states
[] = {
338 [GM_SG_NOINFO
] = "NOINFO",
339 [GM_SG_JOIN
] = "JOIN",
340 [GM_SG_JOIN_EXPIRING
] = "JOIN_EXPIRING",
341 [GM_SG_PRUNE
] = "PRUNE",
342 [GM_SG_NOPRUNE
] = "NOPRUNE",
343 [GM_SG_NOPRUNE_EXPIRING
] = "NOPRUNE_EXPIRING",
345 /* clang-format on */
347 /* TODO: S,G entries in EXCLUDE (i.e. prune) unsupported" */
349 /* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
350 * joined. Whether we actually want/need to support this is a separate
351 * question - it is almost never used. In fact this is exactly what RFC5790
352 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
355 static void gm_sg_update(struct gm_sg
*sg
, bool has_expired
)
357 struct gm_if
*gm_ifp
= sg
->iface
;
358 enum gm_sg_state prev
, desired
;
360 struct gm_sg
*grp
= NULL
;
362 if (!pim_addr_is_any(sg
->sgaddr
.src
))
363 grp
= gm_sg_find(gm_ifp
, sg
->sgaddr
.grp
, PIMADDR_ANY
);
365 assert(sg
->state
!= GM_SG_PRUNE
);
367 if (gm_packet_sg_subs_count(sg
->subs_positive
)) {
368 desired
= GM_SG_JOIN
;
369 assert(!sg
->t_sg_expire
);
370 } else if ((sg
->state
== GM_SG_JOIN
||
371 sg
->state
== GM_SG_JOIN_EXPIRING
) &&
373 desired
= GM_SG_JOIN_EXPIRING
;
374 else if (!grp
|| !gm_packet_sg_subs_count(grp
->subs_positive
))
375 desired
= GM_SG_NOINFO
;
376 else if (gm_packet_sg_subs_count(grp
->subs_positive
) ==
377 gm_packet_sg_subs_count(sg
->subs_negative
)) {
378 if ((sg
->state
== GM_SG_NOPRUNE
||
379 sg
->state
== GM_SG_NOPRUNE_EXPIRING
) &&
381 desired
= GM_SG_NOPRUNE_EXPIRING
;
383 desired
= GM_SG_PRUNE
;
384 } else if (gm_packet_sg_subs_count(sg
->subs_negative
))
385 desired
= GM_SG_NOPRUNE
;
387 desired
= GM_SG_NOINFO
;
389 if (desired
!= sg
->state
&& !gm_ifp
->stopping
) {
390 if (PIM_DEBUG_GM_EVENTS
)
391 zlog_debug(log_sg(sg
, "%s => %s"), gm_states
[sg
->state
],
394 if (desired
== GM_SG_JOIN_EXPIRING
||
395 desired
== GM_SG_NOPRUNE_EXPIRING
) {
396 struct gm_query_timers timers
;
398 timers
.qrv
= gm_ifp
->cur_qrv
;
399 timers
.max_resp_ms
= gm_ifp
->cur_max_resp
;
400 timers
.qqic_ms
= gm_ifp
->cur_query_intv_trig
;
401 timers
.fuzz
= gm_ifp
->cfg_timing_fuzz
;
403 gm_expiry_calc(&timers
);
404 gm_sg_timer_start(gm_ifp
, sg
, timers
.expire_wait
);
406 EVENT_OFF(sg
->t_sg_query
);
407 sg
->n_query
= gm_ifp
->cur_lmqc
;
408 sg
->query_sbit
= false;
409 gm_trigger_specific(sg
);
415 if (in6_multicast_nofwd(&sg
->sgaddr
.grp
) || gm_ifp
->stopping
)
418 new_join
= gm_sg_state_want_join(desired
);
420 if (new_join
&& !sg
->tib_joined
) {
421 /* this will retry if join previously failed */
422 sg
->tib_joined
= tib_sg_gm_join(gm_ifp
->pim
, sg
->sgaddr
,
423 gm_ifp
->ifp
, &sg
->oil
);
426 "MLD join for %pSG%%%s not propagated into TIB",
427 &sg
->sgaddr
, gm_ifp
->ifp
->name
);
429 zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg
->sgaddr
,
432 } else if (sg
->tib_joined
&& !new_join
) {
433 tib_sg_gm_prune(gm_ifp
->pim
, sg
->sgaddr
, gm_ifp
->ifp
, &sg
->oil
);
436 sg
->tib_joined
= false;
439 if (desired
== GM_SG_NOINFO
) {
440 /* multiple paths can lead to the last state going away;
441 * t_sg_expire can still be running if we're arriving from
445 EVENT_OFF(sg
->t_sg_expire
);
447 assertf((!sg
->t_sg_expire
&&
448 !gm_packet_sg_subs_count(sg
->subs_positive
) &&
449 !gm_packet_sg_subs_count(sg
->subs_negative
)),
450 "%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
451 &sg
->sgaddr
, gm_ifp
->ifp
->name
, has_expired
,
452 sg
->t_sg_expire
, gm_states
[prev
], gm_states
[desired
],
453 gm_packet_sg_subs_count(sg
->subs_positive
),
454 gm_packet_sg_subs_count(sg
->subs_negative
), grp
);
456 if (PIM_DEBUG_GM_TRACE
)
457 zlog_debug(log_sg(sg
, "dropping"));
459 gm_sgs_del(gm_ifp
->sgs
, sg
);
464 /****************************************************************************/
466 /* the following bunch of functions deals with transferring state from
467 * received packets into gm_packet_state. As a reminder, the querier is
468 * structured to keep all items received in one packet together, since they
469 * will share expiry timers and thus allows efficient handling.
472 static void gm_packet_free(struct gm_packet_state
*pkt
)
474 gm_packet_expires_del(pkt
->iface
->expires
, pkt
);
475 gm_packets_del(pkt
->subscriber
->packets
, pkt
);
476 gm_subscriber_drop(&pkt
->subscriber
);
477 XFREE(MTYPE_GM_STATE
, pkt
);
480 static struct gm_packet_sg
*gm_packet_sg_setup(struct gm_packet_state
*pkt
,
481 struct gm_sg
*sg
, bool is_excl
,
484 struct gm_packet_sg
*item
;
486 assert(pkt
->n_active
< pkt
->n_sg
);
488 item
= &pkt
->items
[pkt
->n_active
];
490 item
->is_excl
= is_excl
;
491 item
->is_src
= is_src
;
492 item
->offset
= pkt
->n_active
;
498 static bool gm_packet_sg_drop(struct gm_packet_sg
*item
)
500 struct gm_packet_state
*pkt
;
505 pkt
= gm_packet_sg2state(item
);
506 if (item
->sg
->most_recent
== item
)
507 item
->sg
->most_recent
= NULL
;
509 for (i
= 0; i
< item
->n_exclude
; i
++) {
510 struct gm_packet_sg
*excl_item
;
512 excl_item
= item
+ 1 + i
;
516 gm_packet_sg_subs_del(excl_item
->sg
->subs_negative
, excl_item
);
517 excl_item
->sg
= NULL
;
520 assert(pkt
->n_active
> 0);
523 if (item
->is_excl
&& item
->is_src
)
524 gm_packet_sg_subs_del(item
->sg
->subs_negative
, item
);
526 gm_packet_sg_subs_del(item
->sg
->subs_positive
, item
);
530 if (!pkt
->n_active
) {
537 static void gm_packet_drop(struct gm_packet_state
*pkt
, bool trace
)
539 for (size_t i
= 0; i
< pkt
->n_sg
; i
++) {
540 struct gm_sg
*sg
= pkt
->items
[i
].sg
;
546 if (trace
&& PIM_DEBUG_GM_TRACE
)
547 zlog_debug(log_sg(sg
, "general-dropping from %pPA"),
548 &pkt
->subscriber
->addr
);
549 deleted
= gm_packet_sg_drop(&pkt
->items
[i
]);
551 gm_sg_update(sg
, true);
557 static void gm_packet_sg_remove_sources(struct gm_if
*gm_ifp
,
558 struct gm_subscriber
*subscriber
,
559 pim_addr grp
, pim_addr
*srcs
,
560 size_t n_src
, enum gm_sub_sense sense
)
563 struct gm_packet_sg
*old_src
;
566 for (i
= 0; i
< n_src
; i
++) {
567 sg
= gm_sg_find(gm_ifp
, grp
, srcs
[i
]);
571 old_src
= gm_packet_sg_find(sg
, sense
, subscriber
);
575 gm_packet_sg_drop(old_src
);
576 gm_sg_update(sg
, false);
580 static void gm_sg_expiry_cancel(struct gm_sg
*sg
)
582 if (sg
->t_sg_expire
&& PIM_DEBUG_GM_TRACE
)
583 zlog_debug(log_sg(sg
, "alive, cancelling expiry timer"));
584 EVENT_OFF(sg
->t_sg_expire
);
585 sg
->query_sbit
= true;
588 /* first pass: process all changes resulting in removal of state:
589 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
590 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
591 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
592 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
593 * note *replacing* state is NOT considered *removing* state here
595 * everything else is thrown into pkt for creation of state in pass 2
597 static void gm_handle_v2_pass1(struct gm_packet_state
*pkt
,
598 struct mld_v2_rec_hdr
*rechdr
, size_t n_src
)
600 /* NB: pkt->subscriber can be NULL here if the subscriber was not
603 struct gm_subscriber
*subscriber
= pkt
->subscriber
;
605 struct gm_packet_sg
*old_grp
= NULL
;
606 struct gm_packet_sg
*item
;
608 bool is_excl
= false;
610 grp
= gm_sg_find(pkt
->iface
, rechdr
->grp
, PIMADDR_ANY
);
611 if (grp
&& subscriber
)
612 old_grp
= gm_packet_sg_find(grp
, GM_SUB_POS
, subscriber
);
614 assert(old_grp
== NULL
|| old_grp
->is_excl
);
616 switch (rechdr
->type
) {
617 case MLD_RECTYPE_IS_EXCLUDE
:
618 case MLD_RECTYPE_CHANGE_TO_EXCLUDE
:
619 /* this always replaces or creates state */
622 grp
= gm_sg_make(pkt
->iface
, rechdr
->grp
, PIMADDR_ANY
);
624 item
= gm_packet_sg_setup(pkt
, grp
, is_excl
, false);
625 item
->n_exclude
= n_src
;
627 /* [EXCL_INCL_SG_NOTE] referenced below
629 * in theory, we should drop any S,G that the host may have
630 * previously added in INCLUDE mode. In practice, this is both
631 * incredibly rare and entirely irrelevant. It only makes any
632 * difference if an S,G that the host previously had on the
633 * INCLUDE list is now on the blocked list for EXCLUDE, which
634 * we can cover in processing the S,G list in pass2_excl().
636 * Other S,G from the host are simply left to expire
637 * "naturally" through general expiry.
641 case MLD_RECTYPE_IS_INCLUDE
:
642 case MLD_RECTYPE_CHANGE_TO_INCLUDE
:
644 /* INCLUDE has no *,G state, so old_grp here refers to
645 * previous EXCLUDE => delete it
647 gm_packet_sg_drop(old_grp
);
648 gm_sg_update(grp
, false);
649 /* TODO "need S,G PRUNE => NO_INFO transition here" */
653 case MLD_RECTYPE_ALLOW_NEW_SOURCES
:
655 /* remove S,Gs from EXCLUDE, and then we're done */
656 gm_packet_sg_remove_sources(pkt
->iface
, subscriber
,
657 rechdr
->grp
, rechdr
->srcs
,
661 /* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
662 * idential to IS_INCLUDE (because the list of sources in
663 * IS_INCLUDE is not exhaustive)
667 case MLD_RECTYPE_BLOCK_OLD_SOURCES
:
669 /* this is intentionally not implemented because it
670 * would be complicated as hell. we only take the list
671 * of blocked sources from full group state records
677 gm_packet_sg_remove_sources(pkt
->iface
, subscriber
,
678 rechdr
->grp
, rechdr
->srcs
,
683 for (j
= 0; j
< n_src
; j
++) {
686 sg
= gm_sg_find(pkt
->iface
, rechdr
->grp
, rechdr
->srcs
[j
]);
688 sg
= gm_sg_make(pkt
->iface
, rechdr
->grp
,
691 gm_packet_sg_setup(pkt
, sg
, is_excl
, true);
695 /* second pass: creating/updating/refreshing state. All the items from the
696 * received packet have already been thrown into gm_packet_state.
699 static void gm_handle_v2_pass2_incl(struct gm_packet_state
*pkt
, size_t i
)
701 struct gm_packet_sg
*item
= &pkt
->items
[i
];
702 struct gm_packet_sg
*old
= NULL
;
703 struct gm_sg
*sg
= item
->sg
;
705 /* EXCLUDE state was already dropped in pass1 */
706 assert(!gm_packet_sg_find(sg
, GM_SUB_NEG
, pkt
->subscriber
));
708 old
= gm_packet_sg_find(sg
, GM_SUB_POS
, pkt
->subscriber
);
710 gm_packet_sg_drop(old
);
713 gm_packet_sg_subs_add(sg
->subs_positive
, item
);
715 sg
->most_recent
= item
;
716 gm_sg_expiry_cancel(sg
);
717 gm_sg_update(sg
, false);
720 static void gm_handle_v2_pass2_excl(struct gm_packet_state
*pkt
, size_t offs
)
722 struct gm_packet_sg
*item
= &pkt
->items
[offs
];
723 struct gm_packet_sg
*old_grp
, *item_dup
;
724 struct gm_sg
*sg_grp
= item
->sg
;
727 old_grp
= gm_packet_sg_find(sg_grp
, GM_SUB_POS
, pkt
->subscriber
);
729 for (i
= 0; i
< item
->n_exclude
; i
++) {
730 struct gm_packet_sg
*item_src
, *old_src
;
732 item_src
= &pkt
->items
[offs
+ 1 + i
];
733 old_src
= gm_packet_sg_find(item_src
->sg
, GM_SUB_NEG
,
736 gm_packet_sg_drop(old_src
);
738 /* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
739 * items left over if the host previously had INCLUDE
740 * mode going. Remove them here if we find any.
742 old_src
= gm_packet_sg_find(item_src
->sg
, GM_SUB_POS
,
745 gm_packet_sg_drop(old_src
);
748 /* the previous loop has removed the S,G entries which are
749 * still excluded after this update. So anything left on the
750 * old item was previously excluded but is now included
751 * => need to trigger update on S,G
753 for (i
= 0; i
< old_grp
->n_exclude
; i
++) {
754 struct gm_packet_sg
*old_src
;
755 struct gm_sg
*old_sg_src
;
757 old_src
= old_grp
+ 1 + i
;
758 old_sg_src
= old_src
->sg
;
762 gm_packet_sg_drop(old_src
);
763 gm_sg_update(old_sg_src
, false);
766 gm_packet_sg_drop(old_grp
);
769 item_dup
= gm_packet_sg_subs_add(sg_grp
->subs_positive
, item
);
773 sg_grp
->most_recent
= item
;
774 gm_sg_expiry_cancel(sg_grp
);
776 for (i
= 0; i
< item
->n_exclude
; i
++) {
777 struct gm_packet_sg
*item_src
;
779 item_src
= &pkt
->items
[offs
+ 1 + i
];
780 item_dup
= gm_packet_sg_subs_add(item_src
->sg
->subs_negative
,
787 gm_sg_update(item_src
->sg
, false);
791 /* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
792 * to get lower PIM churn/flapping
794 gm_sg_update(sg_grp
, false);
797 /* TODO: QRV/QQIC are not copied from queries to local state" */
799 /* on receiving a query, we need to update our robustness/query interval to
800 * match, so we correctly process group/source specific queries after last
804 static void gm_handle_v2_report(struct gm_if
*gm_ifp
,
805 const struct sockaddr_in6
*pkt_src
, char *data
,
808 struct mld_v2_report_hdr
*hdr
;
809 size_t i
, n_records
, max_entries
;
810 struct gm_packet_state
*pkt
;
812 if (len
< sizeof(*hdr
)) {
813 if (PIM_DEBUG_GM_PACKETS
)
814 zlog_debug(log_pkt_src(
815 "malformed MLDv2 report (truncated header)"));
816 gm_ifp
->stats
.rx_drop_malformed
++;
820 hdr
= (struct mld_v2_report_hdr
*)data
;
821 data
+= sizeof(*hdr
);
824 n_records
= ntohs(hdr
->n_records
);
825 if (n_records
> len
/ sizeof(struct mld_v2_rec_hdr
)) {
826 /* note this is only an upper bound, records with source lists
827 * are larger. This is mostly here to make coverity happy.
829 zlog_warn(log_pkt_src(
830 "malformed MLDv2 report (infeasible record count)"));
831 gm_ifp
->stats
.rx_drop_malformed
++;
835 /* errors after this may at least partially process the packet */
836 gm_ifp
->stats
.rx_new_report
++;
838 /* can't have more *,G and S,G items than there is space for ipv6
839 * addresses, so just use this to allocate temporary buffer
841 max_entries
= len
/ sizeof(pim_addr
);
842 pkt
= XCALLOC(MTYPE_GM_STATE
,
843 offsetof(struct gm_packet_state
, items
[max_entries
]));
844 pkt
->n_sg
= max_entries
;
846 pkt
->subscriber
= gm_subscriber_findref(gm_ifp
, pkt_src
->sin6_addr
);
848 /* validate & remove state in v2_pass1() */
849 for (i
= 0; i
< n_records
; i
++) {
850 struct mld_v2_rec_hdr
*rechdr
;
851 size_t n_src
, record_size
;
853 if (len
< sizeof(*rechdr
)) {
854 zlog_warn(log_pkt_src(
855 "malformed MLDv2 report (truncated record header)"));
856 gm_ifp
->stats
.rx_trunc_report
++;
860 rechdr
= (struct mld_v2_rec_hdr
*)data
;
861 data
+= sizeof(*rechdr
);
862 len
-= sizeof(*rechdr
);
864 n_src
= ntohs(rechdr
->n_src
);
865 record_size
= n_src
* sizeof(pim_addr
) + rechdr
->aux_len
* 4;
867 if (len
< record_size
) {
868 zlog_warn(log_pkt_src(
869 "malformed MLDv2 report (truncated source list)"));
870 gm_ifp
->stats
.rx_trunc_report
++;
873 if (!IN6_IS_ADDR_MULTICAST(&rechdr
->grp
)) {
876 "malformed MLDv2 report (invalid group %pI6)"),
878 gm_ifp
->stats
.rx_trunc_report
++;
885 gm_handle_v2_pass1(pkt
, rechdr
, n_src
);
888 if (!pkt
->n_active
) {
889 gm_subscriber_drop(&pkt
->subscriber
);
890 XFREE(MTYPE_GM_STATE
, pkt
);
894 pkt
= XREALLOC(MTYPE_GM_STATE
, pkt
,
895 offsetof(struct gm_packet_state
, items
[pkt
->n_active
]));
896 pkt
->n_sg
= pkt
->n_active
;
899 monotime(&pkt
->received
);
900 if (!pkt
->subscriber
)
901 pkt
->subscriber
= gm_subscriber_get(gm_ifp
, pkt_src
->sin6_addr
);
902 gm_packets_add_tail(pkt
->subscriber
->packets
, pkt
);
903 gm_packet_expires_add_tail(gm_ifp
->expires
, pkt
);
905 for (i
= 0; i
< pkt
->n_sg
; i
++)
906 if (!pkt
->items
[i
].is_excl
)
907 gm_handle_v2_pass2_incl(pkt
, i
);
909 gm_handle_v2_pass2_excl(pkt
, i
);
910 i
+= pkt
->items
[i
].n_exclude
;
913 if (pkt
->n_active
== 0)
917 static void gm_handle_v1_report(struct gm_if
*gm_ifp
,
918 const struct sockaddr_in6
*pkt_src
, char *data
,
921 struct mld_v1_pkt
*hdr
;
922 struct gm_packet_state
*pkt
;
924 struct gm_packet_sg
*item
;
927 if (len
< sizeof(*hdr
)) {
928 if (PIM_DEBUG_GM_PACKETS
)
929 zlog_debug(log_pkt_src(
930 "malformed MLDv1 report (truncated)"));
931 gm_ifp
->stats
.rx_drop_malformed
++;
935 gm_ifp
->stats
.rx_old_report
++;
937 hdr
= (struct mld_v1_pkt
*)data
;
940 pkt
= XCALLOC(MTYPE_GM_STATE
,
941 offsetof(struct gm_packet_state
, items
[max_entries
]));
942 pkt
->n_sg
= max_entries
;
944 pkt
->subscriber
= gm_subscriber_findref(gm_ifp
, gm_dummy_untracked
);
946 /* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */
948 grp
= gm_sg_find(pkt
->iface
, hdr
->grp
, PIMADDR_ANY
);
950 grp
= gm_sg_make(pkt
->iface
, hdr
->grp
, PIMADDR_ANY
);
952 item
= gm_packet_sg_setup(pkt
, grp
, true, false);
955 /* TODO "set v1-seen timer on grp here" */
959 /* pass2 will count n_active back up to 1. Also since a v1 report
960 * has exactly 1 group, we can skip the realloc() that v2 needs here.
962 assert(pkt
->n_active
== 1);
963 pkt
->n_sg
= pkt
->n_active
;
966 monotime(&pkt
->received
);
967 if (!pkt
->subscriber
)
968 pkt
->subscriber
= gm_subscriber_get(gm_ifp
, gm_dummy_untracked
);
969 gm_packets_add_tail(pkt
->subscriber
->packets
, pkt
);
970 gm_packet_expires_add_tail(gm_ifp
->expires
, pkt
);
972 /* pass2 covers installing state & removing old state; all the v1
973 * compat is handled at this point.
975 * Note that "old state" may be v2; subscribers will switch from v2
976 * reports to v1 reports when the querier changes from v2 to v1. So,
977 * limiting this to v1 would be wrong.
979 gm_handle_v2_pass2_excl(pkt
, 0);
981 if (pkt
->n_active
== 0)
985 static void gm_handle_v1_leave(struct gm_if
*gm_ifp
,
986 const struct sockaddr_in6
*pkt_src
, char *data
,
989 struct mld_v1_pkt
*hdr
;
990 struct gm_subscriber
*subscriber
;
992 struct gm_packet_sg
*old_grp
;
994 if (len
< sizeof(*hdr
)) {
995 if (PIM_DEBUG_GM_PACKETS
)
996 zlog_debug(log_pkt_src(
997 "malformed MLDv1 leave (truncated)"));
998 gm_ifp
->stats
.rx_drop_malformed
++;
1002 gm_ifp
->stats
.rx_old_leave
++;
1004 hdr
= (struct mld_v1_pkt
*)data
;
1006 subscriber
= gm_subscriber_findref(gm_ifp
, gm_dummy_untracked
);
1010 /* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */
1012 grp
= gm_sg_find(gm_ifp
, hdr
->grp
, PIMADDR_ANY
);
1014 old_grp
= gm_packet_sg_find(grp
, GM_SUB_POS
, subscriber
);
1016 gm_packet_sg_drop(old_grp
);
1017 gm_sg_update(grp
, false);
1019 /* TODO "need S,G PRUNE => NO_INFO transition here" */
1026 /* nothing more to do here, pass2 is no-op for leaves */
1027 gm_subscriber_drop(&subscriber
);
1030 /* for each general query received (or sent), a timer is started to expire
1031 * _everything_ at the appropriate time (including robustness multiplier).
1033 * So when this timer hits, all packets - with all of their items - that were
1034 * received *before* the query are aged out, and state updated accordingly.
1035 * Note that when we receive a refresh/update, the previous/old packet is
1036 * already dropped and replaced with a new one, so in normal steady-state
1037 * operation, this timer won't be doing anything.
1039 * Additionally, if a subscriber actively leaves a group, that goes through
1040 * its own path too and won't hit this. This is really only triggered when a
1041 * host straight up disappears.
1043 static void gm_t_expire(struct event
*t
)
1045 struct gm_if
*gm_ifp
= EVENT_ARG(t
);
1046 struct gm_packet_state
*pkt
;
1048 zlog_info(log_ifp("general expiry timer"));
1050 while (gm_ifp
->n_pending
) {
1051 struct gm_general_pending
*pend
= gm_ifp
->pending
;
1052 struct timeval remain
;
1055 remain_ms
= monotime_until(&pend
->expiry
, &remain
);
1056 if (remain_ms
> 0) {
1057 if (PIM_DEBUG_GM_EVENTS
)
1059 log_ifp("next general expiry in %" PRId64
"ms"),
1062 event_add_timer_tv(router
->master
, gm_t_expire
, gm_ifp
,
1063 &remain
, &gm_ifp
->t_expire
);
1067 while ((pkt
= gm_packet_expires_first(gm_ifp
->expires
))) {
1068 if (timercmp(&pkt
->received
, &pend
->query
, >=))
1071 if (PIM_DEBUG_GM_PACKETS
)
1072 zlog_debug(log_ifp("expire packet %p"), pkt
);
1073 gm_packet_drop(pkt
, true);
1076 gm_ifp
->n_pending
--;
1077 memmove(gm_ifp
->pending
, gm_ifp
->pending
+ 1,
1078 gm_ifp
->n_pending
* sizeof(gm_ifp
->pending
[0]));
1081 if (PIM_DEBUG_GM_EVENTS
)
1082 zlog_debug(log_ifp("next general expiry waiting for query"));
1085 /* NB: the receive handlers will also run when sending packets, since we
1086 * receive our own packets back in.
1088 static void gm_handle_q_general(struct gm_if
*gm_ifp
,
1089 struct gm_query_timers
*timers
)
1091 struct timeval now
, expiry
;
1092 struct gm_general_pending
*pend
;
1095 timeradd(&now
, &timers
->expire_wait
, &expiry
);
1097 while (gm_ifp
->n_pending
) {
1098 pend
= &gm_ifp
->pending
[gm_ifp
->n_pending
- 1];
1100 if (timercmp(&pend
->expiry
, &expiry
, <))
1103 /* if we end up here, the last item in pending[] has an expiry
1104 * later than the expiry for this query. But our query time
1105 * (now) is later than that of the item (because, well, that's
1106 * how time works.) This makes this query meaningless since
1107 * it's "supersetted" within the preexisting query
1110 if (PIM_DEBUG_GM_TRACE_DETAIL
)
1112 log_ifp("zapping supersetted general timer %pTVMu"),
1115 gm_ifp
->n_pending
--;
1116 if (!gm_ifp
->n_pending
)
1117 EVENT_OFF(gm_ifp
->t_expire
);
1120 /* people might be messing with their configs or something */
1121 if (gm_ifp
->n_pending
== array_size(gm_ifp
->pending
))
1124 pend
= &gm_ifp
->pending
[gm_ifp
->n_pending
];
1126 pend
->expiry
= expiry
;
1128 if (!gm_ifp
->n_pending
++) {
1129 if (PIM_DEBUG_GM_TRACE
)
1131 log_ifp("starting general timer @ 0: %pTVMu"),
1133 event_add_timer_tv(router
->master
, gm_t_expire
, gm_ifp
,
1134 &timers
->expire_wait
, &gm_ifp
->t_expire
);
1135 } else if (PIM_DEBUG_GM_TRACE
)
1136 zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
1137 gm_ifp
->n_pending
, &pend
->expiry
);
1140 static void gm_t_sg_expire(struct event
*t
)
1142 struct gm_sg
*sg
= EVENT_ARG(t
);
1143 struct gm_if
*gm_ifp
= sg
->iface
;
1144 struct gm_packet_sg
*item
;
1146 assertf(sg
->state
== GM_SG_JOIN_EXPIRING
||
1147 sg
->state
== GM_SG_NOPRUNE_EXPIRING
,
1148 "%pSG%%%s %pTHD", &sg
->sgaddr
, gm_ifp
->ifp
->name
, t
);
1150 frr_each_safe (gm_packet_sg_subs
, sg
->subs_positive
, item
)
1151 /* this will also drop EXCLUDE mode S,G lists together with
1154 gm_packet_sg_drop(item
);
1156 /* subs_negative items are only timed out together with the *,G entry
1157 * since we won't get any reports for a group-and-source query
1159 gm_sg_update(sg
, true);
1162 static bool gm_sg_check_recent(struct gm_if
*gm_ifp
, struct gm_sg
*sg
,
1165 struct gm_packet_state
*pkt
;
1167 if (!sg
->most_recent
) {
1168 struct gm_packet_state
*best_pkt
= NULL
;
1169 struct gm_packet_sg
*item
;
1171 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
1172 pkt
= gm_packet_sg2state(item
);
1175 timercmp(&pkt
->received
, &best_pkt
->received
, >)) {
1177 sg
->most_recent
= item
;
1181 if (sg
->most_recent
) {
1182 struct timeval fuzz
;
1184 pkt
= gm_packet_sg2state(sg
->most_recent
);
1186 /* this shouldn't happen on plain old real ethernet segment,
1187 * but on something like a VXLAN or VPLS it is very possible
1188 * that we get a report before the query that triggered it.
1189 * (imagine a triangle scenario with 3 datacenters, it's very
1190 * possible A->B + B->C is faster than A->C due to odd routing)
1192 * This makes a little tolerance allowance to handle that case.
1194 timeradd(&pkt
->received
, &gm_ifp
->cfg_timing_fuzz
, &fuzz
);
1196 if (timercmp(&fuzz
, &ref
, >))
1202 static void gm_sg_timer_start(struct gm_if
*gm_ifp
, struct gm_sg
*sg
,
1203 struct timeval expire_wait
)
1209 if (sg
->state
== GM_SG_PRUNE
)
1213 if (gm_sg_check_recent(gm_ifp
, sg
, now
))
1216 if (PIM_DEBUG_GM_TRACE
)
1217 zlog_debug(log_sg(sg
, "expiring in %pTVI"), &expire_wait
);
1219 if (sg
->t_sg_expire
) {
1220 struct timeval remain
;
1222 remain
= event_timer_remain(sg
->t_sg_expire
);
1223 if (timercmp(&remain
, &expire_wait
, <=))
1226 EVENT_OFF(sg
->t_sg_expire
);
1229 event_add_timer_tv(router
->master
, gm_t_sg_expire
, sg
, &expire_wait
,
1233 static void gm_handle_q_groupsrc(struct gm_if
*gm_ifp
,
1234 struct gm_query_timers
*timers
, pim_addr grp
,
1235 const pim_addr
*srcs
, size_t n_src
)
1240 for (i
= 0; i
< n_src
; i
++) {
1241 sg
= gm_sg_find(gm_ifp
, grp
, srcs
[i
]);
1242 gm_sg_timer_start(gm_ifp
, sg
, timers
->expire_wait
);
1246 static void gm_t_grp_expire(struct event
*t
)
1248 /* if we're here, that means when we received the group-specific query
1249 * there was one or more active S,G for this group. For *,G the timer
1250 * in sg->t_sg_expire is running separately and gets cancelled when we
1251 * receive a report, so that work is left to gm_t_sg_expire and we
1252 * shouldn't worry about it here.
1254 struct gm_grp_pending
*pend
= EVENT_ARG(t
);
1255 struct gm_if
*gm_ifp
= pend
->iface
;
1256 struct gm_sg
*sg
, *sg_start
, sg_ref
= {};
1258 if (PIM_DEBUG_GM_EVENTS
)
1259 zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend
->grp
);
1261 /* gteq lookup - try to find *,G or S,G (S,G is > *,G)
1262 * could technically be gt to skip a possible *,G
1264 sg_ref
.sgaddr
.grp
= pend
->grp
;
1265 sg_ref
.sgaddr
.src
= PIMADDR_ANY
;
1266 sg_start
= gm_sgs_find_gteq(gm_ifp
->sgs
, &sg_ref
);
1268 frr_each_from (gm_sgs
, gm_ifp
->sgs
, sg
, sg_start
) {
1269 struct gm_packet_sg
*item
;
1271 if (pim_addr_cmp(sg
->sgaddr
.grp
, pend
->grp
))
1273 if (pim_addr_is_any(sg
->sgaddr
.src
))
1274 /* handled by gm_t_sg_expire / sg->t_sg_expire */
1276 if (gm_sg_check_recent(gm_ifp
, sg
, pend
->query
))
1279 /* we may also have a group-source-specific query going on in
1280 * parallel. But if we received nothing for the *,G query,
1281 * the S,G query is kinda irrelevant.
1283 EVENT_OFF(sg
->t_sg_expire
);
1285 frr_each_safe (gm_packet_sg_subs
, sg
->subs_positive
, item
)
1286 /* this will also drop the EXCLUDE S,G lists */
1287 gm_packet_sg_drop(item
);
1289 gm_sg_update(sg
, true);
1292 gm_grp_pends_del(gm_ifp
->grp_pends
, pend
);
1293 XFREE(MTYPE_GM_GRP_PENDING
, pend
);
1296 static void gm_handle_q_group(struct gm_if
*gm_ifp
,
1297 struct gm_query_timers
*timers
, pim_addr grp
)
1299 struct gm_sg
*sg
, sg_ref
= {};
1300 struct gm_grp_pending
*pend
, pend_ref
= {};
1302 sg_ref
.sgaddr
.grp
= grp
;
1303 sg_ref
.sgaddr
.src
= PIMADDR_ANY
;
1304 /* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
1305 sg
= gm_sgs_find_gteq(gm_ifp
->sgs
, &sg_ref
);
1307 if (!sg
|| pim_addr_cmp(sg
->sgaddr
.grp
, grp
))
1308 /* we have nothing at all for this group - don't waste RAM */
1311 if (pim_addr_is_any(sg
->sgaddr
.src
)) {
1312 /* actually found *,G entry here */
1313 if (PIM_DEBUG_GM_TRACE
)
1314 zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
1316 gm_sg_timer_start(gm_ifp
, sg
, timers
->expire_wait
);
1318 sg
= gm_sgs_next(gm_ifp
->sgs
, sg
);
1319 if (!sg
|| pim_addr_cmp(sg
->sgaddr
.grp
, grp
))
1320 /* no S,G for this group */
1325 pend
= gm_grp_pends_find(gm_ifp
->grp_pends
, &pend_ref
);
1328 struct timeval remain
;
1330 remain
= event_timer_remain(pend
->t_expire
);
1331 if (timercmp(&remain
, &timers
->expire_wait
, <=))
1334 EVENT_OFF(pend
->t_expire
);
1336 pend
= XCALLOC(MTYPE_GM_GRP_PENDING
, sizeof(*pend
));
1338 pend
->iface
= gm_ifp
;
1339 gm_grp_pends_add(gm_ifp
->grp_pends
, pend
);
1342 monotime(&pend
->query
);
1343 event_add_timer_tv(router
->master
, gm_t_grp_expire
, pend
,
1344 &timers
->expire_wait
, &pend
->t_expire
);
1346 if (PIM_DEBUG_GM_TRACE
)
1347 zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp
,
1351 static void gm_bump_querier(struct gm_if
*gm_ifp
)
1353 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1355 EVENT_OFF(gm_ifp
->t_query
);
1357 if (pim_addr_is_any(pim_ifp
->ll_lowest
))
1359 if (!IPV6_ADDR_SAME(&gm_ifp
->querier
, &pim_ifp
->ll_lowest
))
1362 gm_ifp
->n_startup
= gm_ifp
->cur_qrv
;
1364 event_execute(router
->master
, gm_t_query
, gm_ifp
, 0);
1367 static void gm_t_other_querier(struct event
*t
)
1369 struct gm_if
*gm_ifp
= EVENT_ARG(t
);
1370 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1372 zlog_info(log_ifp("other querier timer expired"));
1374 gm_ifp
->querier
= pim_ifp
->ll_lowest
;
1375 gm_ifp
->n_startup
= gm_ifp
->cur_qrv
;
1377 event_execute(router
->master
, gm_t_query
, gm_ifp
, 0);
1380 static void gm_handle_query(struct gm_if
*gm_ifp
,
1381 const struct sockaddr_in6
*pkt_src
,
1382 pim_addr
*pkt_dst
, char *data
, size_t len
)
1384 struct mld_v2_query_hdr
*hdr
;
1385 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1386 struct gm_query_timers timers
;
1389 if (len
< sizeof(struct mld_v2_query_hdr
) &&
1390 len
!= sizeof(struct mld_v1_pkt
)) {
1391 zlog_warn(log_pkt_src("invalid query size"));
1392 gm_ifp
->stats
.rx_drop_malformed
++;
1396 hdr
= (struct mld_v2_query_hdr
*)data
;
1397 general_query
= pim_addr_is_any(hdr
->grp
);
1399 if (!general_query
&& !IN6_IS_ADDR_MULTICAST(&hdr
->grp
)) {
1400 zlog_warn(log_pkt_src(
1401 "malformed MLDv2 query (invalid group %pI6)"),
1403 gm_ifp
->stats
.rx_drop_malformed
++;
1407 if (len
>= sizeof(struct mld_v2_query_hdr
)) {
1408 size_t src_space
= ntohs(hdr
->n_src
) * sizeof(pim_addr
);
1410 if (len
< sizeof(struct mld_v2_query_hdr
) + src_space
) {
1411 zlog_warn(log_pkt_src(
1412 "malformed MLDv2 query (truncated source list)"));
1413 gm_ifp
->stats
.rx_drop_malformed
++;
1417 if (general_query
&& src_space
) {
1418 zlog_warn(log_pkt_src(
1419 "malformed MLDv2 query (general query with non-empty source list)"));
1420 gm_ifp
->stats
.rx_drop_malformed
++;
1425 /* accepting queries unicast to us (or addressed to a wrong group)
1426 * can mess up querier election as well as cause us to terminate
1427 * traffic (since after a unicast query no reports will be coming in)
1429 if (!IPV6_ADDR_SAME(pkt_dst
, &gm_all_hosts
)) {
1430 if (pim_addr_is_any(hdr
->grp
)) {
1433 "wrong destination %pPA for general query"),
1435 gm_ifp
->stats
.rx_drop_dstaddr
++;
1439 if (!IPV6_ADDR_SAME(&hdr
->grp
, pkt_dst
)) {
1440 gm_ifp
->stats
.rx_drop_dstaddr
++;
1443 "wrong destination %pPA for group specific query"),
1449 if (IPV6_ADDR_CMP(&pkt_src
->sin6_addr
, &gm_ifp
->querier
) < 0) {
1450 if (PIM_DEBUG_GM_EVENTS
)
1452 log_pkt_src("replacing elected querier %pPA"),
1455 gm_ifp
->querier
= pkt_src
->sin6_addr
;
1458 if (len
== sizeof(struct mld_v1_pkt
)) {
1459 timers
.qrv
= gm_ifp
->cur_qrv
;
1460 timers
.max_resp_ms
= hdr
->max_resp_code
;
1461 timers
.qqic_ms
= gm_ifp
->cur_query_intv
;
1463 timers
.qrv
= (hdr
->flags
& 0x7) ?: 8;
1464 timers
.max_resp_ms
= mld_max_resp_decode(hdr
->max_resp_code
);
1465 timers
.qqic_ms
= igmp_msg_decode8to16(hdr
->qqic
) * 1000;
1467 timers
.fuzz
= gm_ifp
->cfg_timing_fuzz
;
1469 gm_expiry_calc(&timers
);
1471 if (PIM_DEBUG_GM_TRACE_DETAIL
)
1473 log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
1474 timers
.qrv
, timers
.max_resp_ms
, timers
.qqic_ms
,
1475 &timers
.expire_wait
);
1477 if (IPV6_ADDR_CMP(&pkt_src
->sin6_addr
, &pim_ifp
->ll_lowest
) < 0) {
1478 unsigned int other_ms
;
1480 EVENT_OFF(gm_ifp
->t_query
);
1481 EVENT_OFF(gm_ifp
->t_other_querier
);
1483 other_ms
= timers
.qrv
* timers
.qqic_ms
+ timers
.max_resp_ms
/ 2;
1484 event_add_timer_msec(router
->master
, gm_t_other_querier
, gm_ifp
,
1485 other_ms
, &gm_ifp
->t_other_querier
);
1488 if (len
== sizeof(struct mld_v1_pkt
)) {
1489 if (general_query
) {
1490 gm_handle_q_general(gm_ifp
, &timers
);
1491 gm_ifp
->stats
.rx_query_old_general
++;
1493 gm_handle_q_group(gm_ifp
, &timers
, hdr
->grp
);
1494 gm_ifp
->stats
.rx_query_old_group
++;
1499 /* v2 query - [S]uppress bit */
1500 if (hdr
->flags
& 0x8) {
1501 gm_ifp
->stats
.rx_query_new_sbit
++;
1505 if (general_query
) {
1506 gm_handle_q_general(gm_ifp
, &timers
);
1507 gm_ifp
->stats
.rx_query_new_general
++;
1508 } else if (!ntohs(hdr
->n_src
)) {
1509 gm_handle_q_group(gm_ifp
, &timers
, hdr
->grp
);
1510 gm_ifp
->stats
.rx_query_new_group
++;
1512 /* this is checked above:
1513 * if (len >= sizeof(struct mld_v2_query_hdr)) {
1514 * size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);
1515 * if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
1517 assume(ntohs(hdr
->n_src
) <=
1518 (len
- sizeof(struct mld_v2_query_hdr
)) /
1521 gm_handle_q_groupsrc(gm_ifp
, &timers
, hdr
->grp
, hdr
->srcs
,
1523 gm_ifp
->stats
.rx_query_new_groupsrc
++;
1527 static void gm_rx_process(struct gm_if
*gm_ifp
,
1528 const struct sockaddr_in6
*pkt_src
, pim_addr
*pkt_dst
,
1529 void *data
, size_t pktlen
)
1531 struct icmp6_plain_hdr
*icmp6
= data
;
1532 uint16_t pkt_csum
, ref_csum
;
1533 struct ipv6_ph ph6
= {
1534 .src
= pkt_src
->sin6_addr
,
1536 .ulpl
= htons(pktlen
),
1537 .next_hdr
= IPPROTO_ICMPV6
,
1540 pkt_csum
= icmp6
->icmp6_cksum
;
1541 icmp6
->icmp6_cksum
= 0;
1542 ref_csum
= in_cksum_with_ph6(&ph6
, data
, pktlen
);
1544 if (pkt_csum
!= ref_csum
) {
1547 "(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
1548 pkt_dst
, pkt_csum
, ref_csum
);
1549 gm_ifp
->stats
.rx_drop_csum
++;
1554 pktlen
-= sizeof(*icmp6
);
1556 switch (icmp6
->icmp6_type
) {
1557 case ICMP6_MLD_QUERY
:
1558 gm_handle_query(gm_ifp
, pkt_src
, pkt_dst
, data
, pktlen
);
1560 case ICMP6_MLD_V1_REPORT
:
1561 gm_handle_v1_report(gm_ifp
, pkt_src
, data
, pktlen
);
1563 case ICMP6_MLD_V1_DONE
:
1564 gm_handle_v1_leave(gm_ifp
, pkt_src
, data
, pktlen
);
1566 case ICMP6_MLD_V2_REPORT
:
1567 gm_handle_v2_report(gm_ifp
, pkt_src
, data
, pktlen
);
/* scan an IPv6 Hop-by-Hop options block (as delivered via IPV6_HOPOPTS,
 * including the leading next-header and length octets) for a Router Alert
 * option carrying the given alert value.  Returns true when found.
 */
static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
				 uint16_t alert_type)
{
	uint8_t *hopopt_end;

	/* hbh header is (length + 1) * 8 octets; first sanity-check sizes */
	if (hopopt_len < 8)
		return false;
	if (hopopt_len < (hopopts[1] + 1U) * 8U)
		return false;

	hopopt_end = hopopts + (hopopts[1] + 1) * 8;
	/* skip next-header + length octets; options start at offset 2 */
	hopopts += 2;

	while (hopopts < hopopt_end) {
		if (hopopts[0] == IP6OPT_PAD1) {
			hopopts++;
			continue;
		}

		/* need room for type+len, and for the option body itself */
		if (hopopts > hopopt_end - 2)
			break;
		if (hopopts > hopopt_end - 2 - hopopts[1])
			break;

		if (hopopts[0] == IP6OPT_ROUTER_ALERT && hopopts[1] == 2) {
			uint16_t have_type = (hopopts[2] << 8) | hopopts[3];

			if (have_type == alert_type)
				return true;
		}

		hopopts += 2 + hopopts[1];
	}
	return false;
}
1608 static void gm_t_recv(struct event
*t
)
1610 struct pim_instance
*pim
= EVENT_ARG(t
);
1612 char buf
[CMSG_SPACE(sizeof(struct in6_pktinfo
)) +
1613 CMSG_SPACE(256) /* hop options */ +
1614 CMSG_SPACE(sizeof(int)) /* hopcount */];
1615 struct cmsghdr align
;
1617 struct cmsghdr
*cmsg
;
1618 struct in6_pktinfo
*pktinfo
= NULL
;
1619 uint8_t *hopopts
= NULL
;
1620 size_t hopopt_len
= 0;
1621 int *hoplimit
= NULL
;
1623 struct msghdr mh
[1] = {};
1624 struct iovec iov
[1];
1625 struct sockaddr_in6 pkt_src
[1] = {};
1629 event_add_read(router
->master
, gm_t_recv
, pim
, pim
->gm_socket
,
1632 iov
->iov_base
= rxbuf
;
1633 iov
->iov_len
= sizeof(rxbuf
);
1635 mh
->msg_name
= pkt_src
;
1636 mh
->msg_namelen
= sizeof(pkt_src
);
1637 mh
->msg_control
= cmsgbuf
.buf
;
1638 mh
->msg_controllen
= sizeof(cmsgbuf
.buf
);
1640 mh
->msg_iovlen
= array_size(iov
);
1643 nread
= recvmsg(pim
->gm_socket
, mh
, MSG_PEEK
| MSG_TRUNC
);
1645 zlog_err("(VRF %s) RX error: %m", pim
->vrf
->name
);
1646 pim
->gm_rx_drop_sys
++;
1650 if ((size_t)nread
> sizeof(rxbuf
)) {
1651 iov
->iov_base
= XMALLOC(MTYPE_GM_PACKET
, nread
);
1652 iov
->iov_len
= nread
;
1654 nread
= recvmsg(pim
->gm_socket
, mh
, 0);
1656 zlog_err("(VRF %s) RX error: %m", pim
->vrf
->name
);
1657 pim
->gm_rx_drop_sys
++;
1661 struct interface
*ifp
;
1663 ifp
= if_lookup_by_index(pkt_src
->sin6_scope_id
, pim
->vrf
->vrf_id
);
1664 if (!ifp
|| !ifp
->info
)
1667 struct pim_interface
*pim_ifp
= ifp
->info
;
1668 struct gm_if
*gm_ifp
= pim_ifp
->mld
;
1673 for (cmsg
= CMSG_FIRSTHDR(mh
); cmsg
; cmsg
= CMSG_NXTHDR(mh
, cmsg
)) {
1674 if (cmsg
->cmsg_level
!= SOL_IPV6
)
1677 switch (cmsg
->cmsg_type
) {
1679 pktinfo
= (struct in6_pktinfo
*)CMSG_DATA(cmsg
);
1682 hopopts
= CMSG_DATA(cmsg
);
1683 hopopt_len
= cmsg
->cmsg_len
- sizeof(*cmsg
);
1686 hoplimit
= (int *)CMSG_DATA(cmsg
);
1691 if (!pktinfo
|| !hoplimit
) {
1693 "BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
1694 pim
->gm_rx_drop_sys
++;
1698 if (*hoplimit
!= 1) {
1699 zlog_err(log_pkt_src("packet with hop limit != 1"));
1700 /* spoofing attempt => count on srcaddr counter */
1701 gm_ifp
->stats
.rx_drop_srcaddr
++;
1705 if (!ip6_check_hopopts_ra(hopopts
, hopopt_len
, IP6_ALERT_MLD
)) {
1706 zlog_err(log_pkt_src(
1707 "packet without IPv6 Router Alert MLD option"));
1708 gm_ifp
->stats
.rx_drop_ra
++;
1712 if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src
->sin6_addr
))
1713 /* reports from :: happen in normal operation for DAD, so
1714 * don't spam log messages about this
1718 if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src
->sin6_addr
)) {
1719 zlog_warn(log_pkt_src("packet from invalid source address"));
1720 gm_ifp
->stats
.rx_drop_srcaddr
++;
1725 if (pktlen
< sizeof(struct icmp6_plain_hdr
)) {
1726 zlog_warn(log_pkt_src("truncated packet"));
1727 gm_ifp
->stats
.rx_drop_malformed
++;
1731 gm_rx_process(gm_ifp
, pkt_src
, &pktinfo
->ipi6_addr
, iov
->iov_base
,
1735 if (iov
->iov_base
!= rxbuf
)
1736 XFREE(MTYPE_GM_PACKET
, iov
->iov_base
);
1739 static void gm_send_query(struct gm_if
*gm_ifp
, pim_addr grp
,
1740 const pim_addr
*srcs
, size_t n_srcs
, bool s_bit
)
1742 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1743 struct sockaddr_in6 dstaddr
= {
1744 .sin6_family
= AF_INET6
,
1745 .sin6_scope_id
= gm_ifp
->ifp
->ifindex
,
1748 struct icmp6_plain_hdr hdr
;
1749 struct mld_v2_query_hdr v2_query
;
1751 /* clang-format off */
1753 .icmp6_type
= ICMP6_MLD_QUERY
,
1759 /* clang-format on */
1761 struct ipv6_ph ph6
= {
1762 .src
= pim_ifp
->ll_lowest
,
1763 .ulpl
= htons(sizeof(query
)),
1764 .next_hdr
= IPPROTO_ICMPV6
,
1767 char buf
[CMSG_SPACE(8) /* hop options */ +
1768 CMSG_SPACE(sizeof(struct in6_pktinfo
))];
1769 struct cmsghdr align
;
1771 struct cmsghdr
*cmh
;
1772 struct msghdr mh
[1] = {};
1773 struct iovec iov
[3];
1775 ssize_t ret
, expect_ret
;
1777 struct in6_pktinfo
*pktinfo
;
1779 if (if_is_loopback(gm_ifp
->ifp
)) {
1780 /* Linux is a bit odd with multicast on loopback */
1781 ph6
.src
= in6addr_loopback
;
1782 dstaddr
.sin6_addr
= in6addr_loopback
;
1783 } else if (pim_addr_is_any(grp
))
1784 dstaddr
.sin6_addr
= gm_all_hosts
;
1786 dstaddr
.sin6_addr
= grp
;
1788 query
.v2_query
.max_resp_code
=
1789 mld_max_resp_encode(gm_ifp
->cur_max_resp
);
1790 query
.v2_query
.flags
= (gm_ifp
->cur_qrv
< 8) ? gm_ifp
->cur_qrv
: 0;
1792 query
.v2_query
.flags
|= 0x08;
1793 query
.v2_query
.qqic
=
1794 igmp_msg_encode16to8(gm_ifp
->cur_query_intv
/ 1000);
1795 query
.v2_query
.n_src
= htons(n_srcs
);
1797 ph6
.dst
= dstaddr
.sin6_addr
;
1799 /* ph6 not included in sendmsg */
1800 iov
[0].iov_base
= &ph6
;
1801 iov
[0].iov_len
= sizeof(ph6
);
1802 iov
[1].iov_base
= &query
;
1803 if (gm_ifp
->cur_version
== GM_MLDV1
) {
1805 iov
[1].iov_len
= sizeof(query
.hdr
) + sizeof(struct mld_v1_pkt
);
1806 } else if (!n_srcs
) {
1808 iov
[1].iov_len
= sizeof(query
);
1810 iov
[1].iov_len
= sizeof(query
);
1811 iov
[2].iov_base
= (void *)srcs
;
1812 iov
[2].iov_len
= n_srcs
* sizeof(srcs
[0]);
1816 query
.hdr
.icmp6_cksum
= in_cksumv(iov
, iov_len
);
1818 if (PIM_DEBUG_GM_PACKETS
)
1820 log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
1821 &pim_ifp
->ll_lowest
, &dstaddr
.sin6_addr
, &grp
, n_srcs
);
1823 mh
->msg_name
= &dstaddr
;
1824 mh
->msg_namelen
= sizeof(dstaddr
);
1825 mh
->msg_iov
= iov
+ 1;
1826 mh
->msg_iovlen
= iov_len
- 1;
1827 mh
->msg_control
= &cmsg
;
1828 mh
->msg_controllen
= sizeof(cmsg
.buf
);
1830 cmh
= CMSG_FIRSTHDR(mh
);
1831 cmh
->cmsg_level
= IPPROTO_IPV6
;
1832 cmh
->cmsg_type
= IPV6_HOPOPTS
;
1833 cmh
->cmsg_len
= CMSG_LEN(8);
1834 dp
= CMSG_DATA(cmh
);
1835 *dp
++ = 0; /* next header */
1836 *dp
++ = 0; /* length (8-byte blocks, minus 1) */
1837 *dp
++ = IP6OPT_ROUTER_ALERT
; /* router alert */
1838 *dp
++ = 2; /* length */
1839 *dp
++ = 0; /* value (2 bytes) */
1840 *dp
++ = 0; /* value (2 bytes) (0 = MLD) */
1841 *dp
++ = 0; /* pad0 */
1842 *dp
++ = 0; /* pad0 */
1844 cmh
= CMSG_NXTHDR(mh
, cmh
);
1845 cmh
->cmsg_level
= IPPROTO_IPV6
;
1846 cmh
->cmsg_type
= IPV6_PKTINFO
;
1847 cmh
->cmsg_len
= CMSG_LEN(sizeof(struct in6_pktinfo
));
1848 pktinfo
= (struct in6_pktinfo
*)CMSG_DATA(cmh
);
1849 pktinfo
->ipi6_ifindex
= gm_ifp
->ifp
->ifindex
;
1850 pktinfo
->ipi6_addr
= gm_ifp
->cur_ll_lowest
;
1852 expect_ret
= iov
[1].iov_len
;
1854 expect_ret
+= iov
[2].iov_len
;
1856 frr_with_privs (&pimd_privs
) {
1857 ret
= sendmsg(gm_ifp
->pim
->gm_socket
, mh
, 0);
1860 if (ret
!= expect_ret
) {
1861 zlog_warn(log_ifp("failed to send query: %m"));
1862 gm_ifp
->stats
.tx_query_fail
++;
1864 if (gm_ifp
->cur_version
== GM_MLDV1
) {
1865 if (pim_addr_is_any(grp
))
1866 gm_ifp
->stats
.tx_query_old_general
++;
1868 gm_ifp
->stats
.tx_query_old_group
++;
1870 if (pim_addr_is_any(grp
))
1871 gm_ifp
->stats
.tx_query_new_general
++;
1873 gm_ifp
->stats
.tx_query_new_group
++;
1875 gm_ifp
->stats
.tx_query_new_groupsrc
++;
1880 static void gm_t_query(struct event
*t
)
1882 struct gm_if
*gm_ifp
= EVENT_ARG(t
);
1883 unsigned int timer_ms
= gm_ifp
->cur_query_intv
;
1885 if (gm_ifp
->n_startup
) {
1887 gm_ifp
->n_startup
--;
1890 event_add_timer_msec(router
->master
, gm_t_query
, gm_ifp
, timer_ms
,
1893 gm_send_query(gm_ifp
, PIMADDR_ANY
, NULL
, 0, false);
/* timer callback: fire the next retransmission of an S,G specific query */
static void gm_t_sg_query(struct event *t)
{
	struct gm_sg *sg = EVENT_ARG(t);

	gm_trigger_specific(sg);
}
1903 /* S,G specific queries (triggered by a member leaving) get a little slack
1904 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
1906 static void gm_send_specific(struct gm_gsq_pending
*pend_gsq
)
1908 struct gm_if
*gm_ifp
= pend_gsq
->iface
;
1910 gm_send_query(gm_ifp
, pend_gsq
->grp
, pend_gsq
->srcs
, pend_gsq
->n_src
,
1913 gm_gsq_pends_del(gm_ifp
->gsq_pends
, pend_gsq
);
1914 XFREE(MTYPE_GM_GSQ_PENDING
, pend_gsq
);
/* timer callback: the bundling slack expired — send the aggregated
 * group-and-source specific query now.
 */
static void gm_t_gsq_pend(struct event *t)
{
	struct gm_gsq_pending *pend_gsq = EVENT_ARG(t);

	gm_send_specific(pend_gsq);
}
1924 static void gm_trigger_specific(struct gm_sg
*sg
)
1926 struct gm_if
*gm_ifp
= sg
->iface
;
1927 struct pim_interface
*pim_ifp
= gm_ifp
->ifp
->info
;
1928 struct gm_gsq_pending
*pend_gsq
, ref
= {};
1932 event_add_timer_msec(router
->master
, gm_t_sg_query
, sg
,
1933 gm_ifp
->cur_query_intv_trig
,
1936 if (!IPV6_ADDR_SAME(&gm_ifp
->querier
, &pim_ifp
->ll_lowest
))
1938 if (gm_ifp
->pim
->gm_socket
== -1)
1941 if (PIM_DEBUG_GM_TRACE
)
1942 zlog_debug(log_sg(sg
, "triggered query"));
1944 if (pim_addr_is_any(sg
->sgaddr
.src
)) {
1945 gm_send_query(gm_ifp
, sg
->sgaddr
.grp
, NULL
, 0, sg
->query_sbit
);
1949 ref
.grp
= sg
->sgaddr
.grp
;
1950 ref
.s_bit
= sg
->query_sbit
;
1952 pend_gsq
= gm_gsq_pends_find(gm_ifp
->gsq_pends
, &ref
);
1954 pend_gsq
= XCALLOC(MTYPE_GM_GSQ_PENDING
, sizeof(*pend_gsq
));
1955 pend_gsq
->grp
= sg
->sgaddr
.grp
;
1956 pend_gsq
->s_bit
= sg
->query_sbit
;
1957 pend_gsq
->iface
= gm_ifp
;
1958 gm_gsq_pends_add(gm_ifp
->gsq_pends
, pend_gsq
);
1960 event_add_timer_tv(router
->master
, gm_t_gsq_pend
, pend_gsq
,
1961 &gm_ifp
->cfg_timing_fuzz
, &pend_gsq
->t_send
);
1964 assert(pend_gsq
->n_src
< array_size(pend_gsq
->srcs
));
1966 pend_gsq
->srcs
[pend_gsq
->n_src
] = sg
->sgaddr
.src
;
1969 if (pend_gsq
->n_src
== array_size(pend_gsq
->srcs
)) {
1970 EVENT_OFF(pend_gsq
->t_send
);
1971 gm_send_specific(pend_gsq
);
1976 static void gm_vrf_socket_incref(struct pim_instance
*pim
)
1978 struct vrf
*vrf
= pim
->vrf
;
1980 struct icmp6_filter filter
[1];
1982 if (pim
->gm_socket_if_count
++ && pim
->gm_socket
!= -1)
1985 ICMP6_FILTER_SETBLOCKALL(filter
);
1986 ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY
, filter
);
1987 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT
, filter
);
1988 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE
, filter
);
1989 ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT
, filter
);
1991 frr_with_privs (&pimd_privs
) {
1992 pim
->gm_socket
= vrf_socket(AF_INET6
, SOCK_RAW
, IPPROTO_ICMPV6
,
1993 vrf
->vrf_id
, vrf
->name
);
1994 if (pim
->gm_socket
< 0) {
1995 zlog_err("(VRF %s) could not create MLD socket: %m",
2000 ret
= setsockopt(pim
->gm_socket
, SOL_ICMPV6
, ICMP6_FILTER
,
2001 filter
, sizeof(filter
));
2003 zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
2007 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_RECVPKTINFO
,
2008 &intval
, sizeof(intval
));
2010 zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
2014 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_RECVHOPOPTS
,
2015 &intval
, sizeof(intval
));
2017 zlog_err("(VRF %s) failed to set IPV6_HOPOPTS: %m",
2021 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_RECVHOPLIMIT
,
2022 &intval
, sizeof(intval
));
2024 zlog_err("(VRF %s) failed to set IPV6_HOPLIMIT: %m",
2028 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_MULTICAST_LOOP
,
2029 &intval
, sizeof(intval
));
2032 "(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
2036 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_MULTICAST_HOPS
,
2037 &intval
, sizeof(intval
));
2040 "(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
2043 /* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
2044 * RX filtering in Linux. It only means "receive all groups
2045 * that something on the system has joined". To actually
2046 * receive *all* MLD packets - which is what we need -
2047 * multicast routing must be enabled on the interface. And
2048 * this only works for MLD packets specifically.
2050 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
2051 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
2053 * Also note that the code there explicitly checks for the IPv6
2054 * router alert MLD option (which is required by the RFC to be
2055 * on MLD packets.) That implies trying to support hosts which
2056 * erroneously don't add that option is just not possible.
2059 ret
= setsockopt(pim
->gm_socket
, SOL_IPV6
, IPV6_MULTICAST_ALL
,
2060 &intval
, sizeof(intval
));
2063 "(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
2067 event_add_read(router
->master
, gm_t_recv
, pim
, pim
->gm_socket
,
2071 static void gm_vrf_socket_decref(struct pim_instance
*pim
)
2073 if (--pim
->gm_socket_if_count
)
2076 EVENT_OFF(pim
->t_gm_recv
);
2077 close(pim
->gm_socket
);
2078 pim
->gm_socket
= -1;
2081 static void gm_start(struct interface
*ifp
)
2083 struct pim_interface
*pim_ifp
= ifp
->info
;
2084 struct gm_if
*gm_ifp
;
2087 assert(pim_ifp
->pim
);
2088 assert(pim_ifp
->mroute_vif_index
>= 0);
2089 assert(!pim_ifp
->mld
);
2091 gm_vrf_socket_incref(pim_ifp
->pim
);
2093 gm_ifp
= XCALLOC(MTYPE_GM_IFACE
, sizeof(*gm_ifp
));
2095 pim_ifp
->mld
= gm_ifp
;
2096 gm_ifp
->pim
= pim_ifp
->pim
;
2097 monotime(&gm_ifp
->started
);
2099 zlog_info(log_ifp("starting MLD"));
2101 if (pim_ifp
->mld_version
== 1)
2102 gm_ifp
->cur_version
= GM_MLDV1
;
2104 gm_ifp
->cur_version
= GM_MLDV2
;
2106 gm_ifp
->cur_qrv
= pim_ifp
->gm_default_robustness_variable
;
2107 gm_ifp
->cur_query_intv
= pim_ifp
->gm_default_query_interval
* 1000;
2108 gm_ifp
->cur_query_intv_trig
=
2109 pim_ifp
->gm_specific_query_max_response_time_dsec
* 100;
2110 gm_ifp
->cur_max_resp
= pim_ifp
->gm_query_max_response_time_dsec
* 100;
2111 gm_ifp
->cur_lmqc
= pim_ifp
->gm_last_member_query_count
;
2113 gm_ifp
->cfg_timing_fuzz
.tv_sec
= 0;
2114 gm_ifp
->cfg_timing_fuzz
.tv_usec
= 10 * 1000;
2116 gm_sgs_init(gm_ifp
->sgs
);
2117 gm_subscribers_init(gm_ifp
->subscribers
);
2118 gm_packet_expires_init(gm_ifp
->expires
);
2119 gm_grp_pends_init(gm_ifp
->grp_pends
);
2120 gm_gsq_pends_init(gm_ifp
->gsq_pends
);
2122 frr_with_privs (&pimd_privs
) {
2123 struct ipv6_mreq mreq
;
2126 /* all-MLDv2 group */
2127 mreq
.ipv6mr_multiaddr
= gm_all_routers
;
2128 mreq
.ipv6mr_interface
= ifp
->ifindex
;
2129 ret
= setsockopt(gm_ifp
->pim
->gm_socket
, SOL_IPV6
,
2130 IPV6_JOIN_GROUP
, &mreq
, sizeof(mreq
));
2132 zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
2137 void gm_group_delete(struct gm_if
*gm_ifp
)
2140 struct gm_packet_state
*pkt
;
2141 struct gm_grp_pending
*pend_grp
;
2142 struct gm_gsq_pending
*pend_gsq
;
2143 struct gm_subscriber
*subscriber
;
2145 while ((pkt
= gm_packet_expires_first(gm_ifp
->expires
)))
2146 gm_packet_drop(pkt
, false);
2148 while ((pend_grp
= gm_grp_pends_pop(gm_ifp
->grp_pends
))) {
2149 EVENT_OFF(pend_grp
->t_expire
);
2150 XFREE(MTYPE_GM_GRP_PENDING
, pend_grp
);
2153 while ((pend_gsq
= gm_gsq_pends_pop(gm_ifp
->gsq_pends
))) {
2154 EVENT_OFF(pend_gsq
->t_send
);
2155 XFREE(MTYPE_GM_GSQ_PENDING
, pend_gsq
);
2158 while ((sg
= gm_sgs_pop(gm_ifp
->sgs
))) {
2159 EVENT_OFF(sg
->t_sg_expire
);
2160 assertf(!gm_packet_sg_subs_count(sg
->subs_negative
), "%pSG",
2162 assertf(!gm_packet_sg_subs_count(sg
->subs_positive
), "%pSG",
2167 while ((subscriber
= gm_subscribers_pop(gm_ifp
->subscribers
))) {
2168 assertf(!gm_packets_count(subscriber
->packets
), "%pPA",
2170 XFREE(MTYPE_GM_SUBSCRIBER
, subscriber
);
2174 void gm_ifp_teardown(struct interface
*ifp
)
2176 struct pim_interface
*pim_ifp
= ifp
->info
;
2177 struct gm_if
*gm_ifp
;
2179 if (!pim_ifp
|| !pim_ifp
->mld
)
2182 gm_ifp
= pim_ifp
->mld
;
2183 gm_ifp
->stopping
= true;
2184 if (PIM_DEBUG_GM_EVENTS
)
2185 zlog_debug(log_ifp("MLD stop"));
2187 EVENT_OFF(gm_ifp
->t_query
);
2188 EVENT_OFF(gm_ifp
->t_other_querier
);
2189 EVENT_OFF(gm_ifp
->t_expire
);
2191 frr_with_privs (&pimd_privs
) {
2192 struct ipv6_mreq mreq
;
2195 /* all-MLDv2 group */
2196 mreq
.ipv6mr_multiaddr
= gm_all_routers
;
2197 mreq
.ipv6mr_interface
= ifp
->ifindex
;
2198 ret
= setsockopt(gm_ifp
->pim
->gm_socket
, SOL_IPV6
,
2199 IPV6_LEAVE_GROUP
, &mreq
, sizeof(mreq
));
2202 "(%s) failed to leave ff02::16 (all-MLDv2): %m",
2206 gm_vrf_socket_decref(gm_ifp
->pim
);
2208 gm_group_delete(gm_ifp
);
2210 gm_grp_pends_fini(gm_ifp
->grp_pends
);
2211 gm_packet_expires_fini(gm_ifp
->expires
);
2212 gm_subscribers_fini(gm_ifp
->subscribers
);
2213 gm_sgs_fini(gm_ifp
->sgs
);
2215 XFREE(MTYPE_GM_IFACE
, gm_ifp
);
2216 pim_ifp
->mld
= NULL
;
2219 static void gm_update_ll(struct interface
*ifp
)
2221 struct pim_interface
*pim_ifp
= ifp
->info
;
2222 struct gm_if
*gm_ifp
= pim_ifp
->mld
;
2226 !IPV6_ADDR_CMP(&gm_ifp
->cur_ll_lowest
, &gm_ifp
->querier
) &&
2227 !pim_addr_is_any(gm_ifp
->querier
);
2229 gm_ifp
->cur_ll_lowest
= pim_ifp
->ll_lowest
;
2231 gm_ifp
->querier
= pim_ifp
->ll_lowest
;
2232 EVENT_OFF(gm_ifp
->t_query
);
2234 if (pim_addr_is_any(gm_ifp
->cur_ll_lowest
)) {
2237 "lost link-local address, stopping querier"));
2242 zlog_info(log_ifp("new link-local %pPA while querier"),
2243 &gm_ifp
->cur_ll_lowest
);
2244 else if (IPV6_ADDR_CMP(&gm_ifp
->cur_ll_lowest
, &gm_ifp
->querier
) < 0 ||
2245 pim_addr_is_any(gm_ifp
->querier
)) {
2246 zlog_info(log_ifp("new link-local %pPA, becoming querier"),
2247 &gm_ifp
->cur_ll_lowest
);
2248 gm_ifp
->querier
= gm_ifp
->cur_ll_lowest
;
2252 gm_ifp
->n_startup
= gm_ifp
->cur_qrv
;
2253 event_execute(router
->master
, gm_t_query
, gm_ifp
, 0);
2256 void gm_ifp_update(struct interface
*ifp
)
2258 struct pim_interface
*pim_ifp
= ifp
->info
;
2259 struct gm_if
*gm_ifp
;
2260 bool changed
= false;
2264 if (!if_is_operative(ifp
) || !pim_ifp
->pim
||
2265 pim_ifp
->mroute_vif_index
< 0) {
2266 gm_ifp_teardown(ifp
);
2271 * If ipv6 mld is not enabled on interface, do not start mld activites.
2273 if (!pim_ifp
->gm_enable
)
2276 if (!pim_ifp
->mld
) {
2279 assume(pim_ifp
->mld
!= NULL
);
2282 gm_ifp
= pim_ifp
->mld
;
2283 if (IPV6_ADDR_CMP(&pim_ifp
->ll_lowest
, &gm_ifp
->cur_ll_lowest
))
2286 unsigned int cfg_query_intv
= pim_ifp
->gm_default_query_interval
* 1000;
2288 if (gm_ifp
->cur_query_intv
!= cfg_query_intv
) {
2289 gm_ifp
->cur_query_intv
= cfg_query_intv
;
2293 unsigned int cfg_query_intv_trig
=
2294 pim_ifp
->gm_specific_query_max_response_time_dsec
* 100;
2296 if (gm_ifp
->cur_query_intv_trig
!= cfg_query_intv_trig
) {
2297 gm_ifp
->cur_query_intv_trig
= cfg_query_intv_trig
;
2301 unsigned int cfg_max_response
=
2302 pim_ifp
->gm_query_max_response_time_dsec
* 100;
2304 if (gm_ifp
->cur_max_resp
!= cfg_max_response
)
2305 gm_ifp
->cur_max_resp
= cfg_max_response
;
2307 if (gm_ifp
->cur_lmqc
!= pim_ifp
->gm_last_member_query_count
)
2308 gm_ifp
->cur_lmqc
= pim_ifp
->gm_last_member_query_count
;
2310 enum gm_version cfg_version
;
2312 if (pim_ifp
->mld_version
== 1)
2313 cfg_version
= GM_MLDV1
;
2315 cfg_version
= GM_MLDV2
;
2316 if (gm_ifp
->cur_version
!= cfg_version
) {
2317 gm_ifp
->cur_version
= cfg_version
;
2322 if (PIM_DEBUG_GM_TRACE
)
2324 "MLD querier config changed, querying"));
2325 gm_bump_querier(gm_ifp
);
2330 * CLI (show commands only)
2333 #include "lib/command.h"
2335 #include "pimd/pim6_mld_clippy.c"
2337 static struct vrf
*gm_cmd_vrf_lookup(struct vty
*vty
, const char *vrf_str
,
2343 return vrf_lookup_by_id(VRF_DEFAULT
);
2344 if (!strcmp(vrf_str
, "all"))
2346 ret
= vrf_lookup_by_name(vrf_str
);
2350 vty_out(vty
, "%% VRF %pSQq does not exist\n", vrf_str
);
2355 static void gm_show_if_one_detail(struct vty
*vty
, struct interface
*ifp
)
2357 struct pim_interface
*pim_ifp
= (struct pim_interface
*)ifp
->info
;
2358 struct gm_if
*gm_ifp
;
2363 vty_out(vty
, "Interface %s: no PIM/MLD config\n\n", ifp
->name
);
2367 gm_ifp
= pim_ifp
->mld
;
2369 vty_out(vty
, "Interface %s: MLD not running\n\n", ifp
->name
);
2373 querier
= IPV6_ADDR_SAME(&gm_ifp
->querier
, &pim_ifp
->ll_lowest
);
2375 vty_out(vty
, "Interface %s: MLD running\n", ifp
->name
);
2376 vty_out(vty
, " Uptime: %pTVMs\n", &gm_ifp
->started
);
2377 vty_out(vty
, " MLD version: %d\n", gm_ifp
->cur_version
);
2378 vty_out(vty
, " Querier: %pPA%s\n", &gm_ifp
->querier
,
2379 querier
? " (this system)" : "");
2380 vty_out(vty
, " Query timer: %pTH\n", gm_ifp
->t_query
);
2381 vty_out(vty
, " Other querier timer: %pTH\n",
2382 gm_ifp
->t_other_querier
);
2383 vty_out(vty
, " Robustness value: %u\n", gm_ifp
->cur_qrv
);
2384 vty_out(vty
, " Query interval: %ums\n",
2385 gm_ifp
->cur_query_intv
);
2386 vty_out(vty
, " Query response timer: %ums\n", gm_ifp
->cur_max_resp
);
2387 vty_out(vty
, " Last member query intv.: %ums\n",
2388 gm_ifp
->cur_query_intv_trig
);
2389 vty_out(vty
, " %u expiry timers from general queries:\n",
2391 for (i
= 0; i
< gm_ifp
->n_pending
; i
++) {
2392 struct gm_general_pending
*p
= &gm_ifp
->pending
[i
];
2394 vty_out(vty
, " %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
2395 &p
->query
, &p
->expiry
);
2397 vty_out(vty
, " %zu expiry timers from *,G queries\n",
2398 gm_grp_pends_count(gm_ifp
->grp_pends
));
2399 vty_out(vty
, " %zu expiry timers from S,G queries\n",
2400 gm_gsq_pends_count(gm_ifp
->gsq_pends
));
2401 vty_out(vty
, " %zu total *,G/S,G from %zu hosts in %zu bundles\n",
2402 gm_sgs_count(gm_ifp
->sgs
),
2403 gm_subscribers_count(gm_ifp
->subscribers
),
2404 gm_packet_expires_count(gm_ifp
->expires
));
2408 static void gm_show_if_one(struct vty
*vty
, struct interface
*ifp
,
2409 json_object
*js_if
, struct ttable
*tt
)
2411 struct pim_interface
*pim_ifp
= (struct pim_interface
*)ifp
->info
;
2412 struct gm_if
*gm_ifp
= pim_ifp
->mld
;
2415 assume(js_if
|| tt
);
2417 querier
= IPV6_ADDR_SAME(&gm_ifp
->querier
, &pim_ifp
->ll_lowest
);
2420 json_object_string_add(js_if
, "name", ifp
->name
);
2421 json_object_string_addf(js_if
, "address", "%pPA",
2422 &pim_ifp
->primary_address
);
2423 json_object_string_add(js_if
, "state", "up");
2424 json_object_string_addf(js_if
, "version", "%d",
2425 gm_ifp
->cur_version
);
2426 json_object_string_addf(js_if
, "upTime", "%pTVMs",
2428 json_object_boolean_add(js_if
, "querier", querier
);
2429 json_object_string_addf(js_if
, "querierIp", "%pPA",
2432 json_object_string_addf(js_if
, "queryTimer", "%pTH",
2435 json_object_string_addf(js_if
, "otherQuerierTimer",
2437 gm_ifp
->t_other_querier
);
2438 json_object_int_add(js_if
, "timerRobustnessValue",
2440 json_object_int_add(js_if
, "lastMemberQueryCount",
2442 json_object_int_add(js_if
, "timerQueryIntervalMsec",
2443 gm_ifp
->cur_query_intv
);
2444 json_object_int_add(js_if
, "timerQueryResponseTimerMsec",
2445 gm_ifp
->cur_max_resp
);
2446 json_object_int_add(js_if
, "timerLastMemberQueryIntervalMsec",
2447 gm_ifp
->cur_query_intv_trig
);
2449 ttable_add_row(tt
, "%s|%s|%pPAs|%d|%s|%pPAs|%pTH|%pTVMs",
2450 ifp
->name
, "up", &pim_ifp
->primary_address
,
2451 gm_ifp
->cur_version
, querier
? "local" : "other",
2452 &gm_ifp
->querier
, gm_ifp
->t_query
,
2457 static void gm_show_if_vrf(struct vty
*vty
, struct vrf
*vrf
, const char *ifname
,
2458 bool detail
, json_object
*js
)
2460 struct interface
*ifp
;
2461 json_object
*js_vrf
= NULL
;
2462 struct pim_interface
*pim_ifp
;
2463 struct ttable
*tt
= NULL
;
2467 js_vrf
= json_object_new_object();
2468 json_object_object_add(js
, vrf
->name
, js_vrf
);
2471 if (!js
&& !detail
) {
2472 /* Prepare table. */
2473 tt
= ttable_new(&ttable_styles
[TTSTYLE_BLANK
]);
2476 "Interface|State|Address|V|Querier|QuerierIp|Query Timer|Uptime");
2477 tt
->style
.cell
.rpad
= 2;
2478 tt
->style
.corner
= '+';
2482 FOR_ALL_INTERFACES (vrf
, ifp
) {
2483 json_object
*js_if
= NULL
;
2485 if (ifname
&& strcmp(ifp
->name
, ifname
))
2487 if (detail
&& !js
) {
2488 gm_show_if_one_detail(vty
, ifp
);
2492 pim_ifp
= ifp
->info
;
2494 if (!pim_ifp
|| !pim_ifp
->mld
)
2498 js_if
= json_object_new_object();
2500 * If we have js as true and detail as false
2501 * and if Coverity thinks that js_if is NULL
2502 * because of a failed call to new then
2503 * when we call gm_show_if_one below
2504 * the tt can be deref'ed and as such
2505 * FRR will crash. But since we know
2506 * that json_object_new_object never fails
2507 * then let's tell Coverity that this assumption
2508 * is true. I'm not worried about fast path
2512 json_object_object_add(js_vrf
, ifp
->name
, js_if
);
2515 gm_show_if_one(vty
, ifp
, js_if
, tt
);
2518 /* Dump the generated table. */
2519 if (!js
&& !detail
) {
2520 table
= ttable_dump(tt
, "\n");
2521 vty_out(vty
, "%s\n", table
);
2522 XFREE(MTYPE_TMP
, table
);
2527 static void gm_show_if(struct vty
*vty
, struct vrf
*vrf
, const char *ifname
,
2528 bool detail
, json_object
*js
)
2531 gm_show_if_vrf(vty
, vrf
, ifname
, detail
, js
);
2533 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
2534 gm_show_if_vrf(vty
, vrf
, ifname
, detail
, js
);
2537 DEFPY(gm_show_interface
,
2538 gm_show_interface_cmd
,
2539 "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME | detail$detail] [json$json]",
2543 VRF_FULL_CMD_HELP_STR
2544 "MLD interface information\n"
2549 int ret
= CMD_SUCCESS
;
2551 json_object
*js
= NULL
;
2553 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
2554 if (ret
!= CMD_SUCCESS
)
2558 js
= json_object_new_object();
2559 gm_show_if(vty
, vrf
, ifname
, !!detail
, js
);
2560 return vty_json(vty
, js
);
2563 static void gm_show_stats_one(struct vty
*vty
, struct gm_if
*gm_ifp
,
2566 struct gm_if_stats
*stats
= &gm_ifp
->stats
;
2567 /* clang-format off */
2572 } *item
, items
[] = {
2573 { "v2 reports received", "rxV2Reports", &stats
->rx_new_report
},
2574 { "v1 reports received", "rxV1Reports", &stats
->rx_old_report
},
2575 { "v1 done received", "rxV1Done", &stats
->rx_old_leave
},
2577 { "v2 *,* queries received", "rxV2QueryGeneral", &stats
->rx_query_new_general
},
2578 { "v2 *,G queries received", "rxV2QueryGroup", &stats
->rx_query_new_group
},
2579 { "v2 S,G queries received", "rxV2QueryGroupSource", &stats
->rx_query_new_groupsrc
},
2580 { "v2 S-bit queries received", "rxV2QuerySBit", &stats
->rx_query_new_sbit
},
2581 { "v1 *,* queries received", "rxV1QueryGeneral", &stats
->rx_query_old_general
},
2582 { "v1 *,G queries received", "rxV1QueryGroup", &stats
->rx_query_old_group
},
2584 { "v2 *,* queries sent", "txV2QueryGeneral", &stats
->tx_query_new_general
},
2585 { "v2 *,G queries sent", "txV2QueryGroup", &stats
->tx_query_new_group
},
2586 { "v2 S,G queries sent", "txV2QueryGroupSource", &stats
->tx_query_new_groupsrc
},
2587 { "v1 *,* queries sent", "txV1QueryGeneral", &stats
->tx_query_old_general
},
2588 { "v1 *,G queries sent", "txV1QueryGroup", &stats
->tx_query_old_group
},
2589 { "TX errors", "txErrors", &stats
->tx_query_fail
},
2591 { "RX dropped (checksum error)", "rxDropChecksum", &stats
->rx_drop_csum
},
2592 { "RX dropped (invalid source)", "rxDropSrcAddr", &stats
->rx_drop_srcaddr
},
2593 { "RX dropped (invalid dest.)", "rxDropDstAddr", &stats
->rx_drop_dstaddr
},
2594 { "RX dropped (missing alert)", "rxDropRtrAlert", &stats
->rx_drop_ra
},
2595 { "RX dropped (malformed pkt.)", "rxDropMalformed", &stats
->rx_drop_malformed
},
2596 { "RX truncated reports", "rxTruncatedRep", &stats
->rx_trunc_report
},
2598 /* clang-format on */
2600 for (item
= items
; item
< items
+ array_size(items
); item
++) {
2602 json_object_int_add(js_if
, item
->js_key
, *item
->val
);
2604 vty_out(vty
, " %-30s %" PRIu64
"\n", item
->text
,
2609 static void gm_show_stats_vrf(struct vty
*vty
, struct vrf
*vrf
,
2610 const char *ifname
, json_object
*js
)
2612 struct interface
*ifp
;
2613 json_object
*js_vrf
;
2616 js_vrf
= json_object_new_object();
2617 json_object_object_add(js
, vrf
->name
, js_vrf
);
2620 FOR_ALL_INTERFACES (vrf
, ifp
) {
2621 struct pim_interface
*pim_ifp
;
2622 struct gm_if
*gm_ifp
;
2623 json_object
*js_if
= NULL
;
2625 if (ifname
&& strcmp(ifp
->name
, ifname
))
2630 pim_ifp
= ifp
->info
;
2633 gm_ifp
= pim_ifp
->mld
;
2636 js_if
= json_object_new_object();
2637 json_object_object_add(js_vrf
, ifp
->name
, js_if
);
2639 vty_out(vty
, "Interface: %s\n", ifp
->name
);
2641 gm_show_stats_one(vty
, gm_ifp
, js_if
);
2647 DEFPY(gm_show_interface_stats
,
2648 gm_show_interface_stats_cmd
,
2649 "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
2653 VRF_FULL_CMD_HELP_STR
2659 int ret
= CMD_SUCCESS
;
2661 json_object
*js
= NULL
;
2663 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
2664 if (ret
!= CMD_SUCCESS
)
2668 js
= json_object_new_object();
2671 gm_show_stats_vrf(vty
, vrf
, ifname
, js
);
2673 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
2674 gm_show_stats_vrf(vty
, vrf
, ifname
, js
);
2675 return vty_json(vty
, js
);
2678 static void gm_show_joins_one(struct vty
*vty
, struct gm_if
*gm_ifp
,
2679 const struct prefix_ipv6
*groups
,
2680 const struct prefix_ipv6
*sources
, bool detail
,
2683 struct gm_sg
*sg
, *sg_start
;
2684 json_object
*js_group
= NULL
;
2685 pim_addr js_grpaddr
= PIMADDR_ANY
;
2686 struct gm_subscriber sub_ref
= {}, *sub_untracked
;
2689 struct gm_sg sg_ref
= {};
2691 sg_ref
.sgaddr
.grp
= pim_addr_from_prefix(groups
);
2692 sg_start
= gm_sgs_find_gteq(gm_ifp
->sgs
, &sg_ref
);
2694 sg_start
= gm_sgs_first(gm_ifp
->sgs
);
2696 sub_ref
.addr
= gm_dummy_untracked
;
2697 sub_untracked
= gm_subscribers_find(gm_ifp
->subscribers
, &sub_ref
);
2698 /* NB: sub_untracked may be NULL if no untracked joins exist */
2700 frr_each_from (gm_sgs
, gm_ifp
->sgs
, sg
, sg_start
) {
2701 struct timeval
*recent
= NULL
, *untracked
= NULL
;
2702 json_object
*js_src
;
2705 struct prefix grp_p
;
2707 pim_addr_to_prefix(&grp_p
, sg
->sgaddr
.grp
);
2708 if (!prefix_match(groups
, &grp_p
))
2713 struct prefix src_p
;
2715 pim_addr_to_prefix(&src_p
, sg
->sgaddr
.src
);
2716 if (!prefix_match(sources
, &src_p
))
2720 if (sg
->most_recent
) {
2721 struct gm_packet_state
*packet
;
2723 packet
= gm_packet_sg2state(sg
->most_recent
);
2724 recent
= &packet
->received
;
2727 if (sub_untracked
) {
2728 struct gm_packet_state
*packet
;
2729 struct gm_packet_sg
*item
;
2731 item
= gm_packet_sg_find(sg
, GM_SUB_POS
, sub_untracked
);
2733 packet
= gm_packet_sg2state(item
);
2734 untracked
= &packet
->received
;
2739 FMT_NSTD_BEGIN
; /* %.0p */
2741 "%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
2742 &sg
->sgaddr
.grp
, &sg
->sgaddr
.src
,
2743 gm_states
[sg
->state
], recent
, untracked
,
2749 struct gm_packet_sg
*item
;
2750 struct gm_packet_state
*packet
;
2752 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
2753 packet
= gm_packet_sg2state(item
);
2755 if (packet
->subscriber
== sub_untracked
)
2757 vty_out(vty
, " %-58pPA %-16s %10.0pTVMs\n",
2758 &packet
->subscriber
->addr
, "(JOIN)",
2761 frr_each (gm_packet_sg_subs
, sg
->subs_negative
, item
) {
2762 packet
= gm_packet_sg2state(item
);
2764 if (packet
->subscriber
== sub_untracked
)
2766 vty_out(vty
, " %-58pPA %-16s %10.0pTVMs\n",
2767 &packet
->subscriber
->addr
, "(PRUNE)",
2770 FMT_NSTD_END
; /* %.0p */
2775 if (!js_group
|| pim_addr_cmp(js_grpaddr
, sg
->sgaddr
.grp
)) {
2776 js_group
= json_object_new_object();
2777 json_object_object_addf(js_if
, js_group
, "%pPA",
2779 js_grpaddr
= sg
->sgaddr
.grp
;
2782 js_src
= json_object_new_object();
2783 json_object_object_addf(js_group
, js_src
, "%pPAs",
2786 json_object_string_add(js_src
, "state", gm_states
[sg
->state
]);
2787 json_object_string_addf(js_src
, "created", "%pTVMs",
2789 json_object_string_addf(js_src
, "lastSeen", "%pTVMs", recent
);
2792 json_object_string_addf(js_src
, "untrackedLastSeen",
2793 "%pTVMs", untracked
);
2797 json_object
*js_subs
;
2798 struct gm_packet_sg
*item
;
2799 struct gm_packet_state
*packet
;
2801 js_subs
= json_object_new_object();
2802 json_object_object_add(js_src
, "joinedBy", js_subs
);
2803 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
2804 packet
= gm_packet_sg2state(item
);
2805 if (packet
->subscriber
== sub_untracked
)
2808 json_object
*js_sub
;
2810 js_sub
= json_object_new_object();
2811 json_object_object_addf(js_subs
, js_sub
, "%pPA",
2812 &packet
->subscriber
->addr
);
2813 json_object_string_addf(js_sub
, "lastSeen", "%pTVMs",
2817 js_subs
= json_object_new_object();
2818 json_object_object_add(js_src
, "prunedBy", js_subs
);
2819 frr_each (gm_packet_sg_subs
, sg
->subs_negative
, item
) {
2820 packet
= gm_packet_sg2state(item
);
2821 if (packet
->subscriber
== sub_untracked
)
2824 json_object
*js_sub
;
2826 js_sub
= json_object_new_object();
2827 json_object_object_addf(js_subs
, js_sub
, "%pPA",
2828 &packet
->subscriber
->addr
);
2829 json_object_string_addf(js_sub
, "lastSeen", "%pTVMs",
2835 static void gm_show_joins_vrf(struct vty
*vty
, struct vrf
*vrf
,
2837 const struct prefix_ipv6
*groups
,
2838 const struct prefix_ipv6
*sources
, bool detail
,
2841 struct interface
*ifp
;
2842 json_object
*js_vrf
;
2845 js_vrf
= json_object_new_object();
2846 json_object_string_add(js_vrf
, "vrf", vrf
->name
);
2847 json_object_object_add(js
, vrf
->name
, js_vrf
);
2850 FOR_ALL_INTERFACES (vrf
, ifp
) {
2851 struct pim_interface
*pim_ifp
;
2852 struct gm_if
*gm_ifp
;
2853 json_object
*js_if
= NULL
;
2855 if (ifname
&& strcmp(ifp
->name
, ifname
))
2860 pim_ifp
= ifp
->info
;
2863 gm_ifp
= pim_ifp
->mld
;
2866 js_if
= json_object_new_object();
2867 json_object_object_add(js_vrf
, ifp
->name
, js_if
);
2871 vty_out(vty
, "\nOn interface %s:\n", ifp
->name
);
2873 gm_show_joins_one(vty
, gm_ifp
, groups
, sources
, detail
, js_if
);
2877 DEFPY(gm_show_interface_joins
,
2878 gm_show_interface_joins_cmd
,
2879 "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
2883 VRF_FULL_CMD_HELP_STR
2884 "MLD joined groups & sources\n"
2887 "Limit output to group range\n"
2888 "Show groups covered by this prefix\n"
2889 "Limit output to source range\n"
2890 "Show sources covered by this prefix\n"
2891 "Show details, including tracked receivers\n"
2894 int ret
= CMD_SUCCESS
;
2896 json_object
*js
= NULL
;
2898 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
2899 if (ret
!= CMD_SUCCESS
)
2903 js
= json_object_new_object();
2905 vty_out(vty
, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
2906 "Source", "State", "LastSeen", "NonTrkSeen", "Created");
2909 gm_show_joins_vrf(vty
, vrf
, ifname
, groups
, sources
, !!detail
,
2912 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
2913 gm_show_joins_vrf(vty
, vrf
, ifname
, groups
, sources
,
2915 return vty_json(vty
, js
);
2918 static void gm_show_groups(struct vty
*vty
, struct vrf
*vrf
, bool uj
)
2920 struct interface
*ifp
;
2921 struct ttable
*tt
= NULL
;
2923 json_object
*json
= NULL
;
2924 json_object
*json_iface
= NULL
;
2925 json_object
*json_group
= NULL
;
2926 json_object
*json_groups
= NULL
;
2927 struct pim_instance
*pim
= vrf
->info
;
2930 json
= json_object_new_object();
2931 json_object_int_add(json
, "totalGroups", pim
->gm_group_count
);
2932 json_object_int_add(json
, "watermarkLimit",
2933 pim
->gm_watermark_limit
);
2935 /* Prepare table. */
2936 tt
= ttable_new(&ttable_styles
[TTSTYLE_BLANK
]);
2937 ttable_add_row(tt
, "Interface|Group|Version|Uptime");
2938 tt
->style
.cell
.rpad
= 2;
2939 tt
->style
.corner
= '+';
2942 vty_out(vty
, "Total MLD groups: %u\n", pim
->gm_group_count
);
2943 vty_out(vty
, "Watermark warn limit(%s): %u\n",
2944 pim
->gm_watermark_limit
? "Set" : "Not Set",
2945 pim
->gm_watermark_limit
);
2948 /* scan interfaces */
2949 FOR_ALL_INTERFACES (vrf
, ifp
) {
2951 struct pim_interface
*pim_ifp
= ifp
->info
;
2952 struct gm_if
*gm_ifp
;
2958 gm_ifp
= pim_ifp
->mld
;
2962 /* scan mld groups */
2963 frr_each (gm_sgs
, gm_ifp
->sgs
, sg
) {
2966 json_object_object_get_ex(json
, ifp
->name
,
2970 json_iface
= json_object_new_object();
2971 json_object_pim_ifp_add(json_iface
,
2973 json_object_object_add(json
, ifp
->name
,
2975 json_groups
= json_object_new_array();
2976 json_object_object_add(json_iface
,
2981 json_group
= json_object_new_object();
2982 json_object_string_addf(json_group
, "group",
2986 json_object_int_add(json_group
, "version",
2987 pim_ifp
->mld_version
);
2988 json_object_string_addf(json_group
, "uptime",
2989 "%pTVMs", &sg
->created
);
2990 json_object_array_add(json_groups
, json_group
);
2992 ttable_add_row(tt
, "%s|%pPAs|%d|%pTVMs",
2993 ifp
->name
, &sg
->sgaddr
.grp
,
2994 pim_ifp
->mld_version
,
2997 } /* scan gm groups */
2998 } /* scan interfaces */
3001 vty_json(vty
, json
);
3003 /* Dump the generated table. */
3004 table
= ttable_dump(tt
, "\n");
3005 vty_out(vty
, "%s\n", table
);
3006 XFREE(MTYPE_TMP
, table
);
3011 DEFPY(gm_show_mld_groups
,
3012 gm_show_mld_groups_cmd
,
3013 "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
3017 VRF_FULL_CMD_HELP_STR
3021 int ret
= CMD_SUCCESS
;
3024 vrf
= gm_cmd_vrf_lookup(vty
, vrf_str
, &ret
);
3025 if (ret
!= CMD_SUCCESS
)
3029 gm_show_groups(vty
, vrf
, !!json
);
3031 RB_FOREACH (vrf
, vrf_name_head
, &vrfs_by_name
)
3032 gm_show_groups(vty
, vrf
, !!json
);
3037 DEFPY(gm_debug_show
,
3039 "debug show mld interface IFNAME",
3046 struct interface
*ifp
;
3047 struct pim_interface
*pim_ifp
;
3048 struct gm_if
*gm_ifp
;
3050 ifp
= if_lookup_by_name(ifname
, VRF_DEFAULT
);
3052 vty_out(vty
, "%% no such interface: %pSQq\n", ifname
);
3056 pim_ifp
= ifp
->info
;
3058 vty_out(vty
, "%% no PIM state for interface %pSQq\n", ifname
);
3062 gm_ifp
= pim_ifp
->mld
;
3064 vty_out(vty
, "%% no MLD state for interface %pSQq\n", ifname
);
3068 vty_out(vty
, "querier: %pPA\n", &gm_ifp
->querier
);
3069 vty_out(vty
, "ll_lowest: %pPA\n\n", &pim_ifp
->ll_lowest
);
3070 vty_out(vty
, "t_query: %pTHD\n", gm_ifp
->t_query
);
3071 vty_out(vty
, "t_other_querier: %pTHD\n", gm_ifp
->t_other_querier
);
3072 vty_out(vty
, "t_expire: %pTHD\n", gm_ifp
->t_expire
);
3074 vty_out(vty
, "\nn_pending: %u\n", gm_ifp
->n_pending
);
3075 for (size_t i
= 0; i
< gm_ifp
->n_pending
; i
++) {
3076 int64_t query
, expiry
;
3078 query
= monotime_since(&gm_ifp
->pending
[i
].query
, NULL
);
3079 expiry
= monotime_until(&gm_ifp
->pending
[i
].expiry
, NULL
);
3081 vty_out(vty
, "[%zu]: query %"PRId64
"ms ago, expiry in %"PRId64
"ms\n",
3082 i
, query
/ 1000, expiry
/ 1000);
3086 struct gm_packet_state
*pkt
;
3087 struct gm_packet_sg
*item
;
3088 struct gm_subscriber
*subscriber
;
3090 vty_out(vty
, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp
->sgs
));
3091 frr_each (gm_sgs
, gm_ifp
->sgs
, sg
) {
3092 vty_out(vty
, "\t%pSG t_expire=%pTHD\n", &sg
->sgaddr
,
3095 vty_out(vty
, "\t @pos:%zu\n",
3096 gm_packet_sg_subs_count(sg
->subs_positive
));
3097 frr_each (gm_packet_sg_subs
, sg
->subs_positive
, item
) {
3098 pkt
= gm_packet_sg2state(item
);
3100 vty_out(vty
, "\t\t+%s%s [%pPAs %p] %p+%u\n",
3101 item
->is_src
? "S" : "",
3102 item
->is_excl
? "E" : "",
3103 &pkt
->subscriber
->addr
, pkt
->subscriber
, pkt
,
3106 assert(item
->sg
== sg
);
3108 vty_out(vty
, "\t @neg:%zu\n",
3109 gm_packet_sg_subs_count(sg
->subs_negative
));
3110 frr_each (gm_packet_sg_subs
, sg
->subs_negative
, item
) {
3111 pkt
= gm_packet_sg2state(item
);
3113 vty_out(vty
, "\t\t-%s%s [%pPAs %p] %p+%u\n",
3114 item
->is_src
? "S" : "",
3115 item
->is_excl
? "E" : "",
3116 &pkt
->subscriber
->addr
, pkt
->subscriber
, pkt
,
3119 assert(item
->sg
== sg
);
3123 vty_out(vty
, "\n%zu subscribers:\n",
3124 gm_subscribers_count(gm_ifp
->subscribers
));
3125 frr_each (gm_subscribers
, gm_ifp
->subscribers
, subscriber
) {
3126 vty_out(vty
, "\t%pPA %p %zu packets\n", &subscriber
->addr
,
3127 subscriber
, gm_packets_count(subscriber
->packets
));
3129 frr_each (gm_packets
, subscriber
->packets
, pkt
) {
3130 vty_out(vty
, "\t\t%p %.3fs ago %u of %u items active\n",
3132 monotime_since(&pkt
->received
, NULL
) *
3134 pkt
->n_active
, pkt
->n_sg
);
3136 for (size_t i
= 0; i
< pkt
->n_sg
; i
++) {
3137 item
= pkt
->items
+ i
;
3139 vty_out(vty
, "\t\t[%zu]", i
);
3142 vty_out(vty
, " inactive\n");
3146 vty_out(vty
, " %s%s %pSG nE=%u\n",
3147 item
->is_src
? "S" : "",
3148 item
->is_excl
? "E" : "",
3149 &item
->sg
->sgaddr
, item
->n_exclude
);
3157 DEFPY(gm_debug_iface_cfg
,
3158 gm_debug_iface_cfg_cmd
,
3161 "query-max-response-time (1-8387584)"
3165 "Multicast Listener Discovery\n"
3167 "maxresp\nmaxresp\n")
3169 VTY_DECLVAR_CONTEXT(interface
, ifp
);
3170 struct pim_interface
*pim_ifp
;
3171 struct gm_if
*gm_ifp
;
3172 bool changed
= false;
3174 pim_ifp
= ifp
->info
;
3176 vty_out(vty
, "%% no PIM state for interface %pSQq\n",
3180 gm_ifp
= pim_ifp
->mld
;
3182 vty_out(vty
, "%% no MLD state for interface %pSQq\n",
3187 if (robustness_str
&& gm_ifp
->cur_qrv
!= robustness
) {
3188 gm_ifp
->cur_qrv
= robustness
;
3191 if (query_max_response_time_str
&&
3192 gm_ifp
->cur_max_resp
!= (unsigned int)query_max_response_time
) {
3193 gm_ifp
->cur_max_resp
= query_max_response_time
;
3198 vty_out(vty
, "%% MLD querier config changed, bumping\n");
3199 gm_bump_querier(gm_ifp
);
3204 void gm_cli_init(void);
3206 void gm_cli_init(void)
3208 install_element(VIEW_NODE
, &gm_show_interface_cmd
);
3209 install_element(VIEW_NODE
, &gm_show_interface_stats_cmd
);
3210 install_element(VIEW_NODE
, &gm_show_interface_joins_cmd
);
3211 install_element(VIEW_NODE
, &gm_show_mld_groups_cmd
);
3213 install_element(VIEW_NODE
, &gm_debug_show_cmd
);
3214 install_element(INTERFACE_NODE
, &gm_debug_iface_cfg_cmd
);