/* pimd/pim6_mld.c - PIMv6 MLD querier (FRRouting) */
acddc0ed 1// SPDX-License-Identifier: GPL-2.0-or-later
5e5034b0
DL
2/*
3 * PIMv6 MLD querier
4 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
5e5034b0
DL
5 */
6
7/*
8 * keep pim6_mld.h open when working on this code. Most data structures are
9 * commented in the header.
10 *
11 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
12 * that this code will replace the old IGMP querier at some point.
13 */
14
15#include <zebra.h>
16#include <netinet/ip6.h>
17
18#include "lib/memory.h"
19#include "lib/jhash.h"
20#include "lib/prefix.h"
21#include "lib/checksum.h"
22#include "lib/thread.h"
cdc1b770 23#include "termtable.h"
5e5034b0
DL
24
25#include "pimd/pim6_mld.h"
26#include "pimd/pim6_mld_protocol.h"
27#include "pimd/pim_memory.h"
28#include "pimd/pim_instance.h"
29#include "pimd/pim_iface.h"
cdc1b770
SG
30#include "pimd/pim6_cmd.h"
31#include "pimd/pim_cmd_common.h"
5e5034b0
DL
32#include "pimd/pim_util.h"
33#include "pimd/pim_tib.h"
34#include "pimd/pimd.h"
35
36#ifndef IPV6_MULTICAST_ALL
37#define IPV6_MULTICAST_ALL 29
38#endif
39
40DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface");
41DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet");
42DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber");
43DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state");
44DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)");
45DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
46DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");
47
48static void gm_t_query(struct thread *t);
49static void gm_trigger_specific(struct gm_sg *sg);
50static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
51 struct timeval expire_wait);
52
53/* shorthand for log messages */
54#define log_ifp(msg) \
55 "[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
56#define log_pkt_src(msg) \
57 "[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
58 &pkt_src->sin6_addr
59#define log_sg(sg, msg) \
60 "[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
61 sg->iface->ifp->name, &sg->sgaddr
62
/* clang-format off */
#if PIM_IPV == 6
/* ff02::1 - all-nodes link-local multicast group */
static const pim_addr gm_all_hosts = {
	.s6_addr = {
		0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
	},
};
/* ff02::16 - all MLDv2-capable routers link-local multicast group */
static const pim_addr gm_all_routers = {
	.s6_addr = {
		0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
	},
};
/* MLDv1 does not allow subscriber tracking due to report suppression
 * hence, the source address is replaced with ffff:...:ffff
 */
static const pim_addr gm_dummy_untracked = {
	.s6_addr = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	},
};
#else
/* 224.0.0.1 */
static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), };
/* 224.0.0.22 */
static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), };
/* all-ones placeholder for untracked v1 subscribers (IPv4 variant) */
static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, };
#endif
/* clang-format on */
94
95#define IPV6_MULTICAST_SCOPE_LINK 2
96
97static inline uint8_t in6_multicast_scope(const pim_addr *addr)
98{
99 return addr->s6_addr[1] & 0xf;
100}
101
102static inline bool in6_multicast_nofwd(const pim_addr *addr)
103{
104 return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK;
105}
106
107/*
108 * (S,G) -> subscriber,(S,G)
109 */
110
111static int gm_packet_sg_cmp(const struct gm_packet_sg *a,
112 const struct gm_packet_sg *b)
113{
114 const struct gm_packet_state *s_a, *s_b;
115
116 s_a = gm_packet_sg2state(a);
117 s_b = gm_packet_sg2state(b);
118 return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr);
119}
120
121DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm,
122 gm_packet_sg_cmp);
123
/* Look up the item a given subscriber has on sg's positive or negative
 * (sense) subscription list.
 *
 * The RB-tree is keyed by subscriber address, which gm_packet_sg_cmp()
 * reaches through the item's containing gm_packet_state (located by the
 * item's offset field).  To build a lookup key we therefore fake a minimal
 * packet-state + item pair on the stack; offset = 0 makes
 * gm_packet_sg2state() resolve to ref.hdr.
 */
static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg,
					      enum gm_sub_sense sense,
					      struct gm_subscriber *sub)
{
	struct {
		struct gm_packet_state hdr;
		struct gm_packet_sg item;
	} ref = {
		/* clang-format off */
		.hdr = {
			.subscriber = sub,
		},
		.item = {
			.offset = 0,
		},
		/* clang-format on */
	};

	return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item);
}
144
145/*
146 * interface -> (*,G),pending
147 */
148
149static int gm_grp_pending_cmp(const struct gm_grp_pending *a,
150 const struct gm_grp_pending *b)
151{
152 return IPV6_ADDR_CMP(&a->grp, &b->grp);
153}
154
155DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm,
156 gm_grp_pending_cmp);
157
158/*
159 * interface -> ([S1,S2,...],G),pending
160 */
161
162static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a,
163 const struct gm_gsq_pending *b)
164{
165 if (a->s_bit != b->s_bit)
166 return numcmp(a->s_bit, b->s_bit);
167
168 return IPV6_ADDR_CMP(&a->grp, &b->grp);
169}
170
171static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a)
172{
173 uint32_t seed = a->s_bit ? 0x68f0eb5e : 0x156b7f19;
174
175 return jhash(&a->grp, sizeof(a->grp), seed);
176}
177
178DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp,
179 gm_gsq_pending_hash);
180
181/*
182 * interface -> (S,G)
183 */
184
185static int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b)
186{
187 return pim_sgaddr_cmp(a->sgaddr, b->sgaddr);
188}
189
190DECLARE_RBTREE_UNIQ(gm_sgs, struct gm_sg, itm, gm_sg_cmp);
191
192static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
193 pim_addr src)
194{
195 struct gm_sg ref = {};
196
197 ref.sgaddr.grp = grp;
198 ref.sgaddr.src = src;
199 return gm_sgs_find(gm_ifp->sgs, &ref);
200}
201
202static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
203 pim_addr src)
204{
205 struct gm_sg *ret, *prev;
206
207 ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
208 ret->sgaddr.grp = grp;
209 ret->sgaddr.src = src;
210 ret->iface = gm_ifp;
211 prev = gm_sgs_add(gm_ifp->sgs, ret);
212
213 if (prev) {
214 XFREE(MTYPE_GM_SG, ret);
215 ret = prev;
216 } else {
aa2f9349 217 monotime(&ret->created);
5e5034b0
DL
218 gm_packet_sg_subs_init(ret->subs_positive);
219 gm_packet_sg_subs_init(ret->subs_negative);
220 }
221 return ret;
222}
223
224/*
225 * interface -> packets, sorted by expiry (because add_tail insert order)
226 */
227
228DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm);
229
230/*
231 * subscriber -> packets
232 */
233
234DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm);
235
236/*
237 * interface -> subscriber
238 */
239
240static int gm_subscriber_cmp(const struct gm_subscriber *a,
241 const struct gm_subscriber *b)
242{
243 return IPV6_ADDR_CMP(&a->addr, &b->addr);
244}
245
246static uint32_t gm_subscriber_hash(const struct gm_subscriber *a)
247{
248 return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4);
249}
250
251DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp,
252 gm_subscriber_hash);
253
254static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp,
255 pim_addr addr)
256{
257 struct gm_subscriber ref = {}, *ret;
258
259 ref.addr = addr;
260 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
261 if (ret)
262 ret->refcount++;
263 return ret;
264}
265
266static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp,
267 pim_addr addr)
268{
269 struct gm_subscriber ref = {}, *ret;
270
271 ref.addr = addr;
272 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
273
274 if (!ret) {
275 ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret));
276 ret->iface = gm_ifp;
277 ret->addr = addr;
278 ret->refcount = 1;
aa2f9349 279 monotime(&ret->created);
5e5034b0
DL
280 gm_packets_init(ret->packets);
281
282 gm_subscribers_add(gm_ifp->subscribers, ret);
283 }
284 return ret;
285}
286
287static void gm_subscriber_drop(struct gm_subscriber **subp)
288{
289 struct gm_subscriber *sub = *subp;
290 struct gm_if *gm_ifp;
291
292 if (!sub)
293 return;
294 gm_ifp = sub->iface;
295
296 *subp = NULL;
297 sub->refcount--;
298
299 if (sub->refcount)
300 return;
301
302 gm_subscribers_del(gm_ifp->subscribers, sub);
303 XFREE(MTYPE_GM_SUBSCRIBER, sub);
304}
305
306/****************************************************************************/
307
/* bundle query timer values for combined v1/v2 handling */
struct gm_query_timers {
	/* querier robustness variable (expected retransmit count) */
	unsigned int qrv;
	/* maximum response time, in milliseconds */
	unsigned int max_resp_ms;
	/* query interval, in milliseconds */
	unsigned int qqic_ms;

	/* configured fuzz margin added on top of the computed wait */
	struct timeval fuzz;
	/* output of gm_expiry_calc(): total wait until state expires */
	struct timeval expire_wait;
};
317
318static void gm_expiry_calc(struct gm_query_timers *timers)
319{
320 unsigned int expire =
321 (timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms;
322 ldiv_t exp_div = ldiv(expire, 1000);
323
324 timers->expire_wait.tv_sec = exp_div.quot;
325 timers->expire_wait.tv_usec = exp_div.rem * 1000;
326 timeradd(&timers->expire_wait, &timers->fuzz, &timers->expire_wait);
327}
328
/* Free an (S,G) entry: cancel its pending specific query, tear down both
 * subscription lists (which must already be empty, cf. the assertf in
 * gm_sg_update), and release the memory.
 */
static void gm_sg_free(struct gm_sg *sg)
{
	/* t_sg_expiry is handled before this is reached */
	THREAD_OFF(sg->t_sg_query);
	gm_packet_sg_subs_fini(sg->subs_negative);
	gm_packet_sg_subs_fini(sg->subs_positive);
	XFREE(MTYPE_GM_SG, sg);
}
337
/* clang-format off */
/* printable names for enum gm_sg_state, indexed by state value (used in
 * debug/log output only)
 */
static const char *const gm_states[] = {
	[GM_SG_NOINFO] = "NOINFO",
	[GM_SG_JOIN] = "JOIN",
	[GM_SG_JOIN_EXPIRING] = "JOIN_EXPIRING",
	[GM_SG_PRUNE] = "PRUNE",
	[GM_SG_NOPRUNE] = "NOPRUNE",
	[GM_SG_NOPRUNE_EXPIRING] = "NOPRUNE_EXPIRING",
};
/* clang-format on */
348
349CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported");
350/* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
351 * joined. Whether we actually want/need to support this is a separate
352 * question - it is almost never used. In fact this is exactly what RFC5790
353 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
354 */
355
/* Recompute the desired state of (S,G) entry sg after its subscription
 * lists changed, and apply all resulting transitions:
 *  - start expiry + last-member query machinery when entering *_EXPIRING
 *  - propagate join/prune into the TIB (PIM side)
 *  - delete and free the entry when it lands on NOINFO
 *
 * has_expired is true when called from an expiry timer handler; it forces
 * the *_EXPIRING intermediate states through to their final state.
 *
 * NB: sg may be freed on return - callers must not touch it afterwards.
 */
static void gm_sg_update(struct gm_sg *sg, bool has_expired)
{
	struct gm_if *gm_ifp = sg->iface;
	enum gm_sg_state prev, desired;
	bool new_join;
	struct gm_sg *grp = NULL;

	/* for a proper S,G entry, behavior also depends on the *,G entry */
	if (!pim_addr_is_any(sg->sgaddr.src))
		grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY);
	else
		assert(sg->state != GM_SG_PRUNE);

	if (gm_packet_sg_subs_count(sg->subs_positive)) {
		/* somebody actively wants this S,G */
		desired = GM_SG_JOIN;
		assert(!sg->t_sg_expire);
	} else if ((sg->state == GM_SG_JOIN ||
		    sg->state == GM_SG_JOIN_EXPIRING) &&
		   !has_expired)
		/* last positive subscription gone, but give hosts a chance
		 * to answer a specific query before tearing down
		 */
		desired = GM_SG_JOIN_EXPIRING;
	else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive))
		desired = GM_SG_NOINFO;
	else if (gm_packet_sg_subs_count(grp->subs_positive) ==
		 gm_packet_sg_subs_count(sg->subs_negative)) {
		/* every *,G subscriber excludes this source */
		if ((sg->state == GM_SG_NOPRUNE ||
		     sg->state == GM_SG_NOPRUNE_EXPIRING) &&
		    !has_expired)
			desired = GM_SG_NOPRUNE_EXPIRING;
		else
			desired = GM_SG_PRUNE;
	} else if (gm_packet_sg_subs_count(sg->subs_negative))
		/* some, but not all, *,G subscribers exclude this source */
		desired = GM_SG_NOPRUNE;
	else
		desired = GM_SG_NOINFO;

	if (desired != sg->state && !gm_ifp->stopping) {
		if (PIM_DEBUG_GM_EVENTS)
			zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
				   gm_states[desired]);

		if (desired == GM_SG_JOIN_EXPIRING ||
		    desired == GM_SG_NOPRUNE_EXPIRING) {
			/* entering an expiring state: schedule state expiry
			 * and kick off last-member specific queries
			 */
			struct gm_query_timers timers;

			timers.qrv = gm_ifp->cur_qrv;
			timers.max_resp_ms = gm_ifp->cur_max_resp;
			timers.qqic_ms = gm_ifp->cur_query_intv_trig;
			timers.fuzz = gm_ifp->cfg_timing_fuzz;

			gm_expiry_calc(&timers);
			gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);

			THREAD_OFF(sg->t_sg_query);
			sg->n_query = gm_ifp->cur_lmqc;
			sg->query_sbit = false;
			gm_trigger_specific(sg);
		}
	}
	prev = sg->state;
	sg->state = desired;

	/* link-local-scope groups are never forwarded, so never joined */
	if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping)
		new_join = false;
	else
		new_join = gm_sg_state_want_join(desired);

	if (new_join && !sg->tib_joined) {
		/* this will retry if join previously failed */
		sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
						gm_ifp->ifp, &sg->oil);
		if (!sg->tib_joined)
			zlog_warn(
				"MLD join for %pSG%%%s not propagated into TIB",
				&sg->sgaddr, gm_ifp->ifp->name);
		else
			zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr,
				  gm_ifp->ifp->name);

	} else if (sg->tib_joined && !new_join) {
		tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil);

		sg->oil = NULL;
		sg->tib_joined = false;
	}

	if (desired == GM_SG_NOINFO) {
		/* no state left at all - entry must be fully empty before
		 * it can be freed
		 */
		assertf((!sg->t_sg_expire &&
			 !gm_packet_sg_subs_count(sg->subs_positive) &&
			 !gm_packet_sg_subs_count(sg->subs_negative)),
			"%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
			&sg->sgaddr, gm_ifp->ifp->name, has_expired,
			sg->t_sg_expire, gm_states[prev], gm_states[desired],
			gm_packet_sg_subs_count(sg->subs_positive),
			gm_packet_sg_subs_count(sg->subs_negative), grp);

		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_sg(sg, "dropping"));

		gm_sgs_del(gm_ifp->sgs, sg);
		gm_sg_free(sg);
	}
}
457
458/****************************************************************************/
459
460/* the following bunch of functions deals with transferring state from
461 * received packets into gm_packet_state. As a reminder, the querier is
462 * structured to keep all items received in one packet together, since they
463 * will share expiry timers and thus allows efficient handling.
464 */
465
/* Fully dispose of a packet's state: unlink it from the interface expiry
 * list and the subscriber's packet list, release the subscriber reference,
 * and free the memory.  All (S,G) items must already be inactive.
 */
static void gm_packet_free(struct gm_packet_state *pkt)
{
	gm_packet_expires_del(pkt->iface->expires, pkt);
	gm_packets_del(pkt->subscriber->packets, pkt);
	gm_subscriber_drop(&pkt->subscriber);
	XFREE(MTYPE_GM_STATE, pkt);
}
473
474static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt,
475 struct gm_sg *sg, bool is_excl,
476 bool is_src)
477{
478 struct gm_packet_sg *item;
479
480 assert(pkt->n_active < pkt->n_sg);
481
482 item = &pkt->items[pkt->n_active];
483 item->sg = sg;
484 item->is_excl = is_excl;
485 item->is_src = is_src;
486 item->offset = pkt->n_active;
487
488 pkt->n_active++;
489 return item;
490}
491
/* Remove one (S,G) item from its subscription list.  If the item is a *,G
 * EXCLUDE entry, its n_exclude trailing per-source items (laid out
 * directly after it in the packet's item array) are removed first.  When
 * the packet's last active item goes away, the packet itself is freed.
 *
 * Returns true if the packet was freed - the caller must stop iterating
 * over pkt->items in that case.
 */
static bool gm_packet_sg_drop(struct gm_packet_sg *item)
{
	struct gm_packet_state *pkt;
	size_t i;

	assert(item->sg);

	pkt = gm_packet_sg2state(item);
	/* invalidate cached "most recent report" pointer if it's us */
	if (item->sg->most_recent == item)
		item->sg->most_recent = NULL;

	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *excl_item;

		excl_item = item + 1 + i;
		if (!excl_item->sg)
			continue;

		gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item);
		excl_item->sg = NULL;
		pkt->n_active--;

		/* "item" itself is still active, so the packet cannot hit
		 * zero items inside this loop
		 */
		assert(pkt->n_active > 0);
	}

	/* S,G items under an EXCLUDE record live on the negative list;
	 * everything else is a positive subscription
	 */
	if (item->is_excl && item->is_src)
		gm_packet_sg_subs_del(item->sg->subs_negative, item);
	else
		gm_packet_sg_subs_del(item->sg->subs_positive, item);
	item->sg = NULL;
	pkt->n_active--;

	if (!pkt->n_active) {
		gm_packet_free(pkt);
		return true;
	}
	return false;
}
530
/* Drop all items of a packet (general expiry / host disappeared), updating
 * each affected (S,G) entry as state is removed.
 */
static void gm_packet_drop(struct gm_packet_state *pkt, bool trace)
{
	for (size_t i = 0; i < pkt->n_sg; i++) {
		struct gm_sg *sg = pkt->items[i].sg;
		bool deleted;

		if (!sg)
			continue;

		if (trace && PIM_DEBUG_GM_TRACE)
			zlog_debug(log_sg(sg, "general-dropping from %pPA"),
				   &pkt->subscriber->addr);
		deleted = gm_packet_sg_drop(&pkt->items[i]);

		gm_sg_update(sg, true);
		/* gm_packet_sg_drop() freed pkt itself when its last item
		 * went away - must not touch pkt->items beyond this point
		 */
		if (deleted)
			break;
	}
}
550
551static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp,
552 struct gm_subscriber *subscriber,
553 pim_addr grp, pim_addr *srcs,
554 size_t n_src, enum gm_sub_sense sense)
555{
556 struct gm_sg *sg;
557 struct gm_packet_sg *old_src;
558 size_t i;
559
560 for (i = 0; i < n_src; i++) {
561 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
562 if (!sg)
563 continue;
564
565 old_src = gm_packet_sg_find(sg, sense, subscriber);
566 if (!old_src)
567 continue;
568
569 gm_packet_sg_drop(old_src);
570 gm_sg_update(sg, false);
571 }
572}
573
574static void gm_sg_expiry_cancel(struct gm_sg *sg)
575{
a96d64b0 576 if (sg->t_sg_expire && PIM_DEBUG_GM_TRACE)
5e5034b0
DL
577 zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
578 THREAD_OFF(sg->t_sg_expire);
579 sg->query_sbit = true;
580}
581
582/* first pass: process all changes resulting in removal of state:
583 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
584 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
585 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
586 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
587 * note *replacing* state is NOT considered *removing* state here
588 *
589 * everything else is thrown into pkt for creation of state in pass 2
590 */
/* first pass: process all changes resulting in removal of state:
 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
 * note *replacing* state is NOT considered *removing* state here
 *
 * everything else is thrown into pkt for creation of state in pass 2
 */
static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
			       struct mld_v2_rec_hdr *rechdr)
{
	/* NB: pkt->subscriber can be NULL here if the subscriber was not
	 * previously seen!
	 */
	struct gm_subscriber *subscriber = pkt->subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp = NULL;
	struct gm_packet_sg *item;
	size_t n_src = ntohs(rechdr->n_src);
	size_t j;
	bool is_excl = false;

	grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY);
	if (grp && subscriber)
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);

	/* INCLUDE mode has no *,G state, so any positive *,G entry the
	 * subscriber has must be an EXCLUDE record
	 */
	assert(old_grp == NULL || old_grp->is_excl);

	switch (rechdr->type) {
	case MLD_RECTYPE_IS_EXCLUDE:
	case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
		/* this always replaces or creates state */
		is_excl = true;
		if (!grp)
			grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);

		item = gm_packet_sg_setup(pkt, grp, is_excl, false);
		item->n_exclude = n_src;

		/* [EXCL_INCL_SG_NOTE] referenced below
		 *
		 * in theory, we should drop any S,G that the host may have
		 * previously added in INCLUDE mode. In practice, this is both
		 * incredibly rare and entirely irrelevant. It only makes any
		 * difference if an S,G that the host previously had on the
		 * INCLUDE list is now on the blocked list for EXCLUDE, which
		 * we can cover in processing the S,G list in pass2_excl().
		 *
		 * Other S,G from the host are simply left to expire
		 * "naturally" through general expiry.
		 */
		break;

	case MLD_RECTYPE_IS_INCLUDE:
	case MLD_RECTYPE_CHANGE_TO_INCLUDE:
		if (old_grp) {
			/* INCLUDE has no *,G state, so old_grp here refers to
			 * previous EXCLUDE => delete it
			 */
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);
			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
		}
		break;

	case MLD_RECTYPE_ALLOW_NEW_SOURCES:
		if (old_grp) {
			/* remove S,Gs from EXCLUDE, and then we're done */
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_NEG);
			return;
		}
		/* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
		 * idential to IS_INCLUDE (because the list of sources in
		 * IS_INCLUDE is not exhaustive)
		 */
		break;

	case MLD_RECTYPE_BLOCK_OLD_SOURCES:
		if (old_grp) {
			/* this is intentionally not implemented because it
			 * would be complicated as hell. we only take the list
			 * of blocked sources from full group state records
			 */
			return;
		}

		if (subscriber)
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_POS);
		return;
	}

	/* queue the record's sources as items for pass 2; for EXCLUDE
	 * records these become the *,G item's trailing exclude list
	 */
	for (j = 0; j < n_src; j++) {
		struct gm_sg *sg;

		sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
		if (!sg)
			sg = gm_sg_make(pkt->iface, rechdr->grp,
					rechdr->srcs[j]);

		gm_packet_sg_setup(pkt, sg, is_excl, true);
	}
}
689
690/* second pass: creating/updating/refreshing state. All the items from the
691 * received packet have already been thrown into gm_packet_state.
692 */
693
694static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i)
695{
696 struct gm_packet_sg *item = &pkt->items[i];
697 struct gm_packet_sg *old = NULL;
698 struct gm_sg *sg = item->sg;
699
700 /* EXCLUDE state was already dropped in pass1 */
701 assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber));
702
703 old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber);
704 if (old)
705 gm_packet_sg_drop(old);
706
707 pkt->n_active++;
708 gm_packet_sg_subs_add(sg->subs_positive, item);
709
710 sg->most_recent = item;
711 gm_sg_expiry_cancel(sg);
712 gm_sg_update(sg, false);
713}
714
/* Install a *,G EXCLUDE item (at index offs in pkt->items[]) together with
 * its n_exclude trailing S,G exclude items, replacing any previous EXCLUDE
 * state the subscriber had for this group.
 */
static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs)
{
	struct gm_packet_sg *item = &pkt->items[offs];
	struct gm_packet_sg *old_grp, *item_dup;
	struct gm_sg *sg_grp = item->sg;
	size_t i;

	old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber);
	if (old_grp) {
		/* remove the subscriber's old entries for each source that
		 * is still excluded in the new record
		 */
		for (i = 0; i < item->n_exclude; i++) {
			struct gm_packet_sg *item_src, *old_src;

			item_src = &pkt->items[offs + 1 + i];
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);

			/* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
			 * items left over if the host previously had INCLUDE
			 * mode going. Remove them here if we find any.
			 */
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);
		}

		/* the previous loop has removed the S,G entries which are
		 * still excluded after this update. So anything left on the
		 * old item was previously excluded but is now included
		 * => need to trigger update on S,G
		 */
		for (i = 0; i < old_grp->n_exclude; i++) {
			struct gm_packet_sg *old_src;
			struct gm_sg *old_sg_src;

			old_src = old_grp + 1 + i;
			old_sg_src = old_src->sg;
			if (!old_sg_src)
				continue;

			gm_packet_sg_drop(old_src);
			gm_sg_update(old_sg_src, false);
		}

		gm_packet_sg_drop(old_grp);
	}

	/* install the new *,G EXCLUDE entry; must not collide since the old
	 * one was just dropped above
	 */
	item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item);
	assert(!item_dup);
	pkt->n_active++;

	sg_grp->most_recent = item;
	gm_sg_expiry_cancel(sg_grp);

	/* install the per-source exclude items; a duplicate here means the
	 * record listed the same source twice - keep the first, void the
	 * rest by clearing their sg pointer
	 */
	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *item_src;

		item_src = &pkt->items[offs + 1 + i];
		item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative,
						 item_src);

		if (item_dup)
			item_src->sg = NULL;
		else {
			pkt->n_active++;
			gm_sg_update(item_src->sg, false);
		}
	}

	/* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
	 * to get lower PIM churn/flapping
	 */
	gm_sg_update(sg_grp, false);
}
791
792CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state");
793/* on receiving a query, we need to update our robustness/query interval to
794 * match, so we correctly process group/source specific queries after last
795 * member leaves
796 */
797
/* Process a received MLDv2 report: validate the record list while tearing
 * down superseded state (pass 1), then install/refresh new state (pass 2).
 * Truncated/garbage trailing records abort the loop but earlier records
 * are still processed.
 */
static void gm_handle_v2_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v2_report_hdr *hdr;
	size_t i, n_records, max_entries;
	struct gm_packet_state *pkt;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv2 report (truncated header)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	/* errors after this may at least partially process the packet */
	gm_ifp->stats.rx_new_report++;

	hdr = (struct mld_v2_report_hdr *)data;
	data += sizeof(*hdr);
	len -= sizeof(*hdr);

	/* can't have more *,G and S,G items than there is space for ipv6
	 * addresses, so just use this to allocate temporary buffer
	 */
	max_entries = len / sizeof(pim_addr);
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);

	n_records = ntohs(hdr->n_records);

	/* validate & remove state in v2_pass1() */
	for (i = 0; i < n_records; i++) {
		struct mld_v2_rec_hdr *rechdr;
		size_t n_src, record_size;

		if (len < sizeof(*rechdr)) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated record header)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		rechdr = (struct mld_v2_rec_hdr *)data;
		data += sizeof(*rechdr);
		len -= sizeof(*rechdr);

		n_src = ntohs(rechdr->n_src);
		/* aux_len is in units of 32-bit words (RFC 3810) */
		record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4;

		if (len < record_size) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated source list)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}
		if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) {
			zlog_warn(
				log_pkt_src(
					"malformed MLDv2 report (invalid group %pI6)"),
				&rechdr->grp);
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		data += record_size;
		len -= record_size;

		gm_handle_v2_pass1(pkt, rechdr);
	}

	if (!pkt->n_active) {
		/* report carried nothing that creates state - all done */
		gm_subscriber_drop(&pkt->subscriber);
		XFREE(MTYPE_GM_STATE, pkt);
		return;
	}

	/* shrink the packet to the items actually in use */
	pkt = XREALLOC(MTYPE_GM_STATE, pkt,
		       offsetof(struct gm_packet_state, items[pkt->n_active]));
	pkt->n_sg = pkt->n_active;
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	/* pass 2: install state.  An EXCLUDE item owns the n_exclude items
	 * following it, so skip over those after handling the *,G
	 */
	for (i = 0; i < pkt->n_sg; i++)
		if (!pkt->items[i].is_excl)
			gm_handle_v2_pass2_incl(pkt, i);
		else {
			gm_handle_v2_pass2_excl(pkt, i);
			i += pkt->items[i].n_exclude;
		}

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}
901
/* Process an MLDv1 report.  It is modelled as an IS_EXCLUDE{} record from
 * the anonymous gm_dummy_untracked subscriber, since MLDv1 report
 * suppression makes per-host tracking impossible.
 */
static void gm_handle_v1_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_packet_state *pkt;
	struct gm_sg *grp;
	struct gm_packet_sg *item;
	size_t max_entries;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 report (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_report++;

	hdr = (struct mld_v1_pkt *)data;

	/* a v1 report carries exactly one group => one item */
	max_entries = 1;
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);

	/* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */

	grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY);
	if (!grp)
		grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY);

	item = gm_packet_sg_setup(pkt, grp, true, false);
	item->n_exclude = 0;
	CPP_NOTICE("set v1-seen timer on grp here");

	/* } */

	/* pass2 will count n_active back up to 1. Also since a v1 report
	 * has exactly 1 group, we can skip the realloc() that v2 needs here.
	 */
	assert(pkt->n_active == 1);
	pkt->n_sg = pkt->n_active;
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	/* pass2 covers installing state & removing old state; all the v1
	 * compat is handled at this point.
	 *
	 * Note that "old state" may be v2; subscribers will switch from v2
	 * reports to v1 reports when the querier changes from v2 to v1. So,
	 * limiting this to v1 would be wrong.
	 */
	gm_handle_v2_pass2_excl(pkt, 0);

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}
968
/* Process an MLDv1 done (leave) message: drop whatever positive *,G state
 * the untracked dummy subscriber has for the group.  Creates no new state.
 */
static void gm_handle_v1_leave(struct gm_if *gm_ifp,
			       const struct sockaddr_in6 *pkt_src, char *data,
			       size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_subscriber *subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 leave (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_leave++;

	hdr = (struct mld_v1_pkt *)data;

	/* v1 state always lives under the dummy subscriber; if it doesn't
	 * exist there is nothing to remove
	 */
	subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
	if (!subscriber)
		return;

	/* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */

	grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY);
	if (grp) {
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
		if (old_grp) {
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);
			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
		}
	}

	/* } */

	/* nothing more to do here, pass2 is no-op for leaves */
	gm_subscriber_drop(&subscriber);
}
1011
1012/* for each general query received (or sent), a timer is started to expire
1013 * _everything_ at the appropriate time (including robustness multiplier).
1014 *
1015 * So when this timer hits, all packets - with all of their items - that were
1016 * received *before* the query are aged out, and state updated accordingly.
1017 * Note that when we receive a refresh/update, the previous/old packet is
1018 * already dropped and replaced with a new one, so in normal steady-state
1019 * operation, this timer won't be doing anything.
1020 *
1021 * Additionally, if a subscriber actively leaves a group, that goes through
1022 * its own path too and won't hit this. This is really only triggered when a
1023 * host straight up disappears.
1024 */
/* for each general query received (or sent), a timer is started to expire
 * _everything_ at the appropriate time (including robustness multiplier).
 *
 * So when this timer hits, all packets - with all of their items - that were
 * received *before* the query are aged out, and state updated accordingly.
 * Note that when we receive a refresh/update, the previous/old packet is
 * already dropped and replaced with a new one, so in normal steady-state
 * operation, this timer won't be doing anything.
 *
 * Additionally, if a subscriber actively leaves a group, that goes through
 * its own path too and won't hit this. This is really only triggered when a
 * host straight up disappears.
 */
static void gm_t_expire(struct thread *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	struct gm_packet_state *pkt;

	zlog_info(log_ifp("general expiry timer"));

	while (gm_ifp->n_pending) {
		struct gm_general_pending *pend = gm_ifp->pending;
		struct timeval remain;
		int64_t remain_ms;

		remain_ms = monotime_until(&pend->expiry, &remain);
		if (remain_ms > 0) {
			/* head entry not due yet - re-arm and bail.
			 * NOTE(review): remain_ms appears to actually hold
			 * microseconds (hence the /1000 in the ms log line) -
			 * confirm against monotime_until()'s return unit
			 */
			if (PIM_DEBUG_GM_EVENTS)
				zlog_debug(
					log_ifp("next general expiry in %" PRId64 "ms"),
					remain_ms / 1000);

			thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
					    &remain, &gm_ifp->t_expire);
			return;
		}

		/* expires list is ordered by arrival time (add_tail); drop
		 * everything received before the query that set up pend
		 */
		while ((pkt = gm_packet_expires_first(gm_ifp->expires))) {
			if (timercmp(&pkt->received, &pend->query, >=))
				break;

			if (PIM_DEBUG_GM_PACKETS)
				zlog_debug(log_ifp("expire packet %p"), pkt);
			gm_packet_drop(pkt, true);
		}

		/* shift remaining pending entries down one slot */
		gm_ifp->n_pending--;
		memmove(gm_ifp->pending, gm_ifp->pending + 1,
			gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
	}

	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("next general expiry waiting for query"));
}
1066
1067/* NB: the receive handlers will also run when sending packets, since we
1068 * receive our own packets back in.
1069 */
/* NB: the receive handlers will also run when sending packets, since we
 * receive our own packets back in.
 */
/* Handle a general query: record a pending general-expiry entry so that
 * state received before this query ages out after the computed wait, and
 * (re)arm the general expiry timer.  Queries superseded by an already
 * pending entry are dropped.
 */
static void gm_handle_q_general(struct gm_if *gm_ifp,
				struct gm_query_timers *timers)
{
	struct timeval now, expiry;
	struct gm_general_pending *pend;

	monotime(&now);
	timeradd(&now, &timers->expire_wait, &expiry);

	while (gm_ifp->n_pending) {
		pend = &gm_ifp->pending[gm_ifp->n_pending - 1];

		if (timercmp(&pend->expiry, &expiry, <))
			break;

		/* if we end up here, the last item in pending[] has an expiry
		 * later than the expiry for this query. But our query time
		 * (now) is later than that of the item (because, well, that's
		 * how time works.) This makes this query meaningless since
		 * it's "supersetted" within the preexisting query
		 */

		if (PIM_DEBUG_GM_TRACE_DETAIL)
			zlog_debug(
				log_ifp("zapping supersetted general timer %pTVMu"),
				&pend->expiry);

		gm_ifp->n_pending--;
		if (!gm_ifp->n_pending)
			THREAD_OFF(gm_ifp->t_expire);
	}

	/* people might be messing with their configs or something */
	if (gm_ifp->n_pending == array_size(gm_ifp->pending))
		return;

	/* append the new entry; list stays sorted by expiry since later
	 * queries always produce later expiry times (see loop above)
	 */
	pend = &gm_ifp->pending[gm_ifp->n_pending];
	pend->query = now;
	pend->expiry = expiry;

	if (!gm_ifp->n_pending++) {
		/* first entry - timer isn't running yet, start it */
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(
				log_ifp("starting general timer @ 0: %pTVMu"),
				&pend->expiry);
		thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
				    &timers->expire_wait, &gm_ifp->t_expire);
	} else if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
			   gm_ifp->n_pending, &pend->expiry);
}
1121
/* Per-(S,G) expiry timer: no report came back for a group(-and-source)
 * specific query, so drop all positive subscriptions and re-evaluate the
 * S,G state (which will prune it).
 */
static void gm_t_sg_expire(struct thread *t)
{
	struct gm_sg *sg = THREAD_ARG(t);
	struct gm_if *gm_ifp = sg->iface;
	struct gm_packet_sg *item;

	/* this timer is only ever armed while a leave is being confirmed */
	assertf(sg->state == GM_SG_JOIN_EXPIRING ||
			sg->state == GM_SG_NOPRUNE_EXPIRING,
		"%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t);

	frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
		/* this will also drop EXCLUDE mode S,G lists together with
		 * the *,G entry
		 */
		gm_packet_sg_drop(item);

	/* subs_negative items are only timed out together with the *,G entry
	 * since we won't get any reports for a group-and-source query
	 */
	gm_sg_update(sg, true);
}
1143
1144static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg,
1145 struct timeval ref)
1146{
1147 struct gm_packet_state *pkt;
1148
1149 if (!sg->most_recent) {
1150 struct gm_packet_state *best_pkt = NULL;
1151 struct gm_packet_sg *item;
1152
1153 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
1154 pkt = gm_packet_sg2state(item);
1155
1156 if (!best_pkt ||
1157 timercmp(&pkt->received, &best_pkt->received, >)) {
1158 best_pkt = pkt;
1159 sg->most_recent = item;
1160 }
1161 }
1162 }
1163 if (sg->most_recent) {
1164 struct timeval fuzz;
1165
1166 pkt = gm_packet_sg2state(sg->most_recent);
1167
1168 /* this shouldn't happen on plain old real ethernet segment,
1169 * but on something like a VXLAN or VPLS it is very possible
1170 * that we get a report before the query that triggered it.
1171 * (imagine a triangle scenario with 3 datacenters, it's very
1172 * possible A->B + B->C is faster than A->C due to odd routing)
1173 *
1174 * This makes a little tolerance allowance to handle that case.
1175 */
1176 timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz);
1177
1178 if (timercmp(&fuzz, &ref, >))
1179 return true;
1180 }
1181 return false;
1182}
1183
/* Arm (or shorten) the expiry timer on an S,G in response to a query.
 * No-op if the S,G is absent, already pruned, or has a report recent
 * enough that no timeout is warranted.  Never lengthens a running timer.
 */
static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
			      struct timeval expire_wait)
{
	struct timeval now;

	if (!sg)
		return;
	if (sg->state == GM_SG_PRUNE)
		return;

	monotime(&now);
	/* a recent report already answers this query - nothing to expire */
	if (gm_sg_check_recent(gm_ifp, sg, now))
		return;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait);

	if (sg->t_sg_expire) {
		struct timeval remain;

		/* only replace the timer if the new deadline is sooner */
		remain = thread_timer_remain(sg->t_sg_expire);
		if (timercmp(&remain, &expire_wait, <=))
			return;

		THREAD_OFF(sg->t_sg_expire);
	}

	thread_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
			    &sg->t_sg_expire);
}
1214
1215static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
1216 struct gm_query_timers *timers, pim_addr grp,
1217 const pim_addr *srcs, size_t n_src)
1218{
1219 struct gm_sg *sg;
1220 size_t i;
1221
1222 for (i = 0; i < n_src; i++) {
1223 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
1224 gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1225 }
1226}
1227
static void gm_t_grp_expire(struct thread *t)
{
	/* if we're here, that means when we received the group-specific query
	 * there was one or more active S,G for this group. For *,G the timer
	 * in sg->t_sg_expire is running separately and gets cancelled when we
	 * receive a report, so that work is left to gm_t_sg_expire and we
	 * shouldn't worry about it here.
	 */
	struct gm_grp_pending *pend = THREAD_ARG(t);
	struct gm_if *gm_ifp = pend->iface;
	struct gm_sg *sg, *sg_start, sg_ref = {};

	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);

	/* gteq lookup - try to find *,G or S,G (S,G is > *,G)
	 * could technically be gt to skip a possible *,G
	 */
	sg_ref.sgaddr.grp = pend->grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	/* walk all S,G entries belonging to this group */
	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
		struct gm_packet_sg *item;

		if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
			break;
		if (pim_addr_is_any(sg->sgaddr.src))
			/* handled by gm_t_sg_expire / sg->t_sg_expire */
			continue;
		if (gm_sg_check_recent(gm_ifp, sg, pend->query))
			continue;

		/* we may also have a group-source-specific query going on in
		 * parallel. But if we received nothing for the *,G query,
		 * the S,G query is kinda irrelevant.
		 */
		THREAD_OFF(sg->t_sg_expire);

		frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
			/* this will also drop the EXCLUDE S,G lists */
			gm_packet_sg_drop(item);

		gm_sg_update(sg, true);
	}

	/* one-shot: the pending entry is consumed on expiry */
	gm_grp_pends_del(gm_ifp->grp_pends, pend);
	XFREE(MTYPE_GM_GRP_PENDING, pend);
}
1277
/* Handle a group-specific query: start the *,G expiry timer if we track a
 * *,G entry, and set up (or shorten) a gm_grp_pending to time out the
 * group's S,G entries if the group goes unreported.
 */
static void gm_handle_q_group(struct gm_if *gm_ifp,
			      struct gm_query_timers *timers, pim_addr grp)
{
	struct gm_sg *sg, sg_ref = {};
	struct gm_grp_pending *pend, pend_ref = {};

	sg_ref.sgaddr.grp = grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	/* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
	sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
		/* we have nothing at all for this group - don't waste RAM */
		return;

	if (pim_addr_is_any(sg->sgaddr.src)) {
		/* actually found *,G entry here */
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
				   &grp);
		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);

		sg = gm_sgs_next(gm_ifp->sgs, sg);
		if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
			/* no S,G for this group */
			return;
	}

	/* at least one S,G exists - track the group-wide expiry */
	pend_ref.grp = grp;
	pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);

	if (pend) {
		struct timeval remain;

		/* only restart if the new deadline is sooner */
		remain = thread_timer_remain(pend->t_expire);
		if (timercmp(&remain, &timers->expire_wait, <=))
			return;

		THREAD_OFF(pend->t_expire);
	} else {
		pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
		pend->grp = grp;
		pend->iface = gm_ifp;
		gm_grp_pends_add(gm_ifp->grp_pends, pend);
	}

	monotime(&pend->query);
	thread_add_timer_tv(router->master, gm_t_grp_expire, pend,
			    &timers->expire_wait, &pend->t_expire);

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
			   pend->t_expire);
}
1332
/* Restart the querier state machine (including startup-rate queries), but
 * only if we are currently the elected querier on this interface.
 * Called when configuration affecting query behavior changes.
 */
static void gm_bump_querier(struct gm_if *gm_ifp)
{
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;

	THREAD_OFF(gm_ifp->t_query);

	/* no usable link-local address (yet) - can't send queries */
	if (pim_addr_is_any(pim_ifp->ll_lowest))
		return;
	/* another router is the elected querier - leave it alone */
	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
		return;

	gm_ifp->n_startup = gm_ifp->cur_qrv;

	thread_execute(router->master, gm_t_query, gm_ifp, 0);
}
1348
/* Other-querier-present timer expired: the previously elected (lower
 * address) querier went silent, so take over querier duty ourselves and
 * start sending startup-rate queries.
 */
static void gm_t_other_querier(struct thread *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;

	zlog_info(log_ifp("other querier timer expired"));

	gm_ifp->querier = pim_ifp->ll_lowest;
	gm_ifp->n_startup = gm_ifp->cur_qrv;

	thread_execute(router->master, gm_t_query, gm_ifp, 0);
}
1361
/* Process a received MLD query (MLDv1 or MLDv2, distinguished by length).
 * Validates size, group address and destination, performs querier election
 * (lowest link-local address wins), derives the response timers, and then
 * dispatches to the general / group / group-and-source handlers.
 */
static void gm_handle_query(struct gm_if *gm_ifp,
			    const struct sockaddr_in6 *pkt_src,
			    pim_addr *pkt_dst, char *data, size_t len)
{
	struct mld_v2_query_hdr *hdr;
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct gm_query_timers timers;
	bool general_query;

	/* MLDv1 queries have a fixed size; anything else must be at least a
	 * v2 query header
	 */
	if (len < sizeof(struct mld_v2_query_hdr) &&
	    len != sizeof(struct mld_v1_pkt)) {
		zlog_warn(log_pkt_src("invalid query size"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	hdr = (struct mld_v2_query_hdr *)data;
	general_query = pim_addr_is_any(hdr->grp);

	if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) {
		zlog_warn(log_pkt_src(
				  "malformed MLDv2 query (invalid group %pI6)"),
			  &hdr->grp);
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	/* v2-only checks: source list must fit in the packet, and a general
	 * query must not carry sources
	 */
	if (len >= sizeof(struct mld_v2_query_hdr)) {
		size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);

		if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 query (truncated source list)"));
			gm_ifp->stats.rx_drop_malformed++;
			return;
		}

		if (general_query && src_space) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 query (general query with non-empty source list)"));
			gm_ifp->stats.rx_drop_malformed++;
			return;
		}
	}

	/* accepting queries unicast to us (or addressed to a wrong group)
	 * can mess up querier election as well as cause us to terminate
	 * traffic (since after a unicast query no reports will be coming in)
	 */
	if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) {
		if (pim_addr_is_any(hdr->grp)) {
			zlog_warn(
				log_pkt_src(
					"wrong destination %pPA for general query"),
				pkt_dst);
			gm_ifp->stats.rx_drop_dstaddr++;
			return;
		}

		if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) {
			gm_ifp->stats.rx_drop_dstaddr++;
			zlog_warn(
				log_pkt_src(
					"wrong destination %pPA for group specific query"),
				pkt_dst);
			return;
		}
	}

	/* querier election: lowest source address wins */
	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
		if (PIM_DEBUG_GM_EVENTS)
			zlog_debug(
				log_pkt_src("replacing elected querier %pPA"),
				&gm_ifp->querier);

		gm_ifp->querier = pkt_src->sin6_addr;
	}

	/* derive timers: v1 queries carry max_resp directly (in ms) and no
	 * QRV/QQIC, so fall back to our locally configured values
	 */
	if (len == sizeof(struct mld_v1_pkt)) {
		timers.qrv = gm_ifp->cur_qrv;
		timers.max_resp_ms = hdr->max_resp_code;
		timers.qqic_ms = gm_ifp->cur_query_intv;
	} else {
		/* QRV of 0 means "more than 7" per RFC 3810; use 8 */
		timers.qrv = (hdr->flags & 0x7) ?: 8;
		timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code);
		timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000;
	}
	timers.fuzz = gm_ifp->cfg_timing_fuzz;

	gm_expiry_calc(&timers);

	if (PIM_DEBUG_GM_TRACE_DETAIL)
		zlog_debug(
			log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
			timers.qrv, timers.max_resp_ms, timers.qqic_ms,
			&timers.expire_wait);

	/* query from a lower address than our own: stop querying ourselves
	 * and (re)start the other-querier-present timeout
	 */
	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
		unsigned int other_ms;

		THREAD_OFF(gm_ifp->t_query);
		THREAD_OFF(gm_ifp->t_other_querier);

		other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
		thread_add_timer_msec(router->master, gm_t_other_querier,
				      gm_ifp, other_ms,
				      &gm_ifp->t_other_querier);
	}

	if (len == sizeof(struct mld_v1_pkt)) {
		if (general_query) {
			gm_handle_q_general(gm_ifp, &timers);
			gm_ifp->stats.rx_query_old_general++;
		} else {
			gm_handle_q_group(gm_ifp, &timers, hdr->grp);
			gm_ifp->stats.rx_query_old_group++;
		}
		return;
	}

	/* v2 query - [S]uppress bit */
	if (hdr->flags & 0x8) {
		gm_ifp->stats.rx_query_new_sbit++;
		return;
	}

	if (general_query) {
		gm_handle_q_general(gm_ifp, &timers);
		gm_ifp->stats.rx_query_new_general++;
	} else if (!ntohs(hdr->n_src)) {
		gm_handle_q_group(gm_ifp, &timers, hdr->grp);
		gm_ifp->stats.rx_query_new_group++;
	} else {
		gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
				     ntohs(hdr->n_src));
		gm_ifp->stats.rx_query_new_groupsrc++;
	}
}
1500
/* Verify the ICMPv6 checksum (over the IPv6 pseudo-header) of a received
 * MLD packet and dispatch by ICMPv6 type to the query/report/leave
 * handlers.  Unknown types are silently ignored (the socket filter only
 * passes MLD types anyway).
 */
static void gm_rx_process(struct gm_if *gm_ifp,
			  const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst,
			  void *data, size_t pktlen)
{
	struct icmp6_plain_hdr *icmp6 = data;
	uint16_t pkt_csum, ref_csum;
	struct ipv6_ph ph6 = {
		.src = pkt_src->sin6_addr,
		.dst = *pkt_dst,
		.ulpl = htons(pktlen),
		.next_hdr = IPPROTO_ICMPV6,
	};

	/* zero the checksum field before recomputing over the packet */
	pkt_csum = icmp6->icmp6_cksum;
	icmp6->icmp6_cksum = 0;
	ref_csum = in_cksum_with_ph6(&ph6, data, pktlen);

	if (pkt_csum != ref_csum) {
		zlog_warn(
			log_pkt_src(
				"(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
			pkt_dst, pkt_csum, ref_csum);
		gm_ifp->stats.rx_drop_csum++;
		return;
	}

	/* step past the ICMPv6 header to the MLD payload */
	data = (icmp6 + 1);
	pktlen -= sizeof(*icmp6);

	switch (icmp6->icmp6_type) {
	case ICMP6_MLD_QUERY:
		gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen);
		break;
	case ICMP6_MLD_V1_REPORT:
		gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V1_DONE:
		gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V2_REPORT:
		gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen);
		break;
	}
}
1545
/* Scan an IPv6 Hop-by-Hop options header (as delivered via IPV6_HOPOPTS)
 * for a Router Alert option carrying the given alert value.  Returns true
 * only if a well-formed Router Alert with exactly that value is present.
 *
 * Layout per RFC 8200: byte 0 = next header, byte 1 = extension length in
 * 8-byte units minus one, then TLV options (Pad1 is a single zero byte).
 */
static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
				 uint16_t alert_type)
{
	const uint8_t *pos, *end;

	/* minimum size & self-declared length must both fit the buffer */
	if (hopopt_len < 8)
		return false;
	if (hopopt_len < (hopopts[1] + 1U) * 8U)
		return false;

	end = hopopts + (hopopts[1] + 1) * 8;
	pos = hopopts + 2;

	while (pos < end) {
		/* Pad1 has no length byte */
		if (pos[0] == IP6OPT_PAD1) {
			pos++;
			continue;
		}

		/* need room for type + length byte, then the option body */
		if (pos > end - 2)
			break;
		if (pos > end - 2 - pos[1])
			break;

		if (pos[0] == IP6OPT_ROUTER_ALERT && pos[1] == 2) {
			uint16_t found = (uint16_t)((pos[2] << 8) | pos[3]);

			if (found == alert_type)
				return true;
		}

		pos += 2 + pos[1];
	}
	return false;
}
1581
/* Read task on the per-VRF MLD socket: receive one packet (two-phase
 * recvmsg with MSG_PEEK|MSG_TRUNC so oversized packets get a heap buffer),
 * validate ancillary data (pktinfo, hop limit == 1, Router Alert option)
 * and source address, then hand off to gm_rx_process().  Re-arms itself.
 */
static void gm_t_recv(struct thread *t)
{
	struct pim_instance *pim = THREAD_ARG(t);
	union {
		char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
			 CMSG_SPACE(256) /* hop options */ +
			 CMSG_SPACE(sizeof(int)) /* hopcount */];
		struct cmsghdr align;
	} cmsgbuf;
	struct cmsghdr *cmsg;
	struct in6_pktinfo *pktinfo = NULL;
	uint8_t *hopopts = NULL;
	size_t hopopt_len = 0;
	int *hoplimit = NULL;
	char rxbuf[2048];
	struct msghdr mh[1] = {};
	struct iovec iov[1];
	struct sockaddr_in6 pkt_src[1] = {};
	ssize_t nread;
	size_t pktlen;

	/* re-arm first so error paths below can simply return */
	thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
			&pim->t_gm_recv);

	iov->iov_base = rxbuf;
	iov->iov_len = sizeof(rxbuf);

	mh->msg_name = pkt_src;
	mh->msg_namelen = sizeof(pkt_src);
	mh->msg_control = cmsgbuf.buf;
	mh->msg_controllen = sizeof(cmsgbuf.buf);
	mh->msg_iov = iov;
	mh->msg_iovlen = array_size(iov);
	mh->msg_flags = 0;

	/* peek to learn the real packet size without consuming it */
	nread = recvmsg(pim->gm_socket, mh, MSG_PEEK | MSG_TRUNC);
	if (nread <= 0) {
		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
		pim->gm_rx_drop_sys++;
		return;
	}

	/* larger than the stack buffer - allocate a matching heap buffer */
	if ((size_t)nread > sizeof(rxbuf)) {
		iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread);
		iov->iov_len = nread;
	}
	nread = recvmsg(pim->gm_socket, mh, 0);
	if (nread <= 0) {
		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
		pim->gm_rx_drop_sys++;
		goto out_free;
	}

	struct interface *ifp;

	/* link-local source scope id identifies the ingress interface */
	ifp = if_lookup_by_index(pkt_src->sin6_scope_id, pim->vrf->vrf_id);
	if (!ifp || !ifp->info)
		goto out_free;

	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;

	if (!gm_ifp)
		goto out_free;

	/* collect ancillary data requested via setsockopt in socket setup */
	for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) {
		if (cmsg->cmsg_level != SOL_IPV6)
			continue;

		switch (cmsg->cmsg_type) {
		case IPV6_PKTINFO:
			pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			break;
		case IPV6_HOPOPTS:
			hopopts = CMSG_DATA(cmsg);
			hopopt_len = cmsg->cmsg_len - sizeof(*cmsg);
			break;
		case IPV6_HOPLIMIT:
			hoplimit = (int *)CMSG_DATA(cmsg);
			break;
		}
	}

	if (!pktinfo || !hoplimit) {
		zlog_err(log_ifp(
			"BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
		pim->gm_rx_drop_sys++;
		goto out_free;
	}

	/* MLD packets must be sent with hop limit 1 (RFC 3810) */
	if (*hoplimit != 1) {
		zlog_err(log_pkt_src("packet with hop limit != 1"));
		/* spoofing attempt => count on srcaddr counter */
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) {
		zlog_err(log_pkt_src(
			"packet without IPv6 Router Alert MLD option"));
		gm_ifp->stats.rx_drop_ra++;
		goto out_free;
	}

	if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr))
		/* reports from :: happen in normal operation for DAD, so
		 * don't spam log messages about this
		 */
		goto out_free;

	if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) {
		zlog_warn(log_pkt_src("packet from invalid source address"));
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	pktlen = nread;
	if (pktlen < sizeof(struct icmp6_plain_hdr)) {
		zlog_warn(log_pkt_src("truncated packet"));
		gm_ifp->stats.rx_drop_malformed++;
		goto out_free;
	}

	gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base,
		      pktlen);

out_free:
	if (iov->iov_base != rxbuf)
		XFREE(MTYPE_GM_PACKET, iov->iov_base);
}
1712
/* Build and send an MLD query on the interface.
 *
 * grp == :: sends a general query (to ff02::1); otherwise a group(-and-
 * source, if n_srcs > 0) specific query addressed to the group itself.
 * s_bit sets the [S]uppress router-side processing flag.  The checksum is
 * computed over a pseudo-header iovec that is excluded from the actual
 * sendmsg.  Router Alert hop-by-hop option and source address selection
 * are supplied via ancillary data.
 */
static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
			  const pim_addr *srcs, size_t n_srcs, bool s_bit)
{
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct sockaddr_in6 dstaddr = {
		.sin6_family = AF_INET6,
		.sin6_scope_id = gm_ifp->ifp->ifindex,
	};
	struct {
		struct icmp6_plain_hdr hdr;
		struct mld_v2_query_hdr v2_query;
	} query = {
		/* clang-format off */
		.hdr = {
			.icmp6_type = ICMP6_MLD_QUERY,
			.icmp6_code = 0,
		},
		.v2_query = {
			.grp = grp,
		},
		/* clang-format on */
	};
	struct ipv6_ph ph6 = {
		.src = pim_ifp->ll_lowest,
		.ulpl = htons(sizeof(query)),
		.next_hdr = IPPROTO_ICMPV6,
	};
	union {
		char buf[CMSG_SPACE(8) /* hop options */ +
			 CMSG_SPACE(sizeof(struct in6_pktinfo))];
		struct cmsghdr align;
	} cmsg = {};
	struct cmsghdr *cmh;
	struct msghdr mh[1] = {};
	struct iovec iov[3];
	size_t iov_len;
	ssize_t ret, expect_ret;
	uint8_t *dp;
	struct in6_pktinfo *pktinfo;

	if (if_is_loopback(gm_ifp->ifp)) {
		/* Linux is a bit odd with multicast on loopback */
		ph6.src = in6addr_loopback;
		dstaddr.sin6_addr = in6addr_loopback;
	} else if (pim_addr_is_any(grp))
		dstaddr.sin6_addr = gm_all_hosts;
	else
		dstaddr.sin6_addr = grp;

	query.v2_query.max_resp_code =
		mld_max_resp_encode(gm_ifp->cur_max_resp);
	/* QRV field is 3 bits; values >= 8 are encoded as 0 (RFC 3810) */
	query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? gm_ifp->cur_qrv : 0;
	if (s_bit)
		query.v2_query.flags |= 0x08;
	query.v2_query.qqic =
		igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000);
	query.v2_query.n_src = htons(n_srcs);

	ph6.dst = dstaddr.sin6_addr;

	/* ph6 not included in sendmsg */
	iov[0].iov_base = &ph6;
	iov[0].iov_len = sizeof(ph6);
	iov[1].iov_base = &query;
	if (gm_ifp->cur_version == GM_MLDV1) {
		/* v1 queries are just the short fixed-size header */
		iov_len = 2;
		iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt);
	} else if (!n_srcs) {
		iov_len = 2;
		iov[1].iov_len = sizeof(query);
	} else {
		iov[1].iov_len = sizeof(query);
		iov[2].iov_base = (void *)srcs;
		iov[2].iov_len = n_srcs * sizeof(srcs[0]);
		iov_len = 3;
	}

	query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);

	if (PIM_DEBUG_GM_PACKETS)
		zlog_debug(
			log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
			&pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);

	mh->msg_name = &dstaddr;
	mh->msg_namelen = sizeof(dstaddr);
	mh->msg_iov = iov + 1;
	mh->msg_iovlen = iov_len - 1;
	mh->msg_control = &cmsg;
	mh->msg_controllen = sizeof(cmsg.buf);

	/* hop-by-hop extension header with the Router Alert (MLD) option,
	 * required on all MLD packets
	 */
	cmh = CMSG_FIRSTHDR(mh);
	cmh->cmsg_level = IPPROTO_IPV6;
	cmh->cmsg_type = IPV6_HOPOPTS;
	cmh->cmsg_len = CMSG_LEN(8);
	dp = CMSG_DATA(cmh);
	*dp++ = 0;		     /* next header */
	*dp++ = 0;		     /* length (8-byte blocks, minus 1) */
	*dp++ = IP6OPT_ROUTER_ALERT; /* router alert */
	*dp++ = 2;		     /* length */
	*dp++ = 0;		     /* value (2 bytes) */
	*dp++ = 0;		     /* value (2 bytes) (0 = MLD) */
	*dp++ = 0;		     /* pad0 */
	*dp++ = 0;		     /* pad0 */

	/* pin egress interface and source address */
	cmh = CMSG_NXTHDR(mh, cmh);
	cmh->cmsg_level = IPPROTO_IPV6;
	cmh->cmsg_type = IPV6_PKTINFO;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
	pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmh);
	pktinfo->ipi6_ifindex = gm_ifp->ifp->ifindex;
	pktinfo->ipi6_addr = gm_ifp->cur_ll_lowest;

	expect_ret = iov[1].iov_len;
	if (iov_len == 3)
		expect_ret += iov[2].iov_len;

	frr_with_privs (&pimd_privs) {
		ret = sendmsg(gm_ifp->pim->gm_socket, mh, 0);
	}

	if (ret != expect_ret) {
		zlog_warn(log_ifp("failed to send query: %m"));
		gm_ifp->stats.tx_query_fail++;
	} else {
		if (gm_ifp->cur_version == GM_MLDV1) {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_old_general++;
			else
				gm_ifp->stats.tx_query_old_group++;
		} else {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_new_general++;
			else if (!n_srcs)
				gm_ifp->stats.tx_query_new_group++;
			else
				gm_ifp->stats.tx_query_new_groupsrc++;
		}
	}
}
1853
1854static void gm_t_query(struct thread *t)
1855{
1856 struct gm_if *gm_ifp = THREAD_ARG(t);
1857 unsigned int timer_ms = gm_ifp->cur_query_intv;
1858
1859 if (gm_ifp->n_startup) {
1860 timer_ms /= 4;
1861 gm_ifp->n_startup--;
1862 }
1863
1864 thread_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
1865 &gm_ifp->t_query);
1866
1867 gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
1868}
1869
/* Retransmission timer for S,G specific queries; simply fires the next
 * triggered query round for this S,G.
 */
static void gm_t_sg_query(struct thread *t)
{
	struct gm_sg *sg = THREAD_ARG(t);

	gm_trigger_specific(sg);
}
1876
/* S,G specific queries (triggered by a member leaving) get a little slack
 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
 */
static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
{
	struct gm_if *gm_ifp = pend_gsq->iface;

	gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
		      pend_gsq->s_bit);

	/* one-shot: aggregate is consumed by sending it */
	gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
	XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
}
1890
/* Slack timer expired - flush the aggregated group-and-source query. */
static void gm_t_gsq_pend(struct thread *t)
{
	struct gm_gsq_pending *pend_gsq = THREAD_ARG(t);

	gm_send_specific(pend_gsq);
}
1897
/* Send (or schedule) one round of specific queries for an S,G after a
 * leave was received.  Decrements sg->n_query and re-arms the per-round
 * timer while more rounds remain.  *,G entries are queried immediately;
 * S,G sources are aggregated into a gm_gsq_pending so several sources of
 * the same group can share one query packet.
 */
static void gm_trigger_specific(struct gm_sg *sg)
{
	struct gm_if *gm_ifp = sg->iface;
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct gm_gsq_pending *pend_gsq, ref = {};

	sg->n_query--;
	if (sg->n_query)
		thread_add_timer_msec(router->master, gm_t_sg_query, sg,
				      gm_ifp->cur_query_intv_trig,
				      &sg->t_sg_query);

	/* only the elected querier transmits specific queries */
	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
		return;
	if (gm_ifp->pim->gm_socket == -1)
		return;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "triggered query"));

	/* *,G: group-specific query, no aggregation needed */
	if (pim_addr_is_any(sg->sgaddr.src)) {
		gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
		return;
	}

	/* aggregate keyed on (group, s_bit) */
	ref.grp = sg->sgaddr.grp;
	ref.s_bit = sg->query_sbit;

	pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
	if (!pend_gsq) {
		pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
		pend_gsq->grp = sg->sgaddr.grp;
		pend_gsq->s_bit = sg->query_sbit;
		pend_gsq->iface = gm_ifp;
		gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);

		/* short fuzz delay lets more sources pile into this query */
		thread_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
				    &gm_ifp->cfg_timing_fuzz,
				    &pend_gsq->t_send);
	}

	assert(pend_gsq->n_src < array_size(pend_gsq->srcs));

	pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
	pend_gsq->n_src++;

	/* source list full - flush immediately instead of waiting */
	if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
		THREAD_OFF(pend_gsq->t_send);
		gm_send_specific(pend_gsq);
		pend_gsq = NULL;
	}
}
1950
df655593 1951static void gm_vrf_socket_incref(struct pim_instance *pim)
5e5034b0 1952{
df655593 1953 struct vrf *vrf = pim->vrf;
5e5034b0
DL
1954 int ret, intval;
1955 struct icmp6_filter filter[1];
1956
df655593
DL
1957 if (pim->gm_socket_if_count++ && pim->gm_socket != -1)
1958 return;
5e5034b0
DL
1959
1960 ICMP6_FILTER_SETBLOCKALL(filter);
1961 ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter);
1962 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter);
1963 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter);
1964 ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter);
1965
1966 frr_with_privs (&pimd_privs) {
df655593
DL
1967 pim->gm_socket = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
1968 vrf->vrf_id, vrf->name);
1969 if (pim->gm_socket < 0) {
1970 zlog_err("(VRF %s) could not create MLD socket: %m",
1971 vrf->name);
5e5034b0
DL
1972 return;
1973 }
1974
df655593
DL
1975 ret = setsockopt(pim->gm_socket, SOL_ICMPV6, ICMP6_FILTER,
1976 filter, sizeof(filter));
5e5034b0 1977 if (ret)
df655593
DL
1978 zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
1979 vrf->name);
5e5034b0
DL
1980
1981 intval = 1;
df655593 1982 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVPKTINFO,
5e5034b0
DL
1983 &intval, sizeof(intval));
1984 if (ret)
df655593
DL
1985 zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
1986 vrf->name);
5e5034b0
DL
1987
1988 intval = 1;
df655593 1989 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPOPTS,
5e5034b0
DL
1990 &intval, sizeof(intval));
1991 if (ret)
df655593
DL
1992 zlog_err("(VRF %s) failed to set IPV6_HOPOPTS: %m",
1993 vrf->name);
5e5034b0
DL
1994
1995 intval = 1;
df655593 1996 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPLIMIT,
5e5034b0
DL
1997 &intval, sizeof(intval));
1998 if (ret)
df655593
DL
1999 zlog_err("(VRF %s) failed to set IPV6_HOPLIMIT: %m",
2000 vrf->name);
5e5034b0
DL
2001
2002 intval = 1;
df655593 2003 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_LOOP,
5e5034b0
DL
2004 &intval, sizeof(intval));
2005 if (ret)
2006 zlog_err(
df655593
DL
2007 "(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
2008 vrf->name);
5e5034b0
DL
2009
2010 intval = 1;
df655593 2011 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_HOPS,
5e5034b0
DL
2012 &intval, sizeof(intval));
2013 if (ret)
df655593
DL
2014 zlog_err(
2015 "(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
2016 vrf->name);
5e5034b0
DL
2017
2018 /* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
2019 * RX filtering in Linux. It only means "receive all groups
2020 * that something on the system has joined". To actually
2021 * receive *all* MLD packets - which is what we need -
2022 * multicast routing must be enabled on the interface. And
2023 * this only works for MLD packets specifically.
2024 *
2025 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
2026 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
2027 *
2028 * Also note that the code there explicitly checks for the IPv6
2029 * router alert MLD option (which is required by the RFC to be
2030 * on MLD packets.) That implies trying to support hosts which
2031 * erroneously don't add that option is just not possible.
2032 */
2033 intval = 1;
df655593 2034 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_ALL,
5e5034b0
DL
2035 &intval, sizeof(intval));
2036 if (ret)
2037 zlog_info(
df655593
DL
2038 "(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
2039 vrf->name);
2040 }
5e5034b0 2041
df655593
DL
2042 thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
2043 &pim->t_gm_recv);
2044}
2045
2046static void gm_vrf_socket_decref(struct pim_instance *pim)
2047{
2048 if (--pim->gm_socket_if_count)
2049 return;
2050
2051 THREAD_OFF(pim->t_gm_recv);
2052 close(pim->gm_socket);
2053 pim->gm_socket = -1;
2054}
2055
/* Bring up MLD on an interface: take a VRF socket reference, allocate and
 * initialize the gm_if state from the interface's configured MLD values,
 * and join the all-MLDv2-routers group (ff02::16).
 * Precondition: PIM is configured on the interface and MLD is not yet up.
 */
static void gm_start(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;

	assert(pim_ifp);
	assert(pim_ifp->pim);
	assert(pim_ifp->mroute_vif_index >= 0);
	assert(!pim_ifp->mld);

	gm_vrf_socket_incref(pim_ifp->pim);

	gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp));
	gm_ifp->ifp = ifp;
	pim_ifp->mld = gm_ifp;
	gm_ifp->pim = pim_ifp->pim;
	monotime(&gm_ifp->started);

	zlog_info(log_ifp("starting MLD"));

	if (pim_ifp->mld_version == 1)
		gm_ifp->cur_version = GM_MLDV1;
	else
		gm_ifp->cur_version = GM_MLDV2;

	/* snapshot configured values; intervals are stored in ms here
	 * (config is in seconds / deciseconds)
	 */
	gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable;
	gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
	gm_ifp->cur_query_intv_trig =
		pim_ifp->gm_specific_query_max_response_time_dsec * 100;
	gm_ifp->cur_max_resp = pim_ifp->gm_query_max_response_time_dsec * 100;
	gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;

	/* 10ms tolerance for reports arriving "before" their query */
	gm_ifp->cfg_timing_fuzz.tv_sec = 0;
	gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;

	gm_sgs_init(gm_ifp->sgs);
	gm_subscribers_init(gm_ifp->subscribers);
	gm_packet_expires_init(gm_ifp->expires);
	gm_grp_pends_init(gm_ifp->grp_pends);
	gm_gsq_pends_init(gm_ifp->gsq_pends);

	frr_with_privs (&pimd_privs) {
		struct ipv6_mreq mreq;
		int ret;

		/* all-MLDv2 group */
		mreq.ipv6mr_multiaddr = gm_all_routers;
		mreq.ipv6mr_interface = ifp->ifindex;
		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
				 IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
		if (ret)
			zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
				 ifp->name);
	}
}
2111
e309780f 2112void gm_group_delete(struct gm_if *gm_ifp)
5e5034b0 2113{
e309780f 2114 struct gm_sg *sg;
5e5034b0
DL
2115 struct gm_packet_state *pkt;
2116 struct gm_grp_pending *pend_grp;
2117 struct gm_gsq_pending *pend_gsq;
2118 struct gm_subscriber *subscriber;
e309780f
SP
2119
2120 while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
2121 gm_packet_drop(pkt, false);
2122
2123 while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
2124 THREAD_OFF(pend_grp->t_expire);
2125 XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
2126 }
2127
2128 while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
2129 THREAD_OFF(pend_gsq->t_send);
2130 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
2131 }
2132
2133 while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
2134 THREAD_OFF(sg->t_sg_expire);
2135 assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
2136 &sg->sgaddr);
2137 assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
2138 &sg->sgaddr);
2139
2140 gm_sg_free(sg);
2141 }
2142 while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
2143 assertf(!gm_packets_count(subscriber->packets), "%pPA",
2144 &subscriber->addr);
2145 XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
2146 }
2147}
2148
/* Fully stop MLD on an interface: cancel timers, leave ff02::16, release
 * the per-VRF socket reference and free all per-interface state.
 * Safe to call when MLD was never started (returns early).
 */
void gm_ifp_teardown(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;

	if (!pim_ifp || !pim_ifp->mld)
		return;

	gm_ifp = pim_ifp->mld;
	/* flag is checked elsewhere to suppress rescheduling during teardown */
	gm_ifp->stopping = true;
	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("MLD stop"));

	THREAD_OFF(gm_ifp->t_query);
	THREAD_OFF(gm_ifp->t_other_querier);
	THREAD_OFF(gm_ifp->t_expire);

	/* mirror of the join done in gm_start(); needs elevated privileges */
	frr_with_privs (&pimd_privs) {
		struct ipv6_mreq mreq;
		int ret;

		/* all-MLDv2 group */
		mreq.ipv6mr_multiaddr = gm_all_routers;
		mreq.ipv6mr_interface = ifp->ifindex;
		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
				 IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
		if (ret)
			zlog_err(
				"(%s) failed to leave ff02::16 (all-MLDv2): %m",
				ifp->name);
	}

	gm_vrf_socket_decref(gm_ifp->pim);

	/* empty all containers before the _fini calls below */
	gm_group_delete(gm_ifp);

	gm_grp_pends_fini(gm_ifp->grp_pends);
	gm_packet_expires_fini(gm_ifp->expires);
	gm_subscribers_fini(gm_ifp->subscribers);
	gm_sgs_fini(gm_ifp->sgs);

	XFREE(MTYPE_GM_IFACE, gm_ifp);
	pim_ifp->mld = NULL;
}
2193
/* React to a change of the interface's lowest link-local address (the
 * address MLD queries are sourced from).  If we were the elected querier,
 * carry the querier role over to the new address and restart the startup
 * query series; if the address became lower than the current querier's,
 * take over the querier role.
 */
static void gm_update_ll(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;
	bool was_querier;

	/* querier == our previous lowest link-local => we were the querier */
	was_querier =
		!IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
		!pim_addr_is_any(gm_ifp->querier);

	gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
	if (was_querier)
		gm_ifp->querier = pim_ifp->ll_lowest;
	THREAD_OFF(gm_ifp->t_query);

	/* no link-local left at all: cannot send queries anymore */
	if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
		if (was_querier)
			zlog_info(log_ifp(
				"lost link-local address, stopping querier"));
		return;
	}

	if (was_querier)
		zlog_info(log_ifp("new link-local %pPA while querier"),
			  &gm_ifp->cur_ll_lowest);
	else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
		 pim_addr_is_any(gm_ifp->querier)) {
		/* lower address wins the querier election */
		zlog_info(log_ifp("new link-local %pPA, becoming querier"),
			  &gm_ifp->cur_ll_lowest);
		gm_ifp->querier = gm_ifp->cur_ll_lowest;
	} else
		return;

	/* (re)start the startup query burst from the new source address */
	gm_ifp->n_startup = gm_ifp->cur_qrv;
	thread_execute(router->master, gm_t_query, gm_ifp, 0);
}
2230
/* Synchronize running MLD state on an interface with its configuration,
 * starting or tearing down MLD as needed.  Called on interface and PIM
 * config changes.
 */
void gm_ifp_update(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;
	bool changed = false;

	if (!pim_ifp)
		return;
	if (!if_is_operative(ifp) || !pim_ifp->pim ||
	    pim_ifp->mroute_vif_index < 0) {
		gm_ifp_teardown(ifp);
		return;
	}

	/*
	 * If ipv6 mld is not enabled on interface, do not start mld activities.
	 */
	if (!pim_ifp->gm_enable)
		return;

	if (!pim_ifp->mld) {
		changed = true;
		gm_start(ifp);
	}

	gm_ifp = pim_ifp->mld;
	if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
		gm_update_ll(ifp);

	unsigned int cfg_query_intv = pim_ifp->gm_default_query_interval * 1000;

	if (gm_ifp->cur_query_intv != cfg_query_intv) {
		gm_ifp->cur_query_intv = cfg_query_intv;
		changed = true;
	}

	unsigned int cfg_query_intv_trig =
		pim_ifp->gm_specific_query_max_response_time_dsec * 100;

	if (gm_ifp->cur_query_intv_trig != cfg_query_intv_trig) {
		gm_ifp->cur_query_intv_trig = cfg_query_intv_trig;
		changed = true;
	}

	unsigned int cfg_max_response =
		pim_ifp->gm_query_max_response_time_dsec * 100;

	/* max response time and LMQC are applied without setting "changed",
	 * i.e. without re-triggering the querier below
	 */
	if (gm_ifp->cur_max_resp != cfg_max_response)
		gm_ifp->cur_max_resp = cfg_max_response;

	if (gm_ifp->cur_lmqc != pim_ifp->gm_last_member_query_count)
		gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;

	enum gm_version cfg_version;

	if (pim_ifp->mld_version == 1)
		cfg_version = GM_MLDV1;
	else
		cfg_version = GM_MLDV2;
	if (gm_ifp->cur_version != cfg_version) {
		gm_ifp->cur_version = cfg_version;
		changed = true;
	}

	if (changed) {
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_ifp(
				"MLD querier config changed, querying"));
		gm_bump_querier(gm_ifp);
	}
}
2302
d2951219
DL
2303/*
2304 * CLI (show commands only)
2305 */
5e5034b0
DL
2306
2307#include "lib/command.h"
2308
5e5034b0 2309#include "pimd/pim6_mld_clippy.c"
5e5034b0 2310
d2951219
DL
2311static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
2312 int *err)
2313{
2314 struct vrf *ret;
2315
2316 if (!vrf_str)
2317 return vrf_lookup_by_id(VRF_DEFAULT);
2318 if (!strcmp(vrf_str, "all"))
2319 return NULL;
2320 ret = vrf_lookup_by_name(vrf_str);
2321 if (ret)
2322 return ret;
2323
2324 vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str);
2325 *err = CMD_WARNING;
2326 return NULL;
2327}
2328
/* Multi-line text output for "show ipv6 mld interface ... detail" for a
 * single interface.  Unlike gm_show_if_one(), this handles interfaces
 * without PIM or MLD state itself, printing a placeholder line.
 */
static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp)
{
	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
	struct gm_if *gm_ifp;
	bool querier;
	size_t i;

	if (!pim_ifp) {
		vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name);
		return;
	}

	gm_ifp = pim_ifp->mld;
	if (!gm_ifp) {
		vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name);
		return;
	}

	/* we are the querier iff the elected querier is our lowest ll addr */
	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);

	vty_out(vty, "Interface %s: MLD running\n", ifp->name);
	vty_out(vty, " Uptime: %pTVMs\n", &gm_ifp->started);
	vty_out(vty, " MLD version: %d\n", gm_ifp->cur_version);
	vty_out(vty, " Querier: %pPA%s\n", &gm_ifp->querier,
		querier ? " (this system)" : "");
	vty_out(vty, " Query timer: %pTH\n", gm_ifp->t_query);
	vty_out(vty, " Other querier timer: %pTH\n",
		gm_ifp->t_other_querier);
	vty_out(vty, " Robustness value: %u\n", gm_ifp->cur_qrv);
	vty_out(vty, " Query interval: %ums\n",
		gm_ifp->cur_query_intv);
	vty_out(vty, " Query response timer: %ums\n", gm_ifp->cur_max_resp);
	vty_out(vty, " Last member query intv.: %ums\n",
		gm_ifp->cur_query_intv_trig);
	vty_out(vty, " %u expiry timers from general queries:\n",
		gm_ifp->n_pending);
	for (i = 0; i < gm_ifp->n_pending; i++) {
		struct gm_general_pending *p = &gm_ifp->pending[i];

		vty_out(vty, " %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
			&p->query, &p->expiry);
	}
	vty_out(vty, " %zu expiry timers from *,G queries\n",
		gm_grp_pends_count(gm_ifp->grp_pends));
	vty_out(vty, " %zu expiry timers from S,G queries\n",
		gm_gsq_pends_count(gm_ifp->gsq_pends));
	vty_out(vty, " %zu total *,G/S,G from %zu hosts in %zu bundles\n",
		gm_sgs_count(gm_ifp->sgs),
		gm_subscribers_count(gm_ifp->subscribers),
		gm_packet_expires_count(gm_ifp->expires));
	vty_out(vty, "\n");
}
2381
/* Render one interface either into a JSON object (js_if != NULL) or as a
 * single table row.  The caller (gm_show_if_vrf) guarantees pim_ifp and
 * pim_ifp->mld are non-NULL here.
 */
static void gm_show_if_one(struct vty *vty, struct interface *ifp,
			   json_object *js_if)
{
	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;
	bool querier;

	/* we are the querier iff the elected querier is our lowest ll addr */
	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);

	if (js_if) {
		json_object_string_add(js_if, "name", ifp->name);
		/* primary (global) address, as opposed to querierIp below */
		json_object_string_addf(js_if, "address", "%pPA",
					&pim_ifp->primary_address);
		json_object_string_add(js_if, "state", "up");
		json_object_string_addf(js_if, "version", "%d",
					gm_ifp->cur_version);
		json_object_string_addf(js_if, "upTime", "%pTVMs",
					&gm_ifp->started);
		json_object_boolean_add(js_if, "querier", querier);
		json_object_string_addf(js_if, "querierIp", "%pPA",
					&gm_ifp->querier);
		/* only one of the two timers is relevant depending on role */
		if (querier)
			json_object_string_addf(js_if, "queryTimer", "%pTH",
						gm_ifp->t_query);
		else
			json_object_string_addf(js_if, "otherQuerierTimer",
						"%pTH",
						gm_ifp->t_other_querier);
		json_object_int_add(js_if, "timerRobustnessValue",
				    gm_ifp->cur_qrv);
		json_object_int_add(js_if, "lastMemberQueryCount",
				    gm_ifp->cur_lmqc);
		json_object_int_add(js_if, "timerQueryIntervalMsec",
				    gm_ifp->cur_query_intv);
		json_object_int_add(js_if, "timerQueryResponseTimerMsec",
				    gm_ifp->cur_max_resp);
		json_object_int_add(js_if, "timerLastMemberQueryIntervalMsec",
				    gm_ifp->cur_query_intv_trig);
	} else {
		vty_out(vty, "%-16s %-5s %d %-25pPA %-5s %11pTH %pTVMs\n",
			ifp->name, "up", gm_ifp->cur_version, &gm_ifp->querier,
			querier ? "query" : "other",
			querier ? gm_ifp->t_query : gm_ifp->t_other_querier,
			&gm_ifp->started);
	}
}
2428
2429static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname,
2430 bool detail, json_object *js)
2431{
2432 struct interface *ifp;
2433 json_object *js_vrf;
fe4db62c 2434 struct pim_interface *pim_ifp;
d2951219
DL
2435
2436 if (js) {
2437 js_vrf = json_object_new_object();
2438 json_object_object_add(js, vrf->name, js_vrf);
2439 }
2440
2441 FOR_ALL_INTERFACES (vrf, ifp) {
2442 json_object *js_if = NULL;
2443
2444 if (ifname && strcmp(ifp->name, ifname))
2445 continue;
2446 if (detail && !js) {
2447 gm_show_if_one_detail(vty, ifp);
2448 continue;
2449 }
2450
fe4db62c
SP
2451 pim_ifp = ifp ->info;
2452
2453 if (!pim_ifp || !pim_ifp->mld)
d2951219 2454 continue;
fe4db62c 2455
d2951219
DL
2456 if (js) {
2457 js_if = json_object_new_object();
2458 json_object_object_add(js_vrf, ifp->name, js_if);
2459 }
2460
2461 gm_show_if_one(vty, ifp, js_if);
2462 }
2463}
2464
2465static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname,
2466 bool detail, json_object *js)
2467{
2468 if (!js && !detail)
2469 vty_out(vty, "%-16s %-5s V %-25s %-18s %s\n", "Interface",
2470 "State", "Querier", "Timer", "Uptime");
2471
2472 if (vrf)
2473 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2474 else
2475 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2476 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2477}
2478
/* "show ipv6 mld [vrf <VRF|all>] interface [IFNAME|detail] [json]" */
DEFPY(gm_show_interface,
      gm_show_interface_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME | detail$detail] [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      "MLD interface information\n"
      "Interface name\n"
      "Detailed output\n"
      JSON_STR)
{
	int ret = CMD_SUCCESS;
	struct vrf *vrf;
	json_object *js = NULL;

	/* NULL vrf with ret == CMD_SUCCESS means "all VRFs" */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	if (json)
		js = json_object_new_object();
	gm_show_if(vty, vrf, ifname, !!detail, js);
	return vty_json(vty, js);
}
2504
2505static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp,
2506 json_object *js_if)
2507{
2508 struct gm_if_stats *stats = &gm_ifp->stats;
2509 /* clang-format off */
2510 struct {
2511 const char *text;
2512 const char *js_key;
2513 uint64_t *val;
2514 } *item, items[] = {
2515 { "v2 reports received", "rxV2Reports", &stats->rx_new_report },
2516 { "v1 reports received", "rxV1Reports", &stats->rx_old_report },
2517 { "v1 done received", "rxV1Done", &stats->rx_old_leave },
2518
2519 { "v2 *,* queries received", "rxV2QueryGeneral", &stats->rx_query_new_general },
2520 { "v2 *,G queries received", "rxV2QueryGroup", &stats->rx_query_new_group },
2521 { "v2 S,G queries received", "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc },
2522 { "v2 S-bit queries received", "rxV2QuerySBit", &stats->rx_query_new_sbit },
2523 { "v1 *,* queries received", "rxV1QueryGeneral", &stats->rx_query_old_general },
2524 { "v1 *,G queries received", "rxV1QueryGroup", &stats->rx_query_old_group },
2525
2526 { "v2 *,* queries sent", "txV2QueryGeneral", &stats->tx_query_new_general },
2527 { "v2 *,G queries sent", "txV2QueryGroup", &stats->tx_query_new_group },
2528 { "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc },
2529 { "v1 *,* queries sent", "txV1QueryGeneral", &stats->tx_query_old_general },
2530 { "v1 *,G queries sent", "txV1QueryGroup", &stats->tx_query_old_group },
2531 { "TX errors", "txErrors", &stats->tx_query_fail },
2532
d2951219
DL
2533 { "RX dropped (checksum error)", "rxDropChecksum", &stats->rx_drop_csum },
2534 { "RX dropped (invalid source)", "rxDropSrcAddr", &stats->rx_drop_srcaddr },
2535 { "RX dropped (invalid dest.)", "rxDropDstAddr", &stats->rx_drop_dstaddr },
2536 { "RX dropped (missing alert)", "rxDropRtrAlert", &stats->rx_drop_ra },
2537 { "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed },
2538 { "RX truncated reports", "rxTruncatedRep", &stats->rx_trunc_report },
2539 };
2540 /* clang-format on */
2541
2542 for (item = items; item < items + array_size(items); item++) {
2543 if (js_if)
2544 json_object_int_add(js_if, item->js_key, *item->val);
2545 else
2546 vty_out(vty, " %-30s %" PRIu64 "\n", item->text,
2547 *item->val);
2548 }
2549}
2550
2551static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf,
2552 const char *ifname, json_object *js)
2553{
2554 struct interface *ifp;
2555 json_object *js_vrf;
2556
2557 if (js) {
2558 js_vrf = json_object_new_object();
2559 json_object_object_add(js, vrf->name, js_vrf);
2560 }
2561
2562 FOR_ALL_INTERFACES (vrf, ifp) {
2563 struct pim_interface *pim_ifp;
2564 struct gm_if *gm_ifp;
2565 json_object *js_if = NULL;
2566
2567 if (ifname && strcmp(ifp->name, ifname))
2568 continue;
2569
2570 if (!ifp->info)
2571 continue;
2572 pim_ifp = ifp->info;
2573 if (!pim_ifp->mld)
2574 continue;
2575 gm_ifp = pim_ifp->mld;
2576
2577 if (js) {
2578 js_if = json_object_new_object();
2579 json_object_object_add(js_vrf, ifp->name, js_if);
2580 } else {
2581 vty_out(vty, "Interface: %s\n", ifp->name);
2582 }
2583 gm_show_stats_one(vty, gm_ifp, js_if);
2584 if (!js)
2585 vty_out(vty, "\n");
2586 }
2587}
2588
/* "show ipv6 mld [vrf <VRF|all>] statistics [interface IFNAME] [json]" */
DEFPY(gm_show_interface_stats,
      gm_show_interface_stats_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      "MLD statistics\n"
      INTERFACE_STR
      "Interface name\n"
      JSON_STR)
{
	int ret = CMD_SUCCESS;
	struct vrf *vrf;
	json_object *js = NULL;

	/* NULL vrf with ret == CMD_SUCCESS means "all VRFs" */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	if (json)
		js = json_object_new_object();

	if (vrf)
		gm_show_stats_vrf(vty, vrf, ifname, js);
	else
		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
			gm_show_stats_vrf(vty, vrf, ifname, js);
	return vty_json(vty, js);
}
2619
/* Render the (S,G) join table for one interface, optionally limited to
 * group/source prefix ranges.  With detail set, each tracked subscriber
 * (joiner/pruner) is listed per (S,G).  Output goes into js_if when
 * given, otherwise as vty text rows.
 */
static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp,
			      const struct prefix_ipv6 *groups,
			      const struct prefix_ipv6 *sources, bool detail,
			      json_object *js_if)
{
	struct gm_sg *sg, *sg_start;
	json_object *js_group = NULL;
	pim_addr js_grpaddr = PIMADDR_ANY;
	struct gm_subscriber sub_ref = {}, *sub_untracked;

	if (groups) {
		struct gm_sg sg_ref = {};

		/* sgs are ordered; seek directly to the first candidate */
		sg_ref.sgaddr.grp = pim_addr_from_prefix(groups);
		sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
	} else
		sg_start = gm_sgs_first(gm_ifp->sgs);

	/* hosts that opted out of tracking are collected under a dummy
	 * subscriber address
	 */
	sub_ref.addr = gm_dummy_untracked;
	sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref);
	/* NB: sub_untracked may be NULL if no untracked joins exist */

	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
		struct timeval *recent = NULL, *untracked = NULL;
		json_object *js_src;

		if (groups) {
			struct prefix grp_p;

			pim_addr_to_prefix(&grp_p, sg->sgaddr.grp);
			/* ordered iteration: past the range means done */
			if (!prefix_match(groups, &grp_p))
				break;
		}

		if (sources) {
			struct prefix src_p;

			pim_addr_to_prefix(&src_p, sg->sgaddr.src);
			if (!prefix_match(sources, &src_p))
				continue;
		}

		if (sg->most_recent) {
			struct gm_packet_state *packet;

			packet = gm_packet_sg2state(sg->most_recent);
			recent = &packet->received;
		}

		if (sub_untracked) {
			struct gm_packet_state *packet;
			struct gm_packet_sg *item;

			item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked);
			if (item) {
				packet = gm_packet_sg2state(item);
				untracked = &packet->received;
			}
		}

		if (!js_if) {
			FMT_NSTD_BEGIN; /* %.0p */
			vty_out(vty,
				"%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
				&sg->sgaddr.grp, &sg->sgaddr.src,
				gm_states[sg->state], recent, untracked,
				&sg->created);

			if (!detail)
				continue;

			struct gm_packet_sg *item;
			struct gm_packet_state *packet;

			/* the untracked pseudo-subscriber is already shown
			 * in the NonTrkSeen column above; skip it here
			 */
			frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
				packet = gm_packet_sg2state(item);

				if (packet->subscriber == sub_untracked)
					continue;
				vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
					&packet->subscriber->addr, "(JOIN)",
					&packet->received);
			}
			frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
				packet = gm_packet_sg2state(item);

				if (packet->subscriber == sub_untracked)
					continue;
				vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
					&packet->subscriber->addr, "(PRUNE)",
					&packet->received);
			}
			FMT_NSTD_END; /* %.0p */
			continue;
		}
		/* if (js_if) */

		/* sgs iterate in group order, so one group object can be
		 * reused for consecutive entries of the same group
		 */
		if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) {
			js_group = json_object_new_object();
			json_object_object_addf(js_if, js_group, "%pPA",
						&sg->sgaddr.grp);
			js_grpaddr = sg->sgaddr.grp;
		}

		js_src = json_object_new_object();
		json_object_object_addf(js_group, js_src, "%pPAs",
					&sg->sgaddr.src);

		json_object_string_add(js_src, "state", gm_states[sg->state]);
		json_object_string_addf(js_src, "created", "%pTVMs",
					&sg->created);
		json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent);

		if (untracked)
			json_object_string_addf(js_src, "untrackedLastSeen",
						"%pTVMs", untracked);
		if (!detail)
			continue;

		json_object *js_subs;
		struct gm_packet_sg *item;
		struct gm_packet_state *packet;

		js_subs = json_object_new_object();
		json_object_object_add(js_src, "joinedBy", js_subs);
		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
			packet = gm_packet_sg2state(item);
			if (packet->subscriber == sub_untracked)
				continue;

			json_object *js_sub;

			js_sub = json_object_new_object();
			json_object_object_addf(js_subs, js_sub, "%pPA",
						&packet->subscriber->addr);
			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
						&packet->received);
		}

		js_subs = json_object_new_object();
		json_object_object_add(js_src, "prunedBy", js_subs);
		frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
			packet = gm_packet_sg2state(item);
			if (packet->subscriber == sub_untracked)
				continue;

			json_object *js_sub;

			js_sub = json_object_new_object();
			json_object_object_addf(js_subs, js_sub, "%pPA",
						&packet->subscriber->addr);
			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
						&packet->received);
		}
	}
}
2776
2777static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf,
2778 const char *ifname,
2779 const struct prefix_ipv6 *groups,
2780 const struct prefix_ipv6 *sources, bool detail,
2781 json_object *js)
2782{
2783 struct interface *ifp;
2784 json_object *js_vrf;
2785
2786 if (js) {
2787 js_vrf = json_object_new_object();
58971e15 2788 json_object_string_add(js_vrf, "vrf", vrf->name);
d2951219
DL
2789 json_object_object_add(js, vrf->name, js_vrf);
2790 }
2791
2792 FOR_ALL_INTERFACES (vrf, ifp) {
2793 struct pim_interface *pim_ifp;
2794 struct gm_if *gm_ifp;
2795 json_object *js_if = NULL;
2796
2797 if (ifname && strcmp(ifp->name, ifname))
2798 continue;
2799
2800 if (!ifp->info)
2801 continue;
2802 pim_ifp = ifp->info;
2803 if (!pim_ifp->mld)
2804 continue;
2805 gm_ifp = pim_ifp->mld;
2806
2807 if (js) {
2808 js_if = json_object_new_object();
2809 json_object_object_add(js_vrf, ifp->name, js_if);
2810 }
2811
2812 if (!js && !ifname)
2813 vty_out(vty, "\nOn interface %s:\n", ifp->name);
2814
2815 gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if);
2816 }
2817}
2818
/* "show ipv6 mld [vrf ...] joins [{interface|groups|sources|detail}] [json]" */
DEFPY(gm_show_interface_joins,
      gm_show_interface_joins_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      "MLD joined groups & sources\n"
      INTERFACE_STR
      "Interface name\n"
      "Limit output to group range\n"
      "Show groups covered by this prefix\n"
      "Limit output to source range\n"
      "Show sources covered by this prefix\n"
      "Show details, including tracked receivers\n"
      JSON_STR)
{
	int ret = CMD_SUCCESS;
	struct vrf *vrf;
	json_object *js = NULL;

	/* NULL vrf with ret == CMD_SUCCESS means "all VRFs" */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	if (json)
		js = json_object_new_object();
	else
		vty_out(vty, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
			"Source", "State", "LastSeen", "NonTrkSeen", "Created");

	if (vrf)
		gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail,
				  js);
	else
		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
			gm_show_joins_vrf(vty, vrf, ifname, groups, sources,
					  !!detail, js);
	return vty_json(vty, js);
}
2859
cdc1b770
SG
/* "show ipv6 mld groups" worker for one VRF: lists every MLD group per
 * interface, with totals and the configured watermark limit.  Text output
 * goes through a ttable; JSON builds one object per interface with a
 * "groups" array.
 */
static void gm_show_groups(struct vty *vty, struct vrf *vrf, bool uj)
{
	struct interface *ifp;
	struct ttable *tt = NULL;
	char *table;
	json_object *json = NULL;
	json_object *json_iface = NULL;
	json_object *json_group = NULL;
	json_object *json_groups = NULL;
	struct pim_instance *pim = vrf->info;

	if (uj) {
		json = json_object_new_object();
		json_object_int_add(json, "totalGroups", pim->gm_group_count);
		json_object_int_add(json, "watermarkLimit",
				    pim->gm_watermark_limit);
	} else {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(tt, "Interface|Group|Version|Uptime");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
		ttable_restyle(tt);

		vty_out(vty, "Total MLD groups: %u\n", pim->gm_group_count);
		vty_out(vty, "Watermark warn limit(%s): %u\n",
			pim->gm_watermark_limit ? "Set" : "Not Set",
			pim->gm_watermark_limit);
	}

	/* scan interfaces */
	FOR_ALL_INTERFACES (vrf, ifp) {

		struct pim_interface *pim_ifp = ifp->info;
		struct gm_if *gm_ifp;
		struct gm_sg *sg;

		if (!pim_ifp)
			continue;

		gm_ifp = pim_ifp->mld;
		if (!gm_ifp)
			continue;

		/* scan mld groups */
		frr_each (gm_sgs, gm_ifp->sgs, sg) {

			if (uj) {
				/* interface object is created lazily on the
				 * first group seen for it
				 */
				json_object_object_get_ex(json, ifp->name,
							  &json_iface);

				if (!json_iface) {
					json_iface = json_object_new_object();
					json_object_pim_ifp_add(json_iface,
								ifp);
					json_object_object_add(json, ifp->name,
							       json_iface);
					json_groups = json_object_new_array();
					json_object_object_add(json_iface,
							       "groups",
							       json_groups);
				}

				json_group = json_object_new_object();
				json_object_string_addf(json_group, "group",
							"%pPAs",
							&sg->sgaddr.grp);

				json_object_int_add(json_group, "version",
						    pim_ifp->mld_version);
				json_object_string_addf(json_group, "uptime",
							"%pTVMs", &sg->created);
				json_object_array_add(json_groups, json_group);
			} else {
				ttable_add_row(tt, "%s|%pPAs|%d|%pTVMs",
					       ifp->name, &sg->sgaddr.grp,
					       pim_ifp->mld_version,
					       &sg->created);
			}
		} /* scan gm groups */
	} /* scan interfaces */

	if (uj)
		vty_json(vty, json);
	else {
		/* Dump the generated table. */
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	}
}
2952
/* "show ipv6 mld [vrf <VRF|all>] groups [json]" */
DEFPY(gm_show_mld_groups,
      gm_show_mld_groups_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      MLD_GROUP_STR
      JSON_STR)
{
	int ret = CMD_SUCCESS;
	struct vrf *vrf;

	/* NULL vrf with ret == CMD_SUCCESS means "all VRFs" */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	/* gm_show_groups handles both text and JSON output per VRF */
	if (vrf)
		gm_show_groups(vty, vrf, !!json);
	else
		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
			gm_show_groups(vty, vrf, !!json);

	return CMD_SUCCESS;
}
2978
5e5034b0
DL
/* Raw dump of internal MLD state on one interface, for debugging only:
 * querier/timer state, pending general-query expiries, every (S,G) with
 * its positive/negative subscription lists, and every subscriber with its
 * tracked packets (including pointer values for cross-referencing).
 */
DEFPY(gm_debug_show,
      gm_debug_show_cmd,
      "debug show mld interface IFNAME",
      DEBUG_STR
      SHOW_STR
      MLD_STR
      INTERFACE_STR
      "interface name\n")
{
	struct interface *ifp;
	struct pim_interface *pim_ifp;
	struct gm_if *gm_ifp;

	/* debug command: default VRF only */
	ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
	if (!ifp) {
		vty_out(vty, "%% no such interface: %pSQq\n", ifname);
		return CMD_WARNING;
	}

	pim_ifp = ifp->info;
	if (!pim_ifp) {
		vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname);
		return CMD_WARNING;
	}

	gm_ifp = pim_ifp->mld;
	if (!gm_ifp) {
		vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname);
		return CMD_WARNING;
	}

	vty_out(vty, "querier: %pPA\n", &gm_ifp->querier);
	vty_out(vty, "ll_lowest: %pPA\n\n", &pim_ifp->ll_lowest);
	vty_out(vty, "t_query: %pTHD\n", gm_ifp->t_query);
	vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier);
	vty_out(vty, "t_expire: %pTHD\n", gm_ifp->t_expire);

	vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending);
	for (size_t i = 0; i < gm_ifp->n_pending; i++) {
		int64_t query, expiry;

		query = monotime_since(&gm_ifp->pending[i].query, NULL);
		expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL);

		vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n",
			i, query / 1000, expiry / 1000);
	}

	struct gm_sg *sg;
	struct gm_packet_state *pkt;
	struct gm_packet_sg *item;
	struct gm_subscriber *subscriber;

	vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs));
	frr_each (gm_sgs, gm_ifp->sgs, sg) {
		vty_out(vty, "\t%pSG t_expire=%pTHD\n", &sg->sgaddr,
			sg->t_sg_expire);

		/* flags: S = source record, E = exclude-mode record */
		vty_out(vty, "\t @pos:%zu\n",
			gm_packet_sg_subs_count(sg->subs_positive));
		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
			pkt = gm_packet_sg2state(item);

			vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n",
				item->is_src ? "S" : "",
				item->is_excl ? "E" : "",
				&pkt->subscriber->addr, pkt->subscriber, pkt,
				item->offset);

			assert(item->sg == sg);
		}
		vty_out(vty, "\t @neg:%zu\n",
			gm_packet_sg_subs_count(sg->subs_negative));
		frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
			pkt = gm_packet_sg2state(item);

			vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n",
				item->is_src ? "S" : "",
				item->is_excl ? "E" : "",
				&pkt->subscriber->addr, pkt->subscriber, pkt,
				item->offset);

			assert(item->sg == sg);
		}
	}

	vty_out(vty, "\n%zu subscribers:\n",
		gm_subscribers_count(gm_ifp->subscribers));
	frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) {
		vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr,
			subscriber, gm_packets_count(subscriber->packets));

		frr_each (gm_packets, subscriber->packets, pkt) {
			vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n",
				pkt,
				monotime_since(&pkt->received, NULL) *
					0.000001f,
				pkt->n_active, pkt->n_sg);

			for (size_t i = 0; i < pkt->n_sg; i++) {
				item = pkt->items + i;

				vty_out(vty, "\t\t[%zu]", i);

				if (!item->sg) {
					vty_out(vty, " inactive\n");
					continue;
				}

				vty_out(vty, " %s%s %pSG nE=%u\n",
					item->is_src ? "S" : "",
					item->is_excl ? "E" : "",
					&item->sg->sgaddr, item->n_exclude);
			}
		}
	}

	return CMD_SUCCESS;
}
3098
/* Debug-only knob: overwrite the *running* QRV / max-response values on
 * an interface directly (pim_ifp configuration is untouched, so a later
 * gm_ifp_update() may overwrite these again) and bump the querier if
 * anything changed.
 */
DEFPY(gm_debug_iface_cfg,
      gm_debug_iface_cfg_cmd,
      "debug ipv6 mld {"
      "robustness (0-7)|"
      "query-max-response-time (1-8387584)"
      "}",
      DEBUG_STR
      IPV6_STR
      "Multicast Listener Discovery\n"
      "QRV\nQRV\n"
      "maxresp\nmaxresp\n")
{
	VTY_DECLVAR_CONTEXT(interface, ifp);
	struct pim_interface *pim_ifp;
	struct gm_if *gm_ifp;
	bool changed = false;

	pim_ifp = ifp->info;
	if (!pim_ifp) {
		vty_out(vty, "%% no PIM state for interface %pSQq\n",
			ifp->name);
		return CMD_WARNING;
	}
	gm_ifp = pim_ifp->mld;
	if (!gm_ifp) {
		vty_out(vty, "%% no MLD state for interface %pSQq\n",
			ifp->name);
		return CMD_WARNING;
	}

	/* the *_str variables tell us whether the argument was given */
	if (robustness_str && gm_ifp->cur_qrv != robustness) {
		gm_ifp->cur_qrv = robustness;
		changed = true;
	}
	if (query_max_response_time_str &&
	    gm_ifp->cur_max_resp != (unsigned int)query_max_response_time) {
		gm_ifp->cur_max_resp = query_max_response_time;
		changed = true;
	}

	if (changed) {
		vty_out(vty, "%% MLD querier config changed, bumping\n");
		gm_bump_querier(gm_ifp);
	}
	return CMD_SUCCESS;
}
3145
/* prototype directly above the definition — presumably to satisfy
 * -Wmissing-prototypes without a header entry; confirm against build flags
 */
void gm_cli_init(void);

/* Register all MLD show/debug commands with the CLI. */
void gm_cli_init(void)
{
	install_element(VIEW_NODE, &gm_show_interface_cmd);
	install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
	install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
	install_element(VIEW_NODE, &gm_show_mld_groups_cmd);

	install_element(VIEW_NODE, &gm_debug_show_cmd);
	install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
}