1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * PIMv6 MLD querier
4 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
5 */
6
7/*
8 * keep pim6_mld.h open when working on this code. Most data structures are
9 * commented in the header.
10 *
11 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
12 * that this code will replace the old IGMP querier at some point.
13 */
14
15#include <zebra.h>
16#include <netinet/ip6.h>
17
18#include "lib/memory.h"
19#include "lib/jhash.h"
20#include "lib/prefix.h"
21#include "lib/checksum.h"
22#include "lib/frrevent.h"
23#include "termtable.h"
24
25#include "pimd/pim6_mld.h"
26#include "pimd/pim6_mld_protocol.h"
27#include "pimd/pim_memory.h"
28#include "pimd/pim_instance.h"
29#include "pimd/pim_iface.h"
30#include "pimd/pim6_cmd.h"
31#include "pimd/pim_cmd_common.h"
32#include "pimd/pim_util.h"
33#include "pimd/pim_tib.h"
34#include "pimd/pimd.h"
35
36#ifndef IPV6_MULTICAST_ALL
37#define IPV6_MULTICAST_ALL 29
38#endif
39
40DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface");
41DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet");
42DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber");
43DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state");
44DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)");
45DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
46DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");
47
48static void gm_t_query(struct event *t);
49static void gm_trigger_specific(struct gm_sg *sg);
50static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
51 struct timeval expire_wait);
52
53/* shorthand for log messages */
54#define log_ifp(msg) \
55 "[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
56#define log_pkt_src(msg) \
57 "[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
58 &pkt_src->sin6_addr
59#define log_sg(sg, msg) \
60 "[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
61 sg->iface->ifp->name, &sg->sgaddr
62
63/* clang-format off */
64#if PIM_IPV == 6
65static const pim_addr gm_all_hosts = {
66 .s6_addr = {
67 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
69 },
70};
71static const pim_addr gm_all_routers = {
72 .s6_addr = {
73 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
75 },
76};
77/* MLDv1 does not allow subscriber tracking due to report suppression;
78 * hence, the source address is replaced with ffff:...:ffff
79 */
80static const pim_addr gm_dummy_untracked = {
81 .s6_addr = {
82 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
83 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
84 },
85};
86#else
87/* 224.0.0.1 */
88static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), };
89/* 224.0.0.22 */
90static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), };
91static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, };
92#endif
93/* clang-format on */
94
95#define IPV6_MULTICAST_SCOPE_LINK 2
96
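/* the multicast scope is the low nibble of the second address byte
 * (ffFS::, RFC 4291).  Link-local scope (2) and below is never routed,
 * so such groups are also kept out of the TIB (see the
 * in6_multicast_nofwd() check in gm_sg_update).
 */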
97static inline uint8_t in6_multicast_scope(const pim_addr *addr)
98{
99 return addr->s6_addr[1] & 0xf;
100}
101
102bool in6_multicast_nofwd(const pim_addr *addr)
103{
104 return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK;
105}
106
107/*
108 * (S,G) -> subscriber,(S,G)
109 */
110
111static int gm_packet_sg_cmp(const struct gm_packet_sg *a,
112 const struct gm_packet_sg *b)
113{
114 const struct gm_packet_state *s_a, *s_b;
115
116 s_a = gm_packet_sg2state(a);
117 s_b = gm_packet_sg2state(b);
118 return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr);
119}
120
121DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm,
122 gm_packet_sg_cmp);
123
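/* the tree comparator reaches the owning packet through
 * gm_packet_sg2state() (container_of via item->offset, see pim6_mld.h),
 * so a lookup key needs a dummy gm_packet_state wrapped around a single
 * item with offset 0 - hence the on-stack struct below.
 */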
124static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg,
125 enum gm_sub_sense sense,
126 struct gm_subscriber *sub)
127{
128 struct {
129 struct gm_packet_state hdr;
130 struct gm_packet_sg item;
131 } ref = {
132 /* clang-format off */
133 .hdr = {
134 .subscriber = sub,
135 },
136 .item = {
137 .offset = 0,
138 },
139 /* clang-format on */
140 };
141
142 return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item);
143}
144
145/*
146 * interface -> (*,G),pending
147 */
148
149static int gm_grp_pending_cmp(const struct gm_grp_pending *a,
150 const struct gm_grp_pending *b)
151{
152 return IPV6_ADDR_CMP(&a->grp, &b->grp);
153}
154
155DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm,
156 gm_grp_pending_cmp);
157
158/*
159 * interface -> ([S1,S2,...],G),pending
160 */
161
162static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a,
163 const struct gm_gsq_pending *b)
164{
165 if (a->s_bit != b->s_bit)
166 return numcmp(a->s_bit, b->s_bit);
167
168 return IPV6_ADDR_CMP(&a->grp, &b->grp);
169}
170
171static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a)
172{
173 uint32_t seed = a->s_bit ? 0x68f0eb5e : 0x156b7f19;
174
175 return jhash(&a->grp, sizeof(a->grp), seed);
176}
177
178DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp,
179 gm_gsq_pending_hash);
180
181/*
182 * interface -> (S,G)
183 */
184
185int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b)
186{
187 return pim_sgaddr_cmp(a->sgaddr, b->sgaddr);
188}
189
190static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
191 pim_addr src)
192{
193 struct gm_sg ref = {};
194
195 ref.sgaddr.grp = grp;
196 ref.sgaddr.src = src;
197 return gm_sgs_find(gm_ifp->sgs, &ref);
198}
199
200static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
201 pim_addr src)
202{
203 struct gm_sg *ret, *prev;
204
205 ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
206 ret->sgaddr.grp = grp;
207 ret->sgaddr.src = src;
208 ret->iface = gm_ifp;
209 prev = gm_sgs_add(gm_ifp->sgs, ret);
210
211 if (prev) {
212 XFREE(MTYPE_GM_SG, ret);
213 ret = prev;
214 } else {
215 monotime(&ret->created);
216 gm_packet_sg_subs_init(ret->subs_positive);
217 gm_packet_sg_subs_init(ret->subs_negative);
218 }
219 return ret;
220}
221
222/*
223 * interface -> packets, sorted by expiry (due to add_tail insert order)
224 */
225
226DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm);
227
228/*
229 * subscriber -> packets
230 */
231
232DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm);
233
234/*
235 * interface -> subscriber
236 */
237
238static int gm_subscriber_cmp(const struct gm_subscriber *a,
239 const struct gm_subscriber *b)
240{
241 return IPV6_ADDR_CMP(&a->addr, &b->addr);
242}
243
244static uint32_t gm_subscriber_hash(const struct gm_subscriber *a)
245{
246 return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4);
247}
248
249DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp,
250 gm_subscriber_hash);
251
252static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp,
253 pim_addr addr)
254{
255 struct gm_subscriber ref = {}, *ret;
256
257 ref.addr = addr;
258 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
259 if (ret)
260 ret->refcount++;
261 return ret;
262}
263
264static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp,
265 pim_addr addr)
266{
267 struct gm_subscriber ref = {}, *ret;
268
269 ref.addr = addr;
270 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
271
272 if (!ret) {
273 ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret));
274 ret->iface = gm_ifp;
275 ret->addr = addr;
276 ret->refcount = 1;
277 monotime(&ret->created);
278 gm_packets_init(ret->packets);
279
280 gm_subscribers_add(gm_ifp->subscribers, ret);
281 }
282 return ret;
283}
284
285static void gm_subscriber_drop(struct gm_subscriber **subp)
286{
287 struct gm_subscriber *sub = *subp;
288 struct gm_if *gm_ifp;
289
290 if (!sub)
291 return;
292 gm_ifp = sub->iface;
293
294 *subp = NULL;
295 sub->refcount--;
296
297 if (sub->refcount)
298 return;
299
300 gm_subscribers_del(gm_ifp->subscribers, sub);
301 XFREE(MTYPE_GM_SUBSCRIBER, sub);
302}
303
304/****************************************************************************/
305
306/* bundle query timer values for combined v1/v2 handling */
307struct gm_query_timers {
308 unsigned int qrv;
309 unsigned int max_resp_ms;
310 unsigned int qqic_ms;
311
312 struct timeval fuzz;
313 struct timeval expire_wait;
314};
315
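/* expire_wait = how long to keep state alive after a query: the remaining
 * (qrv - 1) query retransmissions, spaced qqic_ms apart, plus one maximum
 * response time, plus the configured fuzz to absorb scheduling and
 * propagation jitter.
 */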
316static void gm_expiry_calc(struct gm_query_timers *timers)
317{
318 unsigned int expire =
319 (timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms;
320 ldiv_t exp_div = ldiv(expire, 1000);
321
322 timers->expire_wait.tv_sec = exp_div.quot;
323 timers->expire_wait.tv_usec = exp_div.rem * 1000;
324 timeradd(&timers->expire_wait, &timers->fuzz, &timers->expire_wait);
325}
326
327static void gm_sg_free(struct gm_sg *sg)
328{
329 /* t_sg_expiry is handled before this is reached */
330 EVENT_OFF(sg->t_sg_query);
331 gm_packet_sg_subs_fini(sg->subs_negative);
332 gm_packet_sg_subs_fini(sg->subs_positive);
333 XFREE(MTYPE_GM_SG, sg);
334}
335
336/* clang-format off */
337static const char *const gm_states[] = {
338 [GM_SG_NOINFO] = "NOINFO",
339 [GM_SG_JOIN] = "JOIN",
340 [GM_SG_JOIN_EXPIRING] = "JOIN_EXPIRING",
341 [GM_SG_PRUNE] = "PRUNE",
342 [GM_SG_NOPRUNE] = "NOPRUNE",
343 [GM_SG_NOPRUNE_EXPIRING] = "NOPRUNE_EXPIRING",
344};
345/* clang-format on */
346
347CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported");
348/* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
349 * joined. Whether we actually want/need to support this is a separate
350 * question - it is almost never used. In fact this is exactly what RFC5790
351 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
352 */
353
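/* (re)derive the desired state of this (S,G)/(*,G) entry from the
 * subscription counts: any positive subscriber => JOIN; if every *,G
 * subscriber also excludes this source => PRUNE; a partial exclude
 * => NOPRUNE.  The _EXPIRING variants keep the previous behaviour while
 * responses to a triggered query are still outstanding.
 */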
354static void gm_sg_update(struct gm_sg *sg, bool has_expired)
355{
356 struct gm_if *gm_ifp = sg->iface;
357 enum gm_sg_state prev, desired;
358 bool new_join;
359 struct gm_sg *grp = NULL;
360
361 if (!pim_addr_is_any(sg->sgaddr.src))
362 grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY);
363 else
364 assert(sg->state != GM_SG_PRUNE);
365
366 if (gm_packet_sg_subs_count(sg->subs_positive)) {
367 desired = GM_SG_JOIN;
368 assert(!sg->t_sg_expire);
369 } else if ((sg->state == GM_SG_JOIN ||
370 sg->state == GM_SG_JOIN_EXPIRING) &&
371 !has_expired)
372 desired = GM_SG_JOIN_EXPIRING;
373 else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive))
374 desired = GM_SG_NOINFO;
375 else if (gm_packet_sg_subs_count(grp->subs_positive) ==
376 gm_packet_sg_subs_count(sg->subs_negative)) {
377 if ((sg->state == GM_SG_NOPRUNE ||
378 sg->state == GM_SG_NOPRUNE_EXPIRING) &&
379 !has_expired)
380 desired = GM_SG_NOPRUNE_EXPIRING;
381 else
382 desired = GM_SG_PRUNE;
383 } else if (gm_packet_sg_subs_count(sg->subs_negative))
384 desired = GM_SG_NOPRUNE;
385 else
386 desired = GM_SG_NOINFO;
387
388 if (desired != sg->state && !gm_ifp->stopping) {
389 if (PIM_DEBUG_GM_EVENTS)
390 zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
391 gm_states[desired]);
392
393 if (desired == GM_SG_JOIN_EXPIRING ||
394 desired == GM_SG_NOPRUNE_EXPIRING) {
395 struct gm_query_timers timers;
396
397 timers.qrv = gm_ifp->cur_qrv;
398 timers.max_resp_ms = gm_ifp->cur_max_resp;
399 timers.qqic_ms = gm_ifp->cur_query_intv_trig;
400 timers.fuzz = gm_ifp->cfg_timing_fuzz;
401
402 gm_expiry_calc(&timers);
403 gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);
404
405 EVENT_OFF(sg->t_sg_query);
406 sg->n_query = gm_ifp->cur_lmqc;
407 sg->query_sbit = false;
408 gm_trigger_specific(sg);
409 }
410 }
411 prev = sg->state;
412 sg->state = desired;
413
414 if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping)
415 new_join = false;
416 else
417 new_join = gm_sg_state_want_join(desired);
418
419 if (new_join && !sg->tib_joined) {
420 /* this will retry if join previously failed */
421 sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
422 gm_ifp->ifp, &sg->oil);
423 if (!sg->tib_joined)
424 zlog_warn(
425 "MLD join for %pSG%%%s not propagated into TIB",
426 &sg->sgaddr, gm_ifp->ifp->name);
427 else
428 zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr,
429 gm_ifp->ifp->name);
430
431 } else if (sg->tib_joined && !new_join) {
432 tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil);
433
434 sg->oil = NULL;
435 sg->tib_joined = false;
436 }
437
438 if (desired == GM_SG_NOINFO) {
439 /* multiple paths can lead to the last state going away;
440 * t_sg_expire can still be running if we're arriving from
441 * another path.
442 */
443 if (has_expired)
444 EVENT_OFF(sg->t_sg_expire);
445
446 assertf((!sg->t_sg_expire &&
447 !gm_packet_sg_subs_count(sg->subs_positive) &&
448 !gm_packet_sg_subs_count(sg->subs_negative)),
449 "%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
450 &sg->sgaddr, gm_ifp->ifp->name, has_expired,
451 sg->t_sg_expire, gm_states[prev], gm_states[desired],
452 gm_packet_sg_subs_count(sg->subs_positive),
453 gm_packet_sg_subs_count(sg->subs_negative), grp);
454
455 if (PIM_DEBUG_GM_TRACE)
456 zlog_debug(log_sg(sg, "dropping"));
457
458 gm_sgs_del(gm_ifp->sgs, sg);
459 gm_sg_free(sg);
460 }
461}
462
463/****************************************************************************/
464
465/* the following bunch of functions deals with transferring state from
466 * received packets into gm_packet_state. As a reminder, the querier is
467 * structured to keep all items received in one packet together, since they
468 * will share expiry timers and can thus be handled efficiently.
469 */
470
471static void gm_packet_free(struct gm_packet_state *pkt)
472{
473 gm_packet_expires_del(pkt->iface->expires, pkt);
474 gm_packets_del(pkt->subscriber->packets, pkt);
475 gm_subscriber_drop(&pkt->subscriber);
476 XFREE(MTYPE_GM_STATE, pkt);
477}
478
479static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt,
480 struct gm_sg *sg, bool is_excl,
481 bool is_src)
482{
483 struct gm_packet_sg *item;
484
485 assert(pkt->n_active < pkt->n_sg);
486
487 item = &pkt->items[pkt->n_active];
488 item->sg = sg;
489 item->is_excl = is_excl;
490 item->is_src = is_src;
491 item->offset = pkt->n_active;
492
493 pkt->n_active++;
494 return item;
495}
496
497static bool gm_packet_sg_drop(struct gm_packet_sg *item)
498{
499 struct gm_packet_state *pkt;
500 size_t i;
501
502 assert(item->sg);
503
504 pkt = gm_packet_sg2state(item);
505 if (item->sg->most_recent == item)
506 item->sg->most_recent = NULL;
507
508 for (i = 0; i < item->n_exclude; i++) {
509 struct gm_packet_sg *excl_item;
510
511 excl_item = item + 1 + i;
512 if (!excl_item->sg)
513 continue;
514
515 gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item);
516 excl_item->sg = NULL;
517 pkt->n_active--;
518
519 assert(pkt->n_active > 0);
520 }
521
522 if (item->is_excl && item->is_src)
523 gm_packet_sg_subs_del(item->sg->subs_negative, item);
524 else
525 gm_packet_sg_subs_del(item->sg->subs_positive, item);
526 item->sg = NULL;
527 pkt->n_active--;
528
529 if (!pkt->n_active) {
530 gm_packet_free(pkt);
531 return true;
532 }
533 return false;
534}
535
536static void gm_packet_drop(struct gm_packet_state *pkt, bool trace)
537{
538 for (size_t i = 0; i < pkt->n_sg; i++) {
539 struct gm_sg *sg = pkt->items[i].sg;
540 bool deleted;
541
542 if (!sg)
543 continue;
544
545 if (trace && PIM_DEBUG_GM_TRACE)
546 zlog_debug(log_sg(sg, "general-dropping from %pPA"),
547 &pkt->subscriber->addr);
548 deleted = gm_packet_sg_drop(&pkt->items[i]);
549
550 gm_sg_update(sg, true);
551 if (deleted)
552 break;
553 }
554}
555
556static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp,
557 struct gm_subscriber *subscriber,
558 pim_addr grp, pim_addr *srcs,
559 size_t n_src, enum gm_sub_sense sense)
560{
561 struct gm_sg *sg;
562 struct gm_packet_sg *old_src;
563 size_t i;
564
565 for (i = 0; i < n_src; i++) {
566 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
567 if (!sg)
568 continue;
569
570 old_src = gm_packet_sg_find(sg, sense, subscriber);
571 if (!old_src)
572 continue;
573
574 gm_packet_sg_drop(old_src);
575 gm_sg_update(sg, false);
576 }
577}
578
579static void gm_sg_expiry_cancel(struct gm_sg *sg)
580{
581 if (sg->t_sg_expire && PIM_DEBUG_GM_TRACE)
582 zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
583 EVENT_OFF(sg->t_sg_expire);
584 sg->query_sbit = true;
585}
586
587/* first pass: process all changes resulting in removal of state:
588 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
589 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
590 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
591 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
592 * note *replacing* state is NOT considered *removing* state here
593 *
594 * everything else is thrown into pkt for creation of state in pass 2
595 */
596static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
597 struct mld_v2_rec_hdr *rechdr)
598{
599 /* NB: pkt->subscriber can be NULL here if the subscriber was not
600 * previously seen!
601 */
602 struct gm_subscriber *subscriber = pkt->subscriber;
603 struct gm_sg *grp;
604 struct gm_packet_sg *old_grp = NULL;
605 struct gm_packet_sg *item;
606 size_t n_src = ntohs(rechdr->n_src);
607 size_t j;
608 bool is_excl = false;
609
610 grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY);
611 if (grp && subscriber)
612 old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
613
614 assert(old_grp == NULL || old_grp->is_excl);
615
616 switch (rechdr->type) {
617 case MLD_RECTYPE_IS_EXCLUDE:
618 case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
619 /* this always replaces or creates state */
620 is_excl = true;
621 if (!grp)
622 grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);
623
624 item = gm_packet_sg_setup(pkt, grp, is_excl, false);
625 item->n_exclude = n_src;
626
627 /* [EXCL_INCL_SG_NOTE] referenced below
628 *
629 * in theory, we should drop any S,G that the host may have
630 * previously added in INCLUDE mode. In practice, this is both
631 * incredibly rare and entirely irrelevant. It only makes any
632 * difference if an S,G that the host previously had on the
633 * INCLUDE list is now on the blocked list for EXCLUDE, which
634 * we can cover in processing the S,G list in pass2_excl().
635 *
636 * Other S,G from the host are simply left to expire
637 * "naturally" through general expiry.
638 */
639 break;
640
641 case MLD_RECTYPE_IS_INCLUDE:
642 case MLD_RECTYPE_CHANGE_TO_INCLUDE:
643 if (old_grp) {
644 /* INCLUDE has no *,G state, so old_grp here refers to
645 * previous EXCLUDE => delete it
646 */
647 gm_packet_sg_drop(old_grp);
648 gm_sg_update(grp, false);
649 CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
650 }
651 break;
652
653 case MLD_RECTYPE_ALLOW_NEW_SOURCES:
654 if (old_grp) {
655 /* remove S,Gs from EXCLUDE, and then we're done */
656 gm_packet_sg_remove_sources(pkt->iface, subscriber,
657 rechdr->grp, rechdr->srcs,
658 n_src, GM_SUB_NEG);
659 return;
660 }
661 /* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
662 * identical to IS_INCLUDE (because the list of sources in
663 * IS_INCLUDE is not exhaustive)
664 */
665 break;
666
667 case MLD_RECTYPE_BLOCK_OLD_SOURCES:
668 if (old_grp) {
669 /* this is intentionally not implemented because it
670 * would be complicated as hell. we only take the list
671 * of blocked sources from full group state records
672 */
673 return;
674 }
675
676 if (subscriber)
677 gm_packet_sg_remove_sources(pkt->iface, subscriber,
678 rechdr->grp, rechdr->srcs,
679 n_src, GM_SUB_POS);
680 return;
681 }
682
683 for (j = 0; j < n_src; j++) {
684 struct gm_sg *sg;
685
686 sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
687 if (!sg)
688 sg = gm_sg_make(pkt->iface, rechdr->grp,
689 rechdr->srcs[j]);
690
691 gm_packet_sg_setup(pkt, sg, is_excl, true);
692 }
693}
694
695/* second pass: creating/updating/refreshing state. All the items from the
696 * received packet have already been thrown into gm_packet_state.
697 */
698
699static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i)
700{
701 struct gm_packet_sg *item = &pkt->items[i];
702 struct gm_packet_sg *old = NULL;
703 struct gm_sg *sg = item->sg;
704
705 /* EXCLUDE state was already dropped in pass1 */
706 assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber));
707
708 old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber);
709 if (old)
710 gm_packet_sg_drop(old);
711
712 pkt->n_active++;
713 gm_packet_sg_subs_add(sg->subs_positive, item);
714
715 sg->most_recent = item;
716 gm_sg_expiry_cancel(sg);
717 gm_sg_update(sg, false);
718}
719
720static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs)
721{
722 struct gm_packet_sg *item = &pkt->items[offs];
723 struct gm_packet_sg *old_grp, *item_dup;
724 struct gm_sg *sg_grp = item->sg;
725 size_t i;
726
727 old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber);
728 if (old_grp) {
729 for (i = 0; i < item->n_exclude; i++) {
730 struct gm_packet_sg *item_src, *old_src;
731
732 item_src = &pkt->items[offs + 1 + i];
733 old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG,
734 pkt->subscriber);
735 if (old_src)
736 gm_packet_sg_drop(old_src);
737
738 /* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
739 * items left over if the host previously had INCLUDE
740 * mode going. Remove them here if we find any.
741 */
742 old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS,
743 pkt->subscriber);
744 if (old_src)
745 gm_packet_sg_drop(old_src);
746 }
747
748 /* the previous loop has removed the S,G entries which are
749 * still excluded after this update. So anything left on the
750 * old item was previously excluded but is now included
751 * => need to trigger update on S,G
752 */
753 for (i = 0; i < old_grp->n_exclude; i++) {
754 struct gm_packet_sg *old_src;
755 struct gm_sg *old_sg_src;
756
757 old_src = old_grp + 1 + i;
758 old_sg_src = old_src->sg;
759 if (!old_sg_src)
760 continue;
761
762 gm_packet_sg_drop(old_src);
763 gm_sg_update(old_sg_src, false);
764 }
765
766 gm_packet_sg_drop(old_grp);
767 }
768
769 item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item);
770 assert(!item_dup);
771 pkt->n_active++;
772
773 sg_grp->most_recent = item;
774 gm_sg_expiry_cancel(sg_grp);
775
776 for (i = 0; i < item->n_exclude; i++) {
777 struct gm_packet_sg *item_src;
778
779 item_src = &pkt->items[offs + 1 + i];
780 item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative,
781 item_src);
782
783 if (item_dup)
784 item_src->sg = NULL;
785 else {
786 pkt->n_active++;
787 gm_sg_update(item_src->sg, false);
788 }
789 }
790
791 /* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
792 * to get lower PIM churn/flapping
793 */
794 gm_sg_update(sg_grp, false);
795}
796
797CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state");
798/* on receiving a query, we need to update our robustness/query interval to
799 * match, so we correctly process group/source specific queries after last
800 * member leaves
801 */
802
803static void gm_handle_v2_report(struct gm_if *gm_ifp,
804 const struct sockaddr_in6 *pkt_src, char *data,
805 size_t len)
806{
807 struct mld_v2_report_hdr *hdr;
808 size_t i, n_records, max_entries;
809 struct gm_packet_state *pkt;
810
811 if (len < sizeof(*hdr)) {
812 if (PIM_DEBUG_GM_PACKETS)
813 zlog_debug(log_pkt_src(
814 "malformed MLDv2 report (truncated header)"));
815 gm_ifp->stats.rx_drop_malformed++;
816 return;
817 }
818
819 /* errors after this may at least partially process the packet */
820 gm_ifp->stats.rx_new_report++;
821
822 hdr = (struct mld_v2_report_hdr *)data;
823 data += sizeof(*hdr);
824 len -= sizeof(*hdr);
825
826 /* can't have more *,G and S,G items than there is space for ipv6
827 * addresses, so just use this to allocate temporary buffer
828 */
829 max_entries = len / sizeof(pim_addr);
830 pkt = XCALLOC(MTYPE_GM_STATE,
831 offsetof(struct gm_packet_state, items[max_entries]));
832 pkt->n_sg = max_entries;
833 pkt->iface = gm_ifp;
834 pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);
835
836 n_records = ntohs(hdr->n_records);
837
838 /* validate & remove state in v2_pass1() */
839 for (i = 0; i < n_records; i++) {
840 struct mld_v2_rec_hdr *rechdr;
841 size_t n_src, record_size;
842
843 if (len < sizeof(*rechdr)) {
844 zlog_warn(log_pkt_src(
845 "malformed MLDv2 report (truncated record header)"));
846 gm_ifp->stats.rx_trunc_report++;
847 break;
848 }
849
850 rechdr = (struct mld_v2_rec_hdr *)data;
851 data += sizeof(*rechdr);
852 len -= sizeof(*rechdr);
853
854 n_src = ntohs(rechdr->n_src);
855 record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4;
856
857 if (len < record_size) {
858 zlog_warn(log_pkt_src(
859 "malformed MLDv2 report (truncated source list)"));
860 gm_ifp->stats.rx_trunc_report++;
861 break;
862 }
863 if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) {
864 zlog_warn(
865 log_pkt_src(
866 "malformed MLDv2 report (invalid group %pI6)"),
867 &rechdr->grp);
868 gm_ifp->stats.rx_trunc_report++;
869 break;
870 }
871
872 data += record_size;
873 len -= record_size;
874
875 gm_handle_v2_pass1(pkt, rechdr);
876 }
877
878 if (!pkt->n_active) {
879 gm_subscriber_drop(&pkt->subscriber);
880 XFREE(MTYPE_GM_STATE, pkt);
881 return;
882 }
883
884 pkt = XREALLOC(MTYPE_GM_STATE, pkt,
885 offsetof(struct gm_packet_state, items[pkt->n_active]));
886 pkt->n_sg = pkt->n_active;
887 pkt->n_active = 0;
888
889 monotime(&pkt->received);
890 if (!pkt->subscriber)
891 pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr);
892 gm_packets_add_tail(pkt->subscriber->packets, pkt);
893 gm_packet_expires_add_tail(gm_ifp->expires, pkt);
894
895 for (i = 0; i < pkt->n_sg; i++)
896 if (!pkt->items[i].is_excl)
897 gm_handle_v2_pass2_incl(pkt, i);
898 else {
899 gm_handle_v2_pass2_excl(pkt, i);
900 i += pkt->items[i].n_exclude;
901 }
902
903 if (pkt->n_active == 0)
904 gm_packet_free(pkt);
905}
906
907static void gm_handle_v1_report(struct gm_if *gm_ifp,
908 const struct sockaddr_in6 *pkt_src, char *data,
909 size_t len)
910{
911 struct mld_v1_pkt *hdr;
912 struct gm_packet_state *pkt;
913 struct gm_sg *grp;
914 struct gm_packet_sg *item;
915 size_t max_entries;
916
917 if (len < sizeof(*hdr)) {
918 if (PIM_DEBUG_GM_PACKETS)
919 zlog_debug(log_pkt_src(
920 "malformed MLDv1 report (truncated)"));
921 gm_ifp->stats.rx_drop_malformed++;
922 return;
923 }
924
925 gm_ifp->stats.rx_old_report++;
926
927 hdr = (struct mld_v1_pkt *)data;
928
929 max_entries = 1;
930 pkt = XCALLOC(MTYPE_GM_STATE,
931 offsetof(struct gm_packet_state, items[max_entries]));
932 pkt->n_sg = max_entries;
933 pkt->iface = gm_ifp;
934 pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
935
936 /* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */
937
938 grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY);
939 if (!grp)
940 grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY);
941
942 item = gm_packet_sg_setup(pkt, grp, true, false);
943 item->n_exclude = 0;
944 CPP_NOTICE("set v1-seen timer on grp here");
945
946 /* } */
947
948 /* pass2 will count n_active back up to 1. Also since a v1 report
949 * has exactly 1 group, we can skip the realloc() that v2 needs here.
950 */
951 assert(pkt->n_active == 1);
952 pkt->n_sg = pkt->n_active;
953 pkt->n_active = 0;
954
955 monotime(&pkt->received);
956 if (!pkt->subscriber)
957 pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked);
958 gm_packets_add_tail(pkt->subscriber->packets, pkt);
959 gm_packet_expires_add_tail(gm_ifp->expires, pkt);
960
961 /* pass2 covers installing state & removing old state; all the v1
962 * compat is handled at this point.
963 *
964 * Note that "old state" may be v2; subscribers will switch from v2
965 * reports to v1 reports when the querier changes from v2 to v1. So,
966 * limiting this to v1 would be wrong.
967 */
968 gm_handle_v2_pass2_excl(pkt, 0);
969
970 if (pkt->n_active == 0)
971 gm_packet_free(pkt);
972}
973
974static void gm_handle_v1_leave(struct gm_if *gm_ifp,
975 const struct sockaddr_in6 *pkt_src, char *data,
976 size_t len)
977{
978 struct mld_v1_pkt *hdr;
979 struct gm_subscriber *subscriber;
980 struct gm_sg *grp;
981 struct gm_packet_sg *old_grp;
982
983 if (len < sizeof(*hdr)) {
984 if (PIM_DEBUG_GM_PACKETS)
985 zlog_debug(log_pkt_src(
986 "malformed MLDv1 leave (truncated)"));
987 gm_ifp->stats.rx_drop_malformed++;
988 return;
989 }
990
991 gm_ifp->stats.rx_old_leave++;
992
993 hdr = (struct mld_v1_pkt *)data;
994
995 subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
996 if (!subscriber)
997 return;
998
999 /* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */
1000
1001 grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY);
1002 if (grp) {
1003 old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
1004 if (old_grp) {
1005 gm_packet_sg_drop(old_grp);
1006 gm_sg_update(grp, false);
1007 CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
1008 }
1009 }
1010
1011 /* } */
1012
1013 /* nothing more to do here, pass2 is no-op for leaves */
1014 gm_subscriber_drop(&subscriber);
1015}
1016
1017/* for each general query received (or sent), a timer is started to expire
1018 * _everything_ at the appropriate time (including robustness multiplier).
1019 *
1020 * So when this timer hits, all packets - with all of their items - that were
1021 * received *before* the query are aged out, and state updated accordingly.
1022 * Note that when we receive a refresh/update, the previous/old packet is
1023 * already dropped and replaced with a new one, so in normal steady-state
1024 * operation, this timer won't be doing anything.
1025 *
1026 * Additionally, if a subscriber actively leaves a group, that goes through
1027 * its own path too and won't hit this. This is really only triggered when a
1028 * host straight up disappears.
1029 */
1030static void gm_t_expire(struct event *t)
1031{
1032 struct gm_if *gm_ifp = EVENT_ARG(t);
1033 struct gm_packet_state *pkt;
1034
1035 zlog_info(log_ifp("general expiry timer"));
1036
1037 while (gm_ifp->n_pending) {
1038 struct gm_general_pending *pend = gm_ifp->pending;
1039 struct timeval remain;
1040 int64_t remain_ms;
1041
1042 remain_ms = monotime_until(&pend->expiry, &remain);
1043 if (remain_ms > 0) {
1044 if (PIM_DEBUG_GM_EVENTS)
1045 zlog_debug(
1046 log_ifp("next general expiry in %" PRId64 "ms"),
1047 remain_ms / 1000);
1048
1049 event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
1050 &remain, &gm_ifp->t_expire);
1051 return;
1052 }
1053
1054 while ((pkt = gm_packet_expires_first(gm_ifp->expires))) {
1055 if (timercmp(&pkt->received, &pend->query, >=))
1056 break;
1057
1058 if (PIM_DEBUG_GM_PACKETS)
1059 zlog_debug(log_ifp("expire packet %p"), pkt);
1060 gm_packet_drop(pkt, true);
1061 }
1062
1063 gm_ifp->n_pending--;
1064 memmove(gm_ifp->pending, gm_ifp->pending + 1,
1065 gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
1066 }
1067
1068 if (PIM_DEBUG_GM_EVENTS)
1069 zlog_debug(log_ifp("next general expiry waiting for query"));
1070}
1071
1072/* NB: the receive handlers will also run when sending packets, since we
1073 * receive our own packets back in.
1074 */
1075static void gm_handle_q_general(struct gm_if *gm_ifp,
1076 struct gm_query_timers *timers)
1077{
1078 struct timeval now, expiry;
1079 struct gm_general_pending *pend;
1080
1081 monotime(&now);
1082 timeradd(&now, &timers->expire_wait, &expiry);
1083
1084 while (gm_ifp->n_pending) {
1085 pend = &gm_ifp->pending[gm_ifp->n_pending - 1];
1086
1087 if (timercmp(&pend->expiry, &expiry, <))
1088 break;
1089
1090 /* if we end up here, the last item in pending[] has an expiry
1091 * later than the expiry for this query. But our query time
1092 * (now) is later than that of the item (because, well, that's
1093 * how time works.) This makes this query meaningless since
1094 * it's "supersetted" within the preexisting query
1095 */
1096
1097 if (PIM_DEBUG_GM_TRACE_DETAIL)
1098 zlog_debug(
1099 log_ifp("zapping supersetted general timer %pTVMu"),
1100 &pend->expiry);
1101
1102 gm_ifp->n_pending--;
1103 if (!gm_ifp->n_pending)
1104 EVENT_OFF(gm_ifp->t_expire);
1105 }
1106
1107 /* people might be messing with their configs or something */
1108 if (gm_ifp->n_pending == array_size(gm_ifp->pending))
1109 return;
1110
1111 pend = &gm_ifp->pending[gm_ifp->n_pending];
1112 pend->query = now;
1113 pend->expiry = expiry;
1114
1115 if (!gm_ifp->n_pending++) {
1116 if (PIM_DEBUG_GM_TRACE)
1117 zlog_debug(
1118 log_ifp("starting general timer @ 0: %pTVMu"),
1119 &pend->expiry);
1120 event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
1121 &timers->expire_wait, &gm_ifp->t_expire);
1122 } else if (PIM_DEBUG_GM_TRACE)
1123 zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
1124 gm_ifp->n_pending, &pend->expiry);
1125}
1126
1127static void gm_t_sg_expire(struct event *t)
1128{
1129 struct gm_sg *sg = EVENT_ARG(t);
1130 struct gm_if *gm_ifp = sg->iface;
1131 struct gm_packet_sg *item;
1132
1133 assertf(sg->state == GM_SG_JOIN_EXPIRING ||
1134 sg->state == GM_SG_NOPRUNE_EXPIRING,
1135 "%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t);
1136
1137 frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
1138 /* this will also drop EXCLUDE mode S,G lists together with
1139 * the *,G entry
1140 */
1141 gm_packet_sg_drop(item);
1142
1143 /* subs_negative items are only timed out together with the *,G entry
1144 * since we won't get any reports for a group-and-source query
1145 */
1146 gm_sg_update(sg, true);
1147}
1148
1149static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg,
1150 struct timeval ref)
1151{
1152 struct gm_packet_state *pkt;
1153
1154 if (!sg->most_recent) {
1155 struct gm_packet_state *best_pkt = NULL;
1156 struct gm_packet_sg *item;
1157
1158 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
1159 pkt = gm_packet_sg2state(item);
1160
1161 if (!best_pkt ||
1162 timercmp(&pkt->received, &best_pkt->received, >)) {
1163 best_pkt = pkt;
1164 sg->most_recent = item;
1165 }
1166 }
1167 }
1168 if (sg->most_recent) {
1169 struct timeval fuzz;
1170
1171 pkt = gm_packet_sg2state(sg->most_recent);
1172
1173 /* this shouldn't happen on a plain old real ethernet segment,
1174 * but on something like a VXLAN or VPLS it is very possible
1175 * that we get a report before the query that triggered it.
1176 * (imagine a triangle scenario with 3 datacenters, it's very
1177 * possible A->B + B->C is faster than A->C due to odd routing)
1178 *
1179 * This makes a little tolerance allowance to handle that case.
1180 */
1181 timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz);
1182
1183 if (timercmp(&fuzz, &ref, >))
1184 return true;
1185 }
1186 return false;
1187}
1188
1189static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
1190 struct timeval expire_wait)
1191{
1192 struct timeval now;
1193
1194 if (!sg)
1195 return;
1196 if (sg->state == GM_SG_PRUNE)
1197 return;
1198
1199 monotime(&now);
1200 if (gm_sg_check_recent(gm_ifp, sg, now))
1201 return;
1202
1203 if (PIM_DEBUG_GM_TRACE)
1204 zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait);
1205
1206 if (sg->t_sg_expire) {
1207 struct timeval remain;
1208
1209 remain = event_timer_remain(sg->t_sg_expire);
1210 if (timercmp(&remain, &expire_wait, <=))
1211 return;
1212
1213 EVENT_OFF(sg->t_sg_expire);
1214 }
1215
1216 event_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
1217 &sg->t_sg_expire);
1218}
1219
1220static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
1221 struct gm_query_timers *timers, pim_addr grp,
1222 const pim_addr *srcs, size_t n_src)
1223{
1224 struct gm_sg *sg;
1225 size_t i;
1226
1227 for (i = 0; i < n_src; i++) {
1228 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
1229 gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1230 }
1231}
1232
1233static void gm_t_grp_expire(struct event *t)
1234{
1235 /* if we're here, that means when we received the group-specific query
1236 * there was one or more active S,G for this group. For *,G the timer
1237 * in sg->t_sg_expire is running separately and gets cancelled when we
1238 * receive a report, so that work is left to gm_t_sg_expire and we
1239 * shouldn't worry about it here.
1240 */
1241 struct gm_grp_pending *pend = EVENT_ARG(t);
1242 struct gm_if *gm_ifp = pend->iface;
1243 struct gm_sg *sg, *sg_start, sg_ref = {};
1244
1245 if (PIM_DEBUG_GM_EVENTS)
1246 zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);
1247
1248 /* gteq lookup - try to find *,G or S,G (S,G is > *,G)
1249 * could technically be gt to skip a possible *,G
1250 */
1251 sg_ref.sgaddr.grp = pend->grp;
1252 sg_ref.sgaddr.src = PIMADDR_ANY;
1253 sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
1254
1255 frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
1256 struct gm_packet_sg *item;
1257
1258 if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
1259 break;
1260 if (pim_addr_is_any(sg->sgaddr.src))
1261 /* handled by gm_t_sg_expire / sg->t_sg_expire */
1262 continue;
1263 if (gm_sg_check_recent(gm_ifp, sg, pend->query))
1264 continue;
1265
1266 /* we may also have a group-source-specific query going on in
1267 * parallel. But if we received nothing for the *,G query,
1268 * the S,G query is kinda irrelevant.
1269 */
1270 EVENT_OFF(sg->t_sg_expire);
1271
1272 frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
1273 /* this will also drop the EXCLUDE S,G lists */
1274 gm_packet_sg_drop(item);
1275
1276 gm_sg_update(sg, true);
1277 }
1278
1279 gm_grp_pends_del(gm_ifp->grp_pends, pend);
1280 XFREE(MTYPE_GM_GRP_PENDING, pend);
1281}
1282
1283static void gm_handle_q_group(struct gm_if *gm_ifp,
1284 struct gm_query_timers *timers, pim_addr grp)
1285{
1286 struct gm_sg *sg, sg_ref = {};
1287 struct gm_grp_pending *pend, pend_ref = {};
1288
1289 sg_ref.sgaddr.grp = grp;
1290 sg_ref.sgaddr.src = PIMADDR_ANY;
1291 /* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
1292 sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
1293
1294 if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
1295 /* we have nothing at all for this group - don't waste RAM */
1296 return;
1297
1298 if (pim_addr_is_any(sg->sgaddr.src)) {
1299 /* actually found *,G entry here */
1300 if (PIM_DEBUG_GM_TRACE)
1301 zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
1302 &grp);
1303 gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1304
1305 sg = gm_sgs_next(gm_ifp->sgs, sg);
1306 if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
1307 /* no S,G for this group */
1308 return;
1309 }
1310
1311 pend_ref.grp = grp;
1312 pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);
1313
1314 if (pend) {
1315 struct timeval remain;
1316
1317 remain = event_timer_remain(pend->t_expire);
1318 if (timercmp(&remain, &timers->expire_wait, <=))
1319 return;
1320
1321 EVENT_OFF(pend->t_expire);
1322 } else {
1323 pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
1324 pend->grp = grp;
1325 pend->iface = gm_ifp;
1326 gm_grp_pends_add(gm_ifp->grp_pends, pend);
1327 }
1328
1329 monotime(&pend->query);
1330 event_add_timer_tv(router->master, gm_t_grp_expire, pend,
1331 &timers->expire_wait, &pend->t_expire);
1332
1333 if (PIM_DEBUG_GM_TRACE)
1334 zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
1335 pend->t_expire);
1336}
1337
1338static void gm_bump_querier(struct gm_if *gm_ifp)
1339{
1340 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1341
1342 EVENT_OFF(gm_ifp->t_query);
1343
1344 if (pim_addr_is_any(pim_ifp->ll_lowest))
1345 return;
1346 if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
1347 return;
1348
1349 gm_ifp->n_startup = gm_ifp->cur_qrv;
1350
1351 event_execute(router->master, gm_t_query, gm_ifp, 0);
1352}
1353
1354static void gm_t_other_querier(struct event *t)
1355{
1356 struct gm_if *gm_ifp = EVENT_ARG(t);
1357 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1358
1359 zlog_info(log_ifp("other querier timer expired"));
1360
1361 gm_ifp->querier = pim_ifp->ll_lowest;
1362 gm_ifp->n_startup = gm_ifp->cur_qrv;
1363
1364 event_execute(router->master, gm_t_query, gm_ifp, 0);
1365}
1366
1367static void gm_handle_query(struct gm_if *gm_ifp,
1368 const struct sockaddr_in6 *pkt_src,
1369 pim_addr *pkt_dst, char *data, size_t len)
1370{
1371 struct mld_v2_query_hdr *hdr;
1372 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1373 struct gm_query_timers timers;
1374 bool general_query;
1375
1376 if (len < sizeof(struct mld_v2_query_hdr) &&
1377 len != sizeof(struct mld_v1_pkt)) {
1378 zlog_warn(log_pkt_src("invalid query size"));
1379 gm_ifp->stats.rx_drop_malformed++;
1380 return;
1381 }
1382
1383 hdr = (struct mld_v2_query_hdr *)data;
1384 general_query = pim_addr_is_any(hdr->grp);
1385
1386 if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) {
1387 zlog_warn(log_pkt_src(
1388 "malformed MLDv2 query (invalid group %pI6)"),
1389 &hdr->grp);
1390 gm_ifp->stats.rx_drop_malformed++;
1391 return;
1392 }
1393
1394 if (len >= sizeof(struct mld_v2_query_hdr)) {
1395 size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);
1396
1397 if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
1398 zlog_warn(log_pkt_src(
1399 "malformed MLDv2 query (truncated source list)"));
1400 gm_ifp->stats.rx_drop_malformed++;
1401 return;
1402 }
1403
1404 if (general_query && src_space) {
1405 zlog_warn(log_pkt_src(
1406 "malformed MLDv2 query (general query with non-empty source list)"));
1407 gm_ifp->stats.rx_drop_malformed++;
1408 return;
1409 }
1410 }
1411
1412 /* accepting queries unicast to us (or addressed to a wrong group)
1413 * can mess up querier election as well as cause us to terminate
1414 * traffic (since after a unicast query no reports will be coming in)
1415 */
1416 if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) {
1417 if (pim_addr_is_any(hdr->grp)) {
1418 zlog_warn(
1419 log_pkt_src(
1420 "wrong destination %pPA for general query"),
1421 pkt_dst);
1422 gm_ifp->stats.rx_drop_dstaddr++;
1423 return;
1424 }
1425
1426 if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) {
1427 gm_ifp->stats.rx_drop_dstaddr++;
1428 zlog_warn(
1429 log_pkt_src(
1430 "wrong destination %pPA for group specific query"),
1431 pkt_dst);
1432 return;
1433 }
1434 }
1435
1436 if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
1437 if (PIM_DEBUG_GM_EVENTS)
1438 zlog_debug(
1439 log_pkt_src("replacing elected querier %pPA"),
1440 &gm_ifp->querier);
1441
1442 gm_ifp->querier = pkt_src->sin6_addr;
1443 }
1444
1445 if (len == sizeof(struct mld_v1_pkt)) {
1446 timers.qrv = gm_ifp->cur_qrv;
1447 timers.max_resp_ms = hdr->max_resp_code;
1448 timers.qqic_ms = gm_ifp->cur_query_intv;
1449 } else {
1450 timers.qrv = (hdr->flags & 0x7) ?: 8;
1451 timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code);
1452 timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000;
1453 }
1454 timers.fuzz = gm_ifp->cfg_timing_fuzz;
1455
1456 gm_expiry_calc(&timers);
1457
1458 if (PIM_DEBUG_GM_TRACE_DETAIL)
1459 zlog_debug(
1460 log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
1461 timers.qrv, timers.max_resp_ms, timers.qqic_ms,
1462 &timers.expire_wait);
1463
1464 if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
1465 unsigned int other_ms;
1466
1467 EVENT_OFF(gm_ifp->t_query);
1468 EVENT_OFF(gm_ifp->t_other_querier);
5e5034b0
DL
1469
1470 other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
1471 event_add_timer_msec(router->master, gm_t_other_querier, gm_ifp,
1472 other_ms, &gm_ifp->t_other_querier);
1473 }
1474
1475 if (len == sizeof(struct mld_v1_pkt)) {
1476 if (general_query) {
1477 gm_handle_q_general(gm_ifp, &timers);
1478 gm_ifp->stats.rx_query_old_general++;
1479 } else {
1480 gm_handle_q_group(gm_ifp, &timers, hdr->grp);
1481 gm_ifp->stats.rx_query_old_group++;
1482 }
1483 return;
1484 }
1485
1486 /* v2 query - [S]uppress bit */
1487 if (hdr->flags & 0x8) {
1488 gm_ifp->stats.rx_query_new_sbit++;
1489 return;
1490 }
1491
1492 if (general_query) {
1493 gm_handle_q_general(gm_ifp, &timers);
1494 gm_ifp->stats.rx_query_new_general++;
1495 } else if (!ntohs(hdr->n_src)) {
1496 gm_handle_q_group(gm_ifp, &timers, hdr->grp);
1497 gm_ifp->stats.rx_query_new_group++;
1498 } else {
1499 gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
1500 ntohs(hdr->n_src));
1501 gm_ifp->stats.rx_query_new_groupsrc++;
1502 }
1503}
1504
1505static void gm_rx_process(struct gm_if *gm_ifp,
1506 const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst,
1507 void *data, size_t pktlen)
1508{
1509 struct icmp6_plain_hdr *icmp6 = data;
1510 uint16_t pkt_csum, ref_csum;
1511 struct ipv6_ph ph6 = {
1512 .src = pkt_src->sin6_addr,
1513 .dst = *pkt_dst,
1514 .ulpl = htons(pktlen),
1515 .next_hdr = IPPROTO_ICMPV6,
1516 };
1517
1518 pkt_csum = icmp6->icmp6_cksum;
1519 icmp6->icmp6_cksum = 0;
1520 ref_csum = in_cksum_with_ph6(&ph6, data, pktlen);
1521
1522 if (pkt_csum != ref_csum) {
1523 zlog_warn(
1524 log_pkt_src(
1525 "(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
1526 pkt_dst, pkt_csum, ref_csum);
1527 gm_ifp->stats.rx_drop_csum++;
1528 return;
1529 }
1530
1531 data = (icmp6 + 1);
1532 pktlen -= sizeof(*icmp6);
1533
1534 switch (icmp6->icmp6_type) {
1535 case ICMP6_MLD_QUERY:
1536 gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen);
1537 break;
1538 case ICMP6_MLD_V1_REPORT:
1539 gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen);
1540 break;
1541 case ICMP6_MLD_V1_DONE:
1542 gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen);
1543 break;
1544 case ICMP6_MLD_V2_REPORT:
1545 gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen);
1546 break;
1547 }
1548}
1549
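/* walk the received Hop-by-Hop options header looking for a Router Alert
 * option carrying alert_type (0 = MLD, RFC 2711).  hopopts[1] is the
 * header length in 8-byte units, not counting the first 8 bytes.
 */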
1550static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
1551 uint16_t alert_type)
1552{
1553 uint8_t *hopopt_end;
1554
1555 if (hopopt_len < 8)
1556 return false;
1557 if (hopopt_len < (hopopts[1] + 1U) * 8U)
1558 return false;
1559
1560 hopopt_end = hopopts + (hopopts[1] + 1) * 8;
1561 hopopts += 2;
1562
1563 while (hopopts < hopopt_end) {
1564 if (hopopts[0] == IP6OPT_PAD1) {
1565 hopopts++;
1566 continue;
1567 }
1568
1569 if (hopopts > hopopt_end - 2)
1570 break;
1571 if (hopopts > hopopt_end - 2 - hopopts[1])
1572 break;
1573
1574 if (hopopts[0] == IP6OPT_ROUTER_ALERT && hopopts[1] == 2) {
1575 uint16_t have_type = (hopopts[2] << 8) | hopopts[3];
1576
1577 if (have_type == alert_type)
1578 return true;
1579 }
1580
1581 hopopts += 2 + hopopts[1];
1582 }
1583 return false;
1584}
1585
1586static void gm_t_recv(struct event *t)
1587{
1588 struct pim_instance *pim = EVENT_ARG(t);
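/* the cmsghdr member of the union below exists only to give the buffer
 * the alignment that the CMSG_* macros require
 */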
1589 union {
1590 char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
1591 CMSG_SPACE(256) /* hop options */ +
1592 CMSG_SPACE(sizeof(int)) /* hopcount */];
1593 struct cmsghdr align;
1594 } cmsgbuf;
1595 struct cmsghdr *cmsg;
1596 struct in6_pktinfo *pktinfo = NULL;
1597 uint8_t *hopopts = NULL;
1598 size_t hopopt_len = 0;
1599 int *hoplimit = NULL;
1600 char rxbuf[2048];
1601 struct msghdr mh[1] = {};
1602 struct iovec iov[1];
1603 struct sockaddr_in6 pkt_src[1] = {};
1604 ssize_t nread;
1605 size_t pktlen;
1606
1607 event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
1608 &pim->t_gm_recv);
1609
1610 iov->iov_base = rxbuf;
1611 iov->iov_len = sizeof(rxbuf);
1612
1613 mh->msg_name = pkt_src;
1614 mh->msg_namelen = sizeof(pkt_src);
1615 mh->msg_control = cmsgbuf.buf;
1616 mh->msg_controllen = sizeof(cmsgbuf.buf);
1617 mh->msg_iov = iov;
1618 mh->msg_iovlen = array_size(iov);
1619 mh->msg_flags = 0;
1620
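/* peek first: with MSG_TRUNC, recvmsg() reports the full datagram length
 * even if it exceeds rxbuf, so an oversized packet can be re-read into a
 * temporary heap buffer below (relies on Linux MSG_TRUNC semantics).
 */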
1621 nread = recvmsg(pim->gm_socket, mh, MSG_PEEK | MSG_TRUNC);
1622 if (nread <= 0) {
1623 zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
1624 pim->gm_rx_drop_sys++;
1625 return;
1626 }
1627
1628 if ((size_t)nread > sizeof(rxbuf)) {
1629 iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread);
1630 iov->iov_len = nread;
1631 }
1632 nread = recvmsg(pim->gm_socket, mh, 0);
1633 if (nread <= 0) {
1634 zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
1635 pim->gm_rx_drop_sys++;
1636 goto out_free;
1637 }
1638
1639 struct interface *ifp;
1640
1641 ifp = if_lookup_by_index(pkt_src->sin6_scope_id, pim->vrf->vrf_id);
1642 if (!ifp || !ifp->info)
1643 goto out_free;
1644
1645 struct pim_interface *pim_ifp = ifp->info;
1646 struct gm_if *gm_ifp = pim_ifp->mld;
1647
1648 if (!gm_ifp)
1649 goto out_free;
1650
1651 for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) {
1652 if (cmsg->cmsg_level != SOL_IPV6)
1653 continue;
1654
1655 switch (cmsg->cmsg_type) {
1656 case IPV6_PKTINFO:
1657 pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
1658 break;
1659 case IPV6_HOPOPTS:
1660 hopopts = CMSG_DATA(cmsg);
1661 hopopt_len = cmsg->cmsg_len - sizeof(*cmsg);
1662 break;
1663 case IPV6_HOPLIMIT:
1664 hoplimit = (int *)CMSG_DATA(cmsg);
1665 break;
1666 }
1667 }
1668
1669 if (!pktinfo || !hoplimit) {
1670 zlog_err(log_ifp(
1671 "BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
1672 pim->gm_rx_drop_sys++;
1673 goto out_free;
1674 }
1675
1676 if (*hoplimit != 1) {
1677 zlog_err(log_pkt_src("packet with hop limit != 1"));
1678 /* spoofing attempt => count on srcaddr counter */
1679 gm_ifp->stats.rx_drop_srcaddr++;
1680 goto out_free;
1681 }
1682
1683 if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) {
1684 zlog_err(log_pkt_src(
1685 "packet without IPv6 Router Alert MLD option"));
1686 gm_ifp->stats.rx_drop_ra++;
1687 goto out_free;
1688 }
1689
1690 if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr))
1691 /* reports from :: happen in normal operation for DAD, so
1692 * don't spam log messages about this
1693 */
1694 goto out_free;
1695
1696 if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) {
1697 zlog_warn(log_pkt_src("packet from invalid source address"));
1698 gm_ifp->stats.rx_drop_srcaddr++;
1699 goto out_free;
1700 }
1701
1702 pktlen = nread;
1703 if (pktlen < sizeof(struct icmp6_plain_hdr)) {
1704 zlog_warn(log_pkt_src("truncated packet"));
1705 gm_ifp->stats.rx_drop_malformed++;
1706 goto out_free;
1707 }
1708
1709 gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base,
1710 pktlen);
1711
1712out_free:
1713 if (iov->iov_base != rxbuf)
1714 XFREE(MTYPE_GM_PACKET, iov->iov_base);
1715}
1716
1717static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
1718 const pim_addr *srcs, size_t n_srcs, bool s_bit)
1719{
1720 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1721 struct sockaddr_in6 dstaddr = {
1722 .sin6_family = AF_INET6,
1723 .sin6_scope_id = gm_ifp->ifp->ifindex,
1724 };
1725 struct {
1726 struct icmp6_plain_hdr hdr;
1727 struct mld_v2_query_hdr v2_query;
1728 } query = {
1729 /* clang-format off */
1730 .hdr = {
1731 .icmp6_type = ICMP6_MLD_QUERY,
1732 .icmp6_code = 0,
1733 },
1734 .v2_query = {
1735 .grp = grp,
1736 },
1737 /* clang-format on */
1738 };
1739 struct ipv6_ph ph6 = {
1740 .src = pim_ifp->ll_lowest,
1741 .ulpl = htons(sizeof(query)),
1742 .next_hdr = IPPROTO_ICMPV6,
1743 };
1744 union {
1745 char buf[CMSG_SPACE(8) /* hop options */ +
1746 CMSG_SPACE(sizeof(struct in6_pktinfo))];
1747 struct cmsghdr align;
1748 } cmsg = {};
1749 struct cmsghdr *cmh;
1750 struct msghdr mh[1] = {};
1751 struct iovec iov[3];
1752 size_t iov_len;
1753 ssize_t ret, expect_ret;
1754 uint8_t *dp;
1755 struct in6_pktinfo *pktinfo;
1756
1757 if (if_is_loopback(gm_ifp->ifp)) {
1758 /* Linux is a bit odd with multicast on loopback */
1759 ph6.src = in6addr_loopback;
1760 dstaddr.sin6_addr = in6addr_loopback;
1761 } else if (pim_addr_is_any(grp))
1762 dstaddr.sin6_addr = gm_all_hosts;
1763 else
1764 dstaddr.sin6_addr = grp;
1765
1766 query.v2_query.max_resp_code =
1767 mld_max_resp_encode(gm_ifp->cur_max_resp);
1768 query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? gm_ifp->cur_qrv : 0;
1769 if (s_bit)
1770 query.v2_query.flags |= 0x08;
1771 query.v2_query.qqic =
1772 igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000);
1773 query.v2_query.n_src = htons(n_srcs);
1774
1775 ph6.dst = dstaddr.sin6_addr;
1776
1777 /* ph6 not included in sendmsg */
1778 iov[0].iov_base = &ph6;
1779 iov[0].iov_len = sizeof(ph6);
1780 iov[1].iov_base = &query;
1781 if (gm_ifp->cur_version == GM_MLDV1) {
1782 iov_len = 2;
1783 iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt);
1784 } else if (!n_srcs) {
1785 iov_len = 2;
1786 iov[1].iov_len = sizeof(query);
1787 } else {
1788 iov[1].iov_len = sizeof(query);
1789 iov[2].iov_base = (void *)srcs;
1790 iov[2].iov_len = n_srcs * sizeof(srcs[0]);
1791 iov_len = 3;
1792 }
1793
1794 query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);
1795
1796 if (PIM_DEBUG_GM_PACKETS)
1797 zlog_debug(
1798 log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
1799 &pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);
1800
1801 mh->msg_name = &dstaddr;
1802 mh->msg_namelen = sizeof(dstaddr);
1803 mh->msg_iov = iov + 1;
1804 mh->msg_iovlen = iov_len - 1;
1805 mh->msg_control = &cmsg;
1806 mh->msg_controllen = sizeof(cmsg.buf);
1807
1808 cmh = CMSG_FIRSTHDR(mh);
1809 cmh->cmsg_level = IPPROTO_IPV6;
1810 cmh->cmsg_type = IPV6_HOPOPTS;
1811 cmh->cmsg_len = CMSG_LEN(8);
1812 dp = CMSG_DATA(cmh);
1813 *dp++ = 0; /* next header */
1814 *dp++ = 0; /* length (8-byte blocks, minus 1) */
1815 *dp++ = IP6OPT_ROUTER_ALERT; /* router alert */
1816 *dp++ = 2; /* length */
1817 *dp++ = 0; /* value (2 bytes) */
1818 *dp++ = 0; /* value (2 bytes) (0 = MLD) */
1819 *dp++ = 0; /* pad0 */
1820 *dp++ = 0; /* pad0 */
1821
1822 cmh = CMSG_NXTHDR(mh, cmh);
1823 cmh->cmsg_level = IPPROTO_IPV6;
1824 cmh->cmsg_type = IPV6_PKTINFO;
1825 cmh->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
1826 pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmh);
1827 pktinfo->ipi6_ifindex = gm_ifp->ifp->ifindex;
1828 pktinfo->ipi6_addr = gm_ifp->cur_ll_lowest;
1829
1830 expect_ret = iov[1].iov_len;
1831 if (iov_len == 3)
1832 expect_ret += iov[2].iov_len;
1833
1834 frr_with_privs (&pimd_privs) {
1835 ret = sendmsg(gm_ifp->pim->gm_socket, mh, 0);
1836 }
1837
1838 if (ret != expect_ret) {
1839 zlog_warn(log_ifp("failed to send query: %m"));
1840 gm_ifp->stats.tx_query_fail++;
1841 } else {
1842 if (gm_ifp->cur_version == GM_MLDV1) {
1843 if (pim_addr_is_any(grp))
1844 gm_ifp->stats.tx_query_old_general++;
1845 else
1846 gm_ifp->stats.tx_query_old_group++;
1847 } else {
1848 if (pim_addr_is_any(grp))
1849 gm_ifp->stats.tx_query_new_general++;
1850 else if (!n_srcs)
1851 gm_ifp->stats.tx_query_new_group++;
1852 else
1853 gm_ifp->stats.tx_query_new_groupsrc++;
1854 }
1855 }
1856}
1857
1858static void gm_t_query(struct event *t)
1859{
1860 struct gm_if *gm_ifp = EVENT_ARG(t);
1861 unsigned int timer_ms = gm_ifp->cur_query_intv;
1862
1863 if (gm_ifp->n_startup) {
1864 timer_ms /= 4;
1865 gm_ifp->n_startup--;
1866 }
1867
1868 event_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
1869 &gm_ifp->t_query);
1870
1871 gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
1872}
1873
1874static void gm_t_sg_query(struct event *t)
1875{
1876 struct gm_sg *sg = EVENT_ARG(t);
1877
1878 gm_trigger_specific(sg);
1879}
1880
1881/* S,G specific queries (triggered by a member leaving) get a little slack
1882 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
1883 */
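/* Illustrative example: leave messages for (S1,G) and (S2,G) arriving
 * within cfg_timing_fuzz of each other (10ms, as set in gm_start) land
 * in the same gm_gsq_pending entry and result in one query listing both
 * S1 and S2 as sources.
 */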
1884static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
1885{
1886 struct gm_if *gm_ifp = pend_gsq->iface;
1887
1888 gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
1889 pend_gsq->s_bit);
1890
1891 gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
1892 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
1893}
1894
e6685141 1895static void gm_t_gsq_pend(struct event *t)
5e5034b0 1896{
e16d030c 1897 struct gm_gsq_pending *pend_gsq = EVENT_ARG(t);
5e5034b0
DL
1898
1899 gm_send_specific(pend_gsq);
1900}
1901
1902static void gm_trigger_specific(struct gm_sg *sg)
1903{
1904 struct gm_if *gm_ifp = sg->iface;
1905 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1906 struct gm_gsq_pending *pend_gsq, ref = {};
1907
1908 sg->n_query--;
1909 if (sg->n_query)
907a2395
DS
1910 event_add_timer_msec(router->master, gm_t_sg_query, sg,
1911 gm_ifp->cur_query_intv_trig,
1912 &sg->t_sg_query);
5e5034b0
DL
1913
1914 if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
1915 return;
df655593 1916 if (gm_ifp->pim->gm_socket == -1)
5e5034b0
DL
1917 return;
1918
a96d64b0 1919 if (PIM_DEBUG_GM_TRACE)
5e5034b0
DL
1920 zlog_debug(log_sg(sg, "triggered query"));
1921
1922 if (pim_addr_is_any(sg->sgaddr.src)) {
1923 gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
1924 return;
1925 }
1926
1927 ref.grp = sg->sgaddr.grp;
1928 ref.s_bit = sg->query_sbit;
1929
1930 pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
1931 if (!pend_gsq) {
1932 pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
1933 pend_gsq->grp = sg->sgaddr.grp;
1934 pend_gsq->s_bit = sg->query_sbit;
1935 pend_gsq->iface = gm_ifp;
1936 gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);
1937
907a2395
DS
1938 event_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
1939 &gm_ifp->cfg_timing_fuzz, &pend_gsq->t_send);
5e5034b0
DL
1940 }
1941
1942 assert(pend_gsq->n_src < array_size(pend_gsq->srcs));
1943
1944 pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
1945 pend_gsq->n_src++;
1946
1947 if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
e16d030c 1948 EVENT_OFF(pend_gsq->t_send);
5e5034b0
DL
1949 gm_send_specific(pend_gsq);
1950 pend_gsq = NULL;
1951 }
1952}
1953
df655593 1954static void gm_vrf_socket_incref(struct pim_instance *pim)
5e5034b0 1955{
df655593 1956 struct vrf *vrf = pim->vrf;
5e5034b0
DL
1957 int ret, intval;
1958 struct icmp6_filter filter[1];
1959
df655593
DL
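 /* One raw ICMPv6 socket per VRF is shared by all MLD-enabled
 * interfaces; only the first interface actually opens it, later callers
 * just bump the reference count.
 */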
1960 if (pim->gm_socket_if_count++ && pim->gm_socket != -1)
1961 return;
5e5034b0
DL
1962
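 /* Block everything, then pass only the four MLD message types; the
 * kernel filters all other ICMPv6 traffic before it reaches us.
 */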
1963 ICMP6_FILTER_SETBLOCKALL(filter);
1964 ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter);
1965 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter);
1966 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter);
1967 ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter);
1968
1969 frr_with_privs (&pimd_privs) {
df655593
DL
1970 pim->gm_socket = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
1971 vrf->vrf_id, vrf->name);
1972 if (pim->gm_socket < 0) {
1973 zlog_err("(VRF %s) could not create MLD socket: %m",
1974 vrf->name);
5e5034b0
DL
1975 return;
1976 }
1977
df655593
DL
1978 ret = setsockopt(pim->gm_socket, SOL_ICMPV6, ICMP6_FILTER,
1979 filter, sizeof(filter));
5e5034b0 1980 if (ret)
df655593
DL
1981 zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
1982 vrf->name);
5e5034b0
DL
1983
1984 intval = 1;
df655593 1985 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVPKTINFO,
5e5034b0
DL
1986 &intval, sizeof(intval));
1987 if (ret)
df655593
DL
1988 zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
1989 vrf->name);
5e5034b0
DL
1990
1991 intval = 1;
df655593 1992 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPOPTS,
5e5034b0
DL
1993 &intval, sizeof(intval));
1994 if (ret)
df655593
DL
 1995 zlog_err("(VRF %s) failed to set IPV6_RECVHOPOPTS: %m",
1996 vrf->name);
5e5034b0
DL
1997
1998 intval = 1;
df655593 1999 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPLIMIT,
5e5034b0
DL
2000 &intval, sizeof(intval));
2001 if (ret)
df655593
DL
 2002 zlog_err("(VRF %s) failed to set IPV6_RECVHOPLIMIT: %m",
2003 vrf->name);
5e5034b0
DL
2004
 2005 intval = 0; /* do not loop our own queries back to us */
df655593 2006 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_LOOP,
5e5034b0
DL
2007 &intval, sizeof(intval));
2008 if (ret)
2009 zlog_err(
df655593
DL
2010 "(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
2011 vrf->name);
5e5034b0
DL
2012
2013 intval = 1;
df655593 2014 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_HOPS,
5e5034b0
DL
2015 &intval, sizeof(intval));
2016 if (ret)
df655593
DL
2017 zlog_err(
2018 "(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
2019 vrf->name);
5e5034b0
DL
2020
2021 /* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
2022 * RX filtering in Linux. It only means "receive all groups
2023 * that something on the system has joined". To actually
2024 * receive *all* MLD packets - which is what we need -
2025 * multicast routing must be enabled on the interface. And
2026 * this only works for MLD packets specifically.
2027 *
2028 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
2029 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
2030 *
 2031 * Also note that the code there explicitly checks for the IPv6
 2032 * Router Alert option (which the RFC requires on all MLD
 2033 * packets).  This means that supporting hosts which erroneously
 2034 * omit that option is simply not possible.
2035 */
2036 intval = 1;
df655593 2037 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_ALL,
5e5034b0
DL
2038 &intval, sizeof(intval));
2039 if (ret)
2040 zlog_info(
df655593
DL
2041 "(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
2042 vrf->name);
2043 }
5e5034b0 2044
907a2395
DS
2045 event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
2046 &pim->t_gm_recv);
df655593
DL
2047}
2048
2049static void gm_vrf_socket_decref(struct pim_instance *pim)
2050{
2051 if (--pim->gm_socket_if_count)
2052 return;
2053
e16d030c 2054 EVENT_OFF(pim->t_gm_recv);
df655593
DL
2055 close(pim->gm_socket);
2056 pim->gm_socket = -1;
2057}
2058
2059static void gm_start(struct interface *ifp)
2060{
2061 struct pim_interface *pim_ifp = ifp->info;
2062 struct gm_if *gm_ifp;
2063
2064 assert(pim_ifp);
2065 assert(pim_ifp->pim);
2066 assert(pim_ifp->mroute_vif_index >= 0);
2067 assert(!pim_ifp->mld);
2068
2069 gm_vrf_socket_incref(pim_ifp->pim);
2070
2071 gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp));
2072 gm_ifp->ifp = ifp;
2073 pim_ifp->mld = gm_ifp;
2074 gm_ifp->pim = pim_ifp->pim;
2075 monotime(&gm_ifp->started);
2076
2077 zlog_info(log_ifp("starting MLD"));
2078
2079 if (pim_ifp->mld_version == 1)
2080 gm_ifp->cur_version = GM_MLDV1;
2081 else
2082 gm_ifp->cur_version = GM_MLDV2;
2083
914710d7 2084 gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable;
df655593 2085 gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
707a9e9a
A
2086 gm_ifp->cur_query_intv_trig =
2087 pim_ifp->gm_specific_query_max_response_time_dsec * 100;
914710d7 2088 gm_ifp->cur_max_resp = pim_ifp->gm_query_max_response_time_dsec * 100;
51b4991f 2089 gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;
df655593
DL
2090
2091 gm_ifp->cfg_timing_fuzz.tv_sec = 0;
2092 gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;
2093
2094 gm_sgs_init(gm_ifp->sgs);
2095 gm_subscribers_init(gm_ifp->subscribers);
2096 gm_packet_expires_init(gm_ifp->expires);
2097 gm_grp_pends_init(gm_ifp->grp_pends);
2098 gm_gsq_pends_init(gm_ifp->gsq_pends);
2099
2100 frr_with_privs (&pimd_privs) {
5e5034b0 2101 struct ipv6_mreq mreq;
df655593 2102 int ret;
5e5034b0
DL
2103
2104 /* all-MLDv2 group */
2105 mreq.ipv6mr_multiaddr = gm_all_routers;
2106 mreq.ipv6mr_interface = ifp->ifindex;
df655593
DL
2107 ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
2108 IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
5e5034b0
DL
2109 if (ret)
2110 zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
2111 ifp->name);
2112 }
5e5034b0
DL
2113}
2114
e309780f 2115void gm_group_delete(struct gm_if *gm_ifp)
5e5034b0 2116{
e309780f 2117 struct gm_sg *sg;
5e5034b0
DL
2118 struct gm_packet_state *pkt;
2119 struct gm_grp_pending *pend_grp;
2120 struct gm_gsq_pending *pend_gsq;
2121 struct gm_subscriber *subscriber;
e309780f
SP
2122
2123 while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
2124 gm_packet_drop(pkt, false);
2125
2126 while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
e16d030c 2127 EVENT_OFF(pend_grp->t_expire);
e309780f
SP
2128 XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
2129 }
2130
2131 while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
e16d030c 2132 EVENT_OFF(pend_gsq->t_send);
e309780f
SP
2133 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
2134 }
2135
2136 while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
e16d030c 2137 EVENT_OFF(sg->t_sg_expire);
e309780f
SP
2138 assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
2139 &sg->sgaddr);
2140 assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
2141 &sg->sgaddr);
2142
2143 gm_sg_free(sg);
2144 }
2145 while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
2146 assertf(!gm_packets_count(subscriber->packets), "%pPA",
2147 &subscriber->addr);
2148 XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
2149 }
2150}
2151
2152void gm_ifp_teardown(struct interface *ifp)
2153{
2154 struct pim_interface *pim_ifp = ifp->info;
2155 struct gm_if *gm_ifp;
5e5034b0
DL
2156
2157 if (!pim_ifp || !pim_ifp->mld)
2158 return;
2159
2160 gm_ifp = pim_ifp->mld;
2161 gm_ifp->stopping = true;
95b13dc5 2162 if (PIM_DEBUG_GM_EVENTS)
5e5034b0
DL
2163 zlog_debug(log_ifp("MLD stop"));
2164
e16d030c
DS
2165 EVENT_OFF(gm_ifp->t_query);
2166 EVENT_OFF(gm_ifp->t_other_querier);
2167 EVENT_OFF(gm_ifp->t_expire);
5e5034b0 2168
df655593
DL
2169 frr_with_privs (&pimd_privs) {
2170 struct ipv6_mreq mreq;
2171 int ret;
2172
2173 /* all-MLDv2 group */
2174 mreq.ipv6mr_multiaddr = gm_all_routers;
2175 mreq.ipv6mr_interface = ifp->ifindex;
2176 ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
2177 IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
2178 if (ret)
2179 zlog_err(
2180 "(%s) failed to leave ff02::16 (all-MLDv2): %m",
2181 ifp->name);
5e5034b0
DL
2182 }
2183
df655593
DL
2184 gm_vrf_socket_decref(gm_ifp->pim);
2185
e309780f 2186 gm_group_delete(gm_ifp);
5e5034b0
DL
2187
2188 gm_grp_pends_fini(gm_ifp->grp_pends);
2189 gm_packet_expires_fini(gm_ifp->expires);
2190 gm_subscribers_fini(gm_ifp->subscribers);
2191 gm_sgs_fini(gm_ifp->sgs);
2192
2193 XFREE(MTYPE_GM_IFACE, gm_ifp);
2194 pim_ifp->mld = NULL;
2195}
2196
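/* Querier election: the router with the numerically lowest link-local
 * address on the link acts as MLD querier.  When our set of link-local
 * addresses changes, we may need to take over, stop querying, or restart
 * the startup query sequence with the new address.
 */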
2197static void gm_update_ll(struct interface *ifp)
2198{
2199 struct pim_interface *pim_ifp = ifp->info;
f4e8f5d4 2200 struct gm_if *gm_ifp = pim_ifp->mld;
5e5034b0
DL
2201 bool was_querier;
2202
2203 was_querier =
2204 !IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
2205 !pim_addr_is_any(gm_ifp->querier);
2206
2207 gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
2208 if (was_querier)
2209 gm_ifp->querier = pim_ifp->ll_lowest;
e16d030c 2210 EVENT_OFF(gm_ifp->t_query);
5e5034b0
DL
2211
2212 if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
2213 if (was_querier)
2214 zlog_info(log_ifp(
2215 "lost link-local address, stopping querier"));
2216 return;
2217 }
2218
2219 if (was_querier)
2220 zlog_info(log_ifp("new link-local %pPA while querier"),
2221 &gm_ifp->cur_ll_lowest);
2222 else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
2223 pim_addr_is_any(gm_ifp->querier)) {
2224 zlog_info(log_ifp("new link-local %pPA, becoming querier"),
2225 &gm_ifp->cur_ll_lowest);
2226 gm_ifp->querier = gm_ifp->cur_ll_lowest;
2227 } else
2228 return;
2229
5e5034b0 2230 gm_ifp->n_startup = gm_ifp->cur_qrv;
8c1186d3 2231 event_execute(router->master, gm_t_query, gm_ifp, 0);
5e5034b0
DL
2232}
2233
2234void gm_ifp_update(struct interface *ifp)
2235{
2236 struct pim_interface *pim_ifp = ifp->info;
2237 struct gm_if *gm_ifp;
2238 bool changed = false;
2239
2240 if (!pim_ifp)
2241 return;
2242 if (!if_is_operative(ifp) || !pim_ifp->pim ||
2243 pim_ifp->mroute_vif_index < 0) {
2244 gm_ifp_teardown(ifp);
2245 return;
2246 }
2247
5c1b3cd2
A
2248 /*
 2249 * If IPv6 MLD is not enabled on the interface, do not start MLD activities.
2250 */
2251 if (!pim_ifp->gm_enable)
2252 return;
2253
2254 if (!pim_ifp->mld) {
2255 changed = true;
5e5034b0 2256 gm_start(ifp);
5c1b3cd2 2257 }
5e5034b0
DL
2258
2259 gm_ifp = pim_ifp->mld;
2260 if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
2261 gm_update_ll(ifp);
2262
2263 unsigned int cfg_query_intv = pim_ifp->gm_default_query_interval * 1000;
2264
2265 if (gm_ifp->cur_query_intv != cfg_query_intv) {
2266 gm_ifp->cur_query_intv = cfg_query_intv;
707a9e9a
A
2267 changed = true;
2268 }
2269
2270 unsigned int cfg_query_intv_trig =
2271 pim_ifp->gm_specific_query_max_response_time_dsec * 100;
2272
2273 if (gm_ifp->cur_query_intv_trig != cfg_query_intv_trig) {
2274 gm_ifp->cur_query_intv_trig = cfg_query_intv_trig;
5e5034b0
DL
2275 changed = true;
2276 }
2277
914710d7
A
2278 unsigned int cfg_max_response =
2279 pim_ifp->gm_query_max_response_time_dsec * 100;
2280
2281 if (gm_ifp->cur_max_resp != cfg_max_response)
2282 gm_ifp->cur_max_resp = cfg_max_response;
2283
51b4991f
A
2284 if (gm_ifp->cur_lmqc != pim_ifp->gm_last_member_query_count)
2285 gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;
2286
5e5034b0
DL
2287 enum gm_version cfg_version;
2288
2289 if (pim_ifp->mld_version == 1)
2290 cfg_version = GM_MLDV1;
2291 else
2292 cfg_version = GM_MLDV2;
2293 if (gm_ifp->cur_version != cfg_version) {
2294 gm_ifp->cur_version = cfg_version;
2295 changed = true;
2296 }
2297
2298 if (changed) {
a96d64b0 2299 if (PIM_DEBUG_GM_TRACE)
5e5034b0
DL
2300 zlog_debug(log_ifp(
2301 "MLD querier config changed, querying"));
2302 gm_bump_querier(gm_ifp);
2303 }
2304}
2305
d2951219
DL
2306/*
2307 * CLI (show commands only)
2308 */
5e5034b0
DL
2309
2310#include "lib/command.h"
2311
5e5034b0 2312#include "pimd/pim6_mld_clippy.c"
5e5034b0 2313
d2951219
DL
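/* Returns NULL both for "all" (with *err untouched) and for a failed
 * lookup (with *err set to CMD_WARNING); callers check *err first and
 * only then treat NULL as "iterate over all VRFs".
 */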
2314static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
2315 int *err)
2316{
2317 struct vrf *ret;
2318
2319 if (!vrf_str)
2320 return vrf_lookup_by_id(VRF_DEFAULT);
2321 if (!strcmp(vrf_str, "all"))
2322 return NULL;
2323 ret = vrf_lookup_by_name(vrf_str);
2324 if (ret)
2325 return ret;
2326
2327 vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str);
2328 *err = CMD_WARNING;
2329 return NULL;
2330}
2331
2332static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp)
2333{
2334 struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
2335 struct gm_if *gm_ifp;
2336 bool querier;
2337 size_t i;
2338
2339 if (!pim_ifp) {
2340 vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name);
2341 return;
2342 }
2343
2344 gm_ifp = pim_ifp->mld;
2345 if (!gm_ifp) {
2346 vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name);
2347 return;
2348 }
2349
2350 querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
2351
2352 vty_out(vty, "Interface %s: MLD running\n", ifp->name);
2353 vty_out(vty, " Uptime: %pTVMs\n", &gm_ifp->started);
2354 vty_out(vty, " MLD version: %d\n", gm_ifp->cur_version);
2355 vty_out(vty, " Querier: %pPA%s\n", &gm_ifp->querier,
2356 querier ? " (this system)" : "");
2357 vty_out(vty, " Query timer: %pTH\n", gm_ifp->t_query);
2358 vty_out(vty, " Other querier timer: %pTH\n",
2359 gm_ifp->t_other_querier);
2360 vty_out(vty, " Robustness value: %u\n", gm_ifp->cur_qrv);
2361 vty_out(vty, " Query interval: %ums\n",
2362 gm_ifp->cur_query_intv);
2363 vty_out(vty, " Query response timer: %ums\n", gm_ifp->cur_max_resp);
2364 vty_out(vty, " Last member query intv.: %ums\n",
2365 gm_ifp->cur_query_intv_trig);
2366 vty_out(vty, " %u expiry timers from general queries:\n",
2367 gm_ifp->n_pending);
2368 for (i = 0; i < gm_ifp->n_pending; i++) {
2369 struct gm_general_pending *p = &gm_ifp->pending[i];
2370
2371 vty_out(vty, " %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
2372 &p->query, &p->expiry);
2373 }
2374 vty_out(vty, " %zu expiry timers from *,G queries\n",
2375 gm_grp_pends_count(gm_ifp->grp_pends));
2376 vty_out(vty, " %zu expiry timers from S,G queries\n",
2377 gm_gsq_pends_count(gm_ifp->gsq_pends));
2378 vty_out(vty, " %zu total *,G/S,G from %zu hosts in %zu bundles\n",
2379 gm_sgs_count(gm_ifp->sgs),
2380 gm_subscribers_count(gm_ifp->subscribers),
2381 gm_packet_expires_count(gm_ifp->expires));
2382 vty_out(vty, "\n");
2383}
2384
2385static void gm_show_if_one(struct vty *vty, struct interface *ifp,
cbb1e513 2386 json_object *js_if, struct ttable *tt)
d2951219
DL
2387{
2388 struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
2389 struct gm_if *gm_ifp = pim_ifp->mld;
2390 bool querier;
2391
d2951219
DL
2392 querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
2393
2394 if (js_if) {
cb406d5c 2395 json_object_string_add(js_if, "name", ifp->name);
3ab119a4
SP
2396 json_object_string_addf(js_if, "address", "%pPA",
2397 &pim_ifp->primary_address);
d2951219
DL
2398 json_object_string_add(js_if, "state", "up");
2399 json_object_string_addf(js_if, "version", "%d",
2400 gm_ifp->cur_version);
2401 json_object_string_addf(js_if, "upTime", "%pTVMs",
2402 &gm_ifp->started);
2403 json_object_boolean_add(js_if, "querier", querier);
2404 json_object_string_addf(js_if, "querierIp", "%pPA",
2405 &gm_ifp->querier);
2406 if (querier)
2407 json_object_string_addf(js_if, "queryTimer", "%pTH",
2408 gm_ifp->t_query);
2409 else
2410 json_object_string_addf(js_if, "otherQuerierTimer",
2411 "%pTH",
2412 gm_ifp->t_other_querier);
cb406d5c
A
2413 json_object_int_add(js_if, "timerRobustnessValue",
2414 gm_ifp->cur_qrv);
6b94500d
A
2415 json_object_int_add(js_if, "lastMemberQueryCount",
2416 gm_ifp->cur_lmqc);
cb406d5c
A
2417 json_object_int_add(js_if, "timerQueryIntervalMsec",
2418 gm_ifp->cur_query_intv);
2419 json_object_int_add(js_if, "timerQueryResponseTimerMsec",
2420 gm_ifp->cur_max_resp);
2421 json_object_int_add(js_if, "timerLastMemberQueryIntervalMsec",
2422 gm_ifp->cur_query_intv_trig);
d2951219 2423 } else {
cbb1e513
SP
2424 ttable_add_row(tt, "%s|%s|%pPAs|%d|%s|%pPAs|%pTH|%pTVMs",
2425 ifp->name, "up", &pim_ifp->primary_address,
2426 gm_ifp->cur_version, querier ? "local" : "other",
2427 &gm_ifp->querier, gm_ifp->t_query,
2428 &gm_ifp->started);
d2951219
DL
2429 }
2430}
2431
2432static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname,
2433 bool detail, json_object *js)
2434{
2435 struct interface *ifp;
cbb1e513 2436 json_object *js_vrf = NULL;
fe4db62c 2437 struct pim_interface *pim_ifp;
cbb1e513
SP
2438 struct ttable *tt = NULL;
2439 char *table = NULL;
d2951219
DL
2440
2441 if (js) {
2442 js_vrf = json_object_new_object();
2443 json_object_object_add(js, vrf->name, js_vrf);
2444 }
2445
cbb1e513
SP
2446 if (!js && !detail) {
2447 /* Prepare table. */
2448 tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
2449 ttable_add_row(
2450 tt,
2451 "Interface|State|Address|V|Querier|QuerierIp|Query Timer|Uptime");
2452 tt->style.cell.rpad = 2;
2453 tt->style.corner = '+';
2454 ttable_restyle(tt);
2455 }
2456
d2951219
DL
2457 FOR_ALL_INTERFACES (vrf, ifp) {
2458 json_object *js_if = NULL;
2459
2460 if (ifname && strcmp(ifp->name, ifname))
2461 continue;
2462 if (detail && !js) {
2463 gm_show_if_one_detail(vty, ifp);
2464 continue;
2465 }
2466
cbb1e513 2467 pim_ifp = ifp->info;
fe4db62c
SP
2468
2469 if (!pim_ifp || !pim_ifp->mld)
d2951219 2470 continue;
fe4db62c 2471
d2951219
DL
2472 if (js) {
2473 js_if = json_object_new_object();
35801e62
DS
 2474 /*
 2475 * Coverity assumes json_object_new_object() can
 2476 * return NULL.  If js_if were NULL here (js set,
 2477 * detail not set), gm_show_if_one() below would
 2478 * take the table path and dereference tt, which
 2479 * is NULL in the JSON case, crashing FRR.  Since
 2480 * json_object_new_object() never actually fails,
 2481 * assert that assumption to tell Coverity what is
 2482 * really going on and silence the false positive.
 2483 * This is not a fast path, so the cost of the
 2484 * assert does not matter.
 2485 */
2486 assert(js_if);
d2951219
DL
2487 json_object_object_add(js_vrf, ifp->name, js_if);
2488 }
2489
cbb1e513
SP
2490 gm_show_if_one(vty, ifp, js_if, tt);
2491 }
2492
2493 /* Dump the generated table. */
2494 if (!js && !detail) {
2495 table = ttable_dump(tt, "\n");
2496 vty_out(vty, "%s\n", table);
2497 XFREE(MTYPE_TMP, table);
2498 ttable_del(tt);
d2951219
DL
2499 }
2500}
2501
2502static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname,
2503 bool detail, json_object *js)
2504{
d2951219
DL
2505 if (vrf)
2506 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2507 else
2508 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2509 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2510}
2511
2512DEFPY(gm_show_interface,
2513 gm_show_interface_cmd,
ad56b07c 2514 "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME | detail$detail] [json$json]",
d2951219
DL
2515 SHOW_STR
2516 IPV6_STR
2517 MLD_STR
2518 VRF_FULL_CMD_HELP_STR
2519 "MLD interface information\n"
ad56b07c 2520 "Interface name\n"
d2951219
DL
2521 "Detailed output\n"
2522 JSON_STR)
2523{
2524 int ret = CMD_SUCCESS;
2525 struct vrf *vrf;
2526 json_object *js = NULL;
2527
2528 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2529 if (ret != CMD_SUCCESS)
2530 return ret;
2531
2532 if (json)
2533 js = json_object_new_object();
2534 gm_show_if(vty, vrf, ifname, !!detail, js);
2535 return vty_json(vty, js);
2536}
2537
2538static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp,
2539 json_object *js_if)
2540{
2541 struct gm_if_stats *stats = &gm_ifp->stats;
2542 /* clang-format off */
2543 struct {
2544 const char *text;
2545 const char *js_key;
2546 uint64_t *val;
2547 } *item, items[] = {
2548 { "v2 reports received", "rxV2Reports", &stats->rx_new_report },
2549 { "v1 reports received", "rxV1Reports", &stats->rx_old_report },
2550 { "v1 done received", "rxV1Done", &stats->rx_old_leave },
2551
2552 { "v2 *,* queries received", "rxV2QueryGeneral", &stats->rx_query_new_general },
2553 { "v2 *,G queries received", "rxV2QueryGroup", &stats->rx_query_new_group },
2554 { "v2 S,G queries received", "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc },
2555 { "v2 S-bit queries received", "rxV2QuerySBit", &stats->rx_query_new_sbit },
2556 { "v1 *,* queries received", "rxV1QueryGeneral", &stats->rx_query_old_general },
2557 { "v1 *,G queries received", "rxV1QueryGroup", &stats->rx_query_old_group },
2558
2559 { "v2 *,* queries sent", "txV2QueryGeneral", &stats->tx_query_new_general },
2560 { "v2 *,G queries sent", "txV2QueryGroup", &stats->tx_query_new_group },
2561 { "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc },
2562 { "v1 *,* queries sent", "txV1QueryGeneral", &stats->tx_query_old_general },
2563 { "v1 *,G queries sent", "txV1QueryGroup", &stats->tx_query_old_group },
2564 { "TX errors", "txErrors", &stats->tx_query_fail },
2565
d2951219
DL
2566 { "RX dropped (checksum error)", "rxDropChecksum", &stats->rx_drop_csum },
2567 { "RX dropped (invalid source)", "rxDropSrcAddr", &stats->rx_drop_srcaddr },
2568 { "RX dropped (invalid dest.)", "rxDropDstAddr", &stats->rx_drop_dstaddr },
2569 { "RX dropped (missing alert)", "rxDropRtrAlert", &stats->rx_drop_ra },
2570 { "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed },
2571 { "RX truncated reports", "rxTruncatedRep", &stats->rx_trunc_report },
2572 };
2573 /* clang-format on */
2574
2575 for (item = items; item < items + array_size(items); item++) {
2576 if (js_if)
2577 json_object_int_add(js_if, item->js_key, *item->val);
2578 else
2579 vty_out(vty, " %-30s %" PRIu64 "\n", item->text,
2580 *item->val);
2581 }
2582}
2583
2584static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf,
2585 const char *ifname, json_object *js)
2586{
2587 struct interface *ifp;
2588 json_object *js_vrf;
2589
2590 if (js) {
2591 js_vrf = json_object_new_object();
2592 json_object_object_add(js, vrf->name, js_vrf);
2593 }
2594
2595 FOR_ALL_INTERFACES (vrf, ifp) {
2596 struct pim_interface *pim_ifp;
2597 struct gm_if *gm_ifp;
2598 json_object *js_if = NULL;
2599
2600 if (ifname && strcmp(ifp->name, ifname))
2601 continue;
2602
2603 if (!ifp->info)
2604 continue;
2605 pim_ifp = ifp->info;
2606 if (!pim_ifp->mld)
2607 continue;
2608 gm_ifp = pim_ifp->mld;
2609
2610 if (js) {
2611 js_if = json_object_new_object();
2612 json_object_object_add(js_vrf, ifp->name, js_if);
2613 } else {
2614 vty_out(vty, "Interface: %s\n", ifp->name);
2615 }
2616 gm_show_stats_one(vty, gm_ifp, js_if);
2617 if (!js)
2618 vty_out(vty, "\n");
2619 }
2620}
2621
2622DEFPY(gm_show_interface_stats,
2623 gm_show_interface_stats_cmd,
2624 "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
2625 SHOW_STR
2626 IPV6_STR
2627 MLD_STR
2628 VRF_FULL_CMD_HELP_STR
2629 "MLD statistics\n"
2630 INTERFACE_STR
2631 "Interface name\n"
2632 JSON_STR)
2633{
2634 int ret = CMD_SUCCESS;
2635 struct vrf *vrf;
2636 json_object *js = NULL;
2637
2638 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2639 if (ret != CMD_SUCCESS)
2640 return ret;
2641
2642 if (json)
2643 js = json_object_new_object();
2644
2645 if (vrf)
2646 gm_show_stats_vrf(vty, vrf, ifname, js);
2647 else
2648 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2649 gm_show_stats_vrf(vty, vrf, ifname, js);
2650 return vty_json(vty, js);
2651}
2652
2653static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp,
2654 const struct prefix_ipv6 *groups,
2655 const struct prefix_ipv6 *sources, bool detail,
2656 json_object *js_if)
2657{
2658 struct gm_sg *sg, *sg_start;
2659 json_object *js_group = NULL;
2660 pim_addr js_grpaddr = PIMADDR_ANY;
2661 struct gm_subscriber sub_ref = {}, *sub_untracked;
2662
2663 if (groups) {
2664 struct gm_sg sg_ref = {};
2665
2666 sg_ref.sgaddr.grp = pim_addr_from_prefix(groups);
2667 sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
2668 } else
2669 sg_start = gm_sgs_first(gm_ifp->sgs);
2670
2671 sub_ref.addr = gm_dummy_untracked;
2672 sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref);
2673 /* NB: sub_untracked may be NULL if no untracked joins exist */
2674
2675 frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
2676 struct timeval *recent = NULL, *untracked = NULL;
2677 json_object *js_src;
2678
2679 if (groups) {
2680 struct prefix grp_p;
2681
2682 pim_addr_to_prefix(&grp_p, sg->sgaddr.grp);
2683 if (!prefix_match(groups, &grp_p))
2684 break;
2685 }
2686
2687 if (sources) {
2688 struct prefix src_p;
2689
2690 pim_addr_to_prefix(&src_p, sg->sgaddr.src);
2691 if (!prefix_match(sources, &src_p))
2692 continue;
2693 }
2694
2695 if (sg->most_recent) {
2696 struct gm_packet_state *packet;
2697
2698 packet = gm_packet_sg2state(sg->most_recent);
2699 recent = &packet->received;
2700 }
2701
2702 if (sub_untracked) {
2703 struct gm_packet_state *packet;
2704 struct gm_packet_sg *item;
2705
2706 item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked);
2707 if (item) {
2708 packet = gm_packet_sg2state(item);
2709 untracked = &packet->received;
2710 }
2711 }
2712
2713 if (!js_if) {
2714 FMT_NSTD_BEGIN; /* %.0p */
2715 vty_out(vty,
2716 "%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
2717 &sg->sgaddr.grp, &sg->sgaddr.src,
2718 gm_states[sg->state], recent, untracked,
2719 &sg->created);
2720
2721 if (!detail)
2722 continue;
2723
2724 struct gm_packet_sg *item;
2725 struct gm_packet_state *packet;
2726
2727 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2728 packet = gm_packet_sg2state(item);
2729
2730 if (packet->subscriber == sub_untracked)
2731 continue;
2732 vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
2733 &packet->subscriber->addr, "(JOIN)",
2734 &packet->received);
2735 }
2736 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2737 packet = gm_packet_sg2state(item);
2738
2739 if (packet->subscriber == sub_untracked)
2740 continue;
2741 vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
2742 &packet->subscriber->addr, "(PRUNE)",
2743 &packet->received);
2744 }
2745 FMT_NSTD_END; /* %.0p */
2746 continue;
2747 }
2748 /* if (js_if) */
2749
2750 if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) {
2751 js_group = json_object_new_object();
2752 json_object_object_addf(js_if, js_group, "%pPA",
2753 &sg->sgaddr.grp);
2754 js_grpaddr = sg->sgaddr.grp;
2755 }
2756
2757 js_src = json_object_new_object();
58971e15 2758 json_object_object_addf(js_group, js_src, "%pPAs",
d2951219
DL
2759 &sg->sgaddr.src);
2760
2761 json_object_string_add(js_src, "state", gm_states[sg->state]);
2762 json_object_string_addf(js_src, "created", "%pTVMs",
2763 &sg->created);
2764 json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent);
2765
2766 if (untracked)
2767 json_object_string_addf(js_src, "untrackedLastSeen",
2768 "%pTVMs", untracked);
2769 if (!detail)
2770 continue;
2771
2772 json_object *js_subs;
2773 struct gm_packet_sg *item;
2774 struct gm_packet_state *packet;
2775
2776 js_subs = json_object_new_object();
2777 json_object_object_add(js_src, "joinedBy", js_subs);
2778 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2779 packet = gm_packet_sg2state(item);
2780 if (packet->subscriber == sub_untracked)
2781 continue;
2782
2783 json_object *js_sub;
2784
2785 js_sub = json_object_new_object();
2786 json_object_object_addf(js_subs, js_sub, "%pPA",
2787 &packet->subscriber->addr);
2788 json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
2789 &packet->received);
2790 }
2791
2792 js_subs = json_object_new_object();
2793 json_object_object_add(js_src, "prunedBy", js_subs);
2794 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2795 packet = gm_packet_sg2state(item);
2796 if (packet->subscriber == sub_untracked)
2797 continue;
2798
2799 json_object *js_sub;
2800
2801 js_sub = json_object_new_object();
2802 json_object_object_addf(js_subs, js_sub, "%pPA",
2803 &packet->subscriber->addr);
2804 json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
2805 &packet->received);
2806 }
2807 }
2808}
2809
2810static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf,
2811 const char *ifname,
2812 const struct prefix_ipv6 *groups,
2813 const struct prefix_ipv6 *sources, bool detail,
2814 json_object *js)
2815{
2816 struct interface *ifp;
2817 json_object *js_vrf;
2818
2819 if (js) {
2820 js_vrf = json_object_new_object();
58971e15 2821 json_object_string_add(js_vrf, "vrf", vrf->name);
d2951219
DL
2822 json_object_object_add(js, vrf->name, js_vrf);
2823 }
2824
2825 FOR_ALL_INTERFACES (vrf, ifp) {
2826 struct pim_interface *pim_ifp;
2827 struct gm_if *gm_ifp;
2828 json_object *js_if = NULL;
2829
2830 if (ifname && strcmp(ifp->name, ifname))
2831 continue;
2832
2833 if (!ifp->info)
2834 continue;
2835 pim_ifp = ifp->info;
2836 if (!pim_ifp->mld)
2837 continue;
2838 gm_ifp = pim_ifp->mld;
2839
2840 if (js) {
2841 js_if = json_object_new_object();
2842 json_object_object_add(js_vrf, ifp->name, js_if);
2843 }
2844
2845 if (!js && !ifname)
2846 vty_out(vty, "\nOn interface %s:\n", ifp->name);
2847
2848 gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if);
2849 }
2850}
2851
2852DEFPY(gm_show_interface_joins,
2853 gm_show_interface_joins_cmd,
2854 "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
2855 SHOW_STR
2856 IPV6_STR
2857 MLD_STR
2858 VRF_FULL_CMD_HELP_STR
2859 "MLD joined groups & sources\n"
2860 INTERFACE_STR
2861 "Interface name\n"
2862 "Limit output to group range\n"
2863 "Show groups covered by this prefix\n"
2864 "Limit output to source range\n"
2865 "Show sources covered by this prefix\n"
2866 "Show details, including tracked receivers\n"
2867 JSON_STR)
2868{
2869 int ret = CMD_SUCCESS;
2870 struct vrf *vrf;
2871 json_object *js = NULL;
2872
2873 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2874 if (ret != CMD_SUCCESS)
2875 return ret;
2876
2877 if (json)
2878 js = json_object_new_object();
2879 else
2880 vty_out(vty, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
2881 "Source", "State", "LastSeen", "NonTrkSeen", "Created");
2882
2883 if (vrf)
2884 gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail,
2885 js);
2886 else
2887 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2888 gm_show_joins_vrf(vty, vrf, ifname, groups, sources,
2889 !!detail, js);
2890 return vty_json(vty, js);
2891}
2892
cdc1b770
SG
2893static void gm_show_groups(struct vty *vty, struct vrf *vrf, bool uj)
2894{
2895 struct interface *ifp;
2896 struct ttable *tt = NULL;
2897 char *table;
2898 json_object *json = NULL;
2899 json_object *json_iface = NULL;
2900 json_object *json_group = NULL;
2901 json_object *json_groups = NULL;
2902 struct pim_instance *pim = vrf->info;
2903
2904 if (uj) {
2905 json = json_object_new_object();
2906 json_object_int_add(json, "totalGroups", pim->gm_group_count);
2907 json_object_int_add(json, "watermarkLimit",
2908 pim->gm_watermark_limit);
2909 } else {
2910 /* Prepare table. */
2911 tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
2912 ttable_add_row(tt, "Interface|Group|Version|Uptime");
2913 tt->style.cell.rpad = 2;
2914 tt->style.corner = '+';
2915 ttable_restyle(tt);
2916
2917 vty_out(vty, "Total MLD groups: %u\n", pim->gm_group_count);
2918 vty_out(vty, "Watermark warn limit(%s): %u\n",
2919 pim->gm_watermark_limit ? "Set" : "Not Set",
2920 pim->gm_watermark_limit);
2921 }
2922
2923 /* scan interfaces */
2924 FOR_ALL_INTERFACES (vrf, ifp) {
2925
2926 struct pim_interface *pim_ifp = ifp->info;
2927 struct gm_if *gm_ifp;
2928 struct gm_sg *sg;
2929
2930 if (!pim_ifp)
2931 continue;
2932
2933 gm_ifp = pim_ifp->mld;
2934 if (!gm_ifp)
2935 continue;
2936
2937 /* scan mld groups */
2938 frr_each (gm_sgs, gm_ifp->sgs, sg) {
2939
2940 if (uj) {
2941 json_object_object_get_ex(json, ifp->name,
2942 &json_iface);
2943
2944 if (!json_iface) {
2945 json_iface = json_object_new_object();
2946 json_object_pim_ifp_add(json_iface,
2947 ifp);
2948 json_object_object_add(json, ifp->name,
2949 json_iface);
2950 json_groups = json_object_new_array();
2951 json_object_object_add(json_iface,
2952 "groups",
2953 json_groups);
2954 }
2955
2956 json_group = json_object_new_object();
2957 json_object_string_addf(json_group, "group",
2958 "%pPAs",
2959 &sg->sgaddr.grp);
2960
2961 json_object_int_add(json_group, "version",
2962 pim_ifp->mld_version);
2963 json_object_string_addf(json_group, "uptime",
2964 "%pTVMs", &sg->created);
2965 json_object_array_add(json_groups, json_group);
2966 } else {
2967 ttable_add_row(tt, "%s|%pPAs|%d|%pTVMs",
2968 ifp->name, &sg->sgaddr.grp,
2969 pim_ifp->mld_version,
2970 &sg->created);
2971 }
2972 } /* scan gm groups */
2973 } /* scan interfaces */
2974
2975 if (uj)
2976 vty_json(vty, json);
2977 else {
2978 /* Dump the generated table. */
2979 table = ttable_dump(tt, "\n");
2980 vty_out(vty, "%s\n", table);
2981 XFREE(MTYPE_TMP, table);
2982 ttable_del(tt);
2983 }
2984}
2985
2986DEFPY(gm_show_mld_groups,
2987 gm_show_mld_groups_cmd,
2988 "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
2989 SHOW_STR
2990 IPV6_STR
2991 MLD_STR
2992 VRF_FULL_CMD_HELP_STR
2993 MLD_GROUP_STR
2994 JSON_STR)
2995{
2996 int ret = CMD_SUCCESS;
2997 struct vrf *vrf;
2998
2999 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
3000 if (ret != CMD_SUCCESS)
3001 return ret;
3002
3003 if (vrf)
3004 gm_show_groups(vty, vrf, !!json);
3005 else
3006 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
3007 gm_show_groups(vty, vrf, !!json);
3008
3009 return CMD_SUCCESS;
3010}
3011
5e5034b0
DL
3012DEFPY(gm_debug_show,
3013 gm_debug_show_cmd,
3014 "debug show mld interface IFNAME",
3015 DEBUG_STR
3016 SHOW_STR
a0dfca37 3017 MLD_STR
5e5034b0 3018 INTERFACE_STR
a0dfca37 3019 "Interface name\n")
5e5034b0
DL
3020{
3021 struct interface *ifp;
3022 struct pim_interface *pim_ifp;
3023 struct gm_if *gm_ifp;
3024
3025 ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
3026 if (!ifp) {
3027 vty_out(vty, "%% no such interface: %pSQq\n", ifname);
3028 return CMD_WARNING;
3029 }
3030
3031 pim_ifp = ifp->info;
3032 if (!pim_ifp) {
3033 vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname);
3034 return CMD_WARNING;
3035 }
3036
3037 gm_ifp = pim_ifp->mld;
3038 if (!gm_ifp) {
3039 vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname);
3040 return CMD_WARNING;
3041 }
3042
3043 vty_out(vty, "querier: %pPA\n", &gm_ifp->querier);
3044 vty_out(vty, "ll_lowest: %pPA\n\n", &pim_ifp->ll_lowest);
3045 vty_out(vty, "t_query: %pTHD\n", gm_ifp->t_query);
3046 vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier);
5e5034b0
DL
3047 vty_out(vty, "t_expire: %pTHD\n", gm_ifp->t_expire);
3048
3049 vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending);
3050 for (size_t i = 0; i < gm_ifp->n_pending; i++) {
3051 int64_t query, expiry;
3052
3053 query = monotime_since(&gm_ifp->pending[i].query, NULL);
3054 expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL);
3055
3056 vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n",
3057 i, query / 1000, expiry / 1000);
3058 }
3059
3060 struct gm_sg *sg;
3061 struct gm_packet_state *pkt;
3062 struct gm_packet_sg *item;
3063 struct gm_subscriber *subscriber;
3064
3065 vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs));
3066 frr_each (gm_sgs, gm_ifp->sgs, sg) {
3067 vty_out(vty, "\t%pSG t_expire=%pTHD\n", &sg->sgaddr,
3068 sg->t_sg_expire);
3069
3070 vty_out(vty, "\t @pos:%zu\n",
3071 gm_packet_sg_subs_count(sg->subs_positive));
3072 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
3073 pkt = gm_packet_sg2state(item);
3074
3075 vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n",
3076 item->is_src ? "S" : "",
3077 item->is_excl ? "E" : "",
3078 &pkt->subscriber->addr, pkt->subscriber, pkt,
3079 item->offset);
3080
3081 assert(item->sg == sg);
3082 }
3083 vty_out(vty, "\t @neg:%zu\n",
3084 gm_packet_sg_subs_count(sg->subs_negative));
3085 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
3086 pkt = gm_packet_sg2state(item);
3087
3088 vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n",
3089 item->is_src ? "S" : "",
3090 item->is_excl ? "E" : "",
3091 &pkt->subscriber->addr, pkt->subscriber, pkt,
3092 item->offset);
3093
3094 assert(item->sg == sg);
3095 }
3096 }
3097
3098 vty_out(vty, "\n%zu subscribers:\n",
3099 gm_subscribers_count(gm_ifp->subscribers));
3100 frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) {
3101 vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr,
3102 subscriber, gm_packets_count(subscriber->packets));
3103
3104 frr_each (gm_packets, subscriber->packets, pkt) {
3105 vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n",
3106 pkt,
3107 monotime_since(&pkt->received, NULL) *
3108 0.000001f,
3109 pkt->n_active, pkt->n_sg);
3110
3111 for (size_t i = 0; i < pkt->n_sg; i++) {
3112 item = pkt->items + i;
3113
3114 vty_out(vty, "\t\t[%zu]", i);
3115
3116 if (!item->sg) {
3117 vty_out(vty, " inactive\n");
3118 continue;
3119 }
3120
3121 vty_out(vty, " %s%s %pSG nE=%u\n",
3122 item->is_src ? "S" : "",
3123 item->is_excl ? "E" : "",
3124 &item->sg->sgaddr, item->n_exclude);
3125 }
3126 }
3127 }
3128
3129 return CMD_SUCCESS;
3130}
3131
3132DEFPY(gm_debug_iface_cfg,
3133 gm_debug_iface_cfg_cmd,
3134 "debug ipv6 mld {"
3135 "robustness (0-7)|"
3136 "query-max-response-time (1-8387584)"
3137 "}",
3138 DEBUG_STR
3139 IPV6_STR
3140 "Multicast Listener Discovery\n"
3141 "QRV\nQRV\n"
3142 "maxresp\nmaxresp\n")
3143{
3144 VTY_DECLVAR_CONTEXT(interface, ifp);
3145 struct pim_interface *pim_ifp;
3146 struct gm_if *gm_ifp;
3147 bool changed = false;
3148
3149 pim_ifp = ifp->info;
3150 if (!pim_ifp) {
3151 vty_out(vty, "%% no PIM state for interface %pSQq\n",
3152 ifp->name);
3153 return CMD_WARNING;
3154 }
3155 gm_ifp = pim_ifp->mld;
3156 if (!gm_ifp) {
3157 vty_out(vty, "%% no MLD state for interface %pSQq\n",
3158 ifp->name);
3159 return CMD_WARNING;
3160 }
3161
3162 if (robustness_str && gm_ifp->cur_qrv != robustness) {
3163 gm_ifp->cur_qrv = robustness;
3164 changed = true;
3165 }
3166 if (query_max_response_time_str &&
3167 gm_ifp->cur_max_resp != (unsigned int)query_max_response_time) {
3168 gm_ifp->cur_max_resp = query_max_response_time;
3169 changed = true;
3170 }
3171
3172 if (changed) {
3173 vty_out(vty, "%% MLD querier config changed, bumping\n");
3174 gm_bump_querier(gm_ifp);
3175 }
3176 return CMD_SUCCESS;
3177}
3178
3179void gm_cli_init(void);
3180
3181void gm_cli_init(void)
3182{
d2951219
DL
3183 install_element(VIEW_NODE, &gm_show_interface_cmd);
3184 install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
3185 install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
cdc1b770 3186 install_element(VIEW_NODE, &gm_show_mld_groups_cmd);
d2951219 3187
5e5034b0
DL
3188 install_element(VIEW_NODE, &gm_debug_show_cmd);
3189 install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
3190}