]> git.proxmox.com Git - mirror_frr.git/blame - pimd/pim6_mld.c
*: Rename `struct thread` to `struct event`
[mirror_frr.git] / pimd / pim6_mld.c
CommitLineData
acddc0ed 1// SPDX-License-Identifier: GPL-2.0-or-later
5e5034b0
DL
2/*
3 * PIMv6 MLD querier
4 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
5e5034b0
DL
5 */
6
7/*
8 * keep pim6_mld.h open when working on this code. Most data structures are
9 * commented in the header.
10 *
11 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
12 * that this code will replace the old IGMP querier at some point.
13 */
14
15#include <zebra.h>
16#include <netinet/ip6.h>
17
18#include "lib/memory.h"
19#include "lib/jhash.h"
20#include "lib/prefix.h"
21#include "lib/checksum.h"
cb37cb33 22#include "lib/event.h"
cdc1b770 23#include "termtable.h"
5e5034b0
DL
24
25#include "pimd/pim6_mld.h"
26#include "pimd/pim6_mld_protocol.h"
27#include "pimd/pim_memory.h"
28#include "pimd/pim_instance.h"
29#include "pimd/pim_iface.h"
cdc1b770
SG
30#include "pimd/pim6_cmd.h"
31#include "pimd/pim_cmd_common.h"
5e5034b0
DL
32#include "pimd/pim_util.h"
33#include "pimd/pim_tib.h"
34#include "pimd/pimd.h"
35
36#ifndef IPV6_MULTICAST_ALL
37#define IPV6_MULTICAST_ALL 29
38#endif
39
40DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface");
41DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet");
42DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber");
43DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state");
44DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)");
45DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
46DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");
47
e6685141 48static void gm_t_query(struct event *t);
5e5034b0
DL
49static void gm_trigger_specific(struct gm_sg *sg);
50static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
51 struct timeval expire_wait);
52
53/* shorthand for log messages */
54#define log_ifp(msg) \
55 "[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
56#define log_pkt_src(msg) \
57 "[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
58 &pkt_src->sin6_addr
59#define log_sg(sg, msg) \
60 "[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
61 sg->iface->ifp->name, &sg->sgaddr
62
63/* clang-format off */
64#if PIM_IPV == 6
65static const pim_addr gm_all_hosts = {
66 .s6_addr = {
67 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
69 },
70};
71static const pim_addr gm_all_routers = {
72 .s6_addr = {
73 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
75 },
76};
77/* MLDv1 does not allow subscriber tracking due to report suppression
78 * hence, the source address is replaced with ffff:...:ffff
79 */
80static const pim_addr gm_dummy_untracked = {
81 .s6_addr = {
82 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
83 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
84 },
85};
86#else
87/* 224.0.0.1 */
88static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), };
89/* 224.0.0.22 */
90static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), };
91static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, };
92#endif
93/* clang-format on */
94
95#define IPV6_MULTICAST_SCOPE_LINK 2
96
97static inline uint8_t in6_multicast_scope(const pim_addr *addr)
98{
99 return addr->s6_addr[1] & 0xf;
100}
101
102static inline bool in6_multicast_nofwd(const pim_addr *addr)
103{
104 return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK;
105}
106
107/*
108 * (S,G) -> subscriber,(S,G)
109 */
110
111static int gm_packet_sg_cmp(const struct gm_packet_sg *a,
112 const struct gm_packet_sg *b)
113{
114 const struct gm_packet_state *s_a, *s_b;
115
116 s_a = gm_packet_sg2state(a);
117 s_b = gm_packet_sg2state(b);
118 return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr);
119}
120
121DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm,
122 gm_packet_sg_cmp);
123
/* Look up the gm_packet_sg for a given (S,G) entry and subscriber, on
 * either the positive (join) or negative (exclude) list.
 *
 * Builds a throwaway gm_packet_state header plus one item on the stack;
 * gm_packet_sg_cmp() only needs ->subscriber, which it reaches through
 * gm_packet_sg2state() using the item's ->offset.  With .offset = 0 and
 * the hdr placed directly in front, that back-reference works on this
 * stack copy too.
 */
static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg,
					      enum gm_sub_sense sense,
					      struct gm_subscriber *sub)
{
	struct {
		struct gm_packet_state hdr;
		struct gm_packet_sg item;
	} ref = {
		/* clang-format off */
		.hdr = {
			.subscriber = sub,
		},
		.item = {
			.offset = 0,
		},
		/* clang-format on */
	};

	return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item);
}
144
145/*
146 * interface -> (*,G),pending
147 */
148
149static int gm_grp_pending_cmp(const struct gm_grp_pending *a,
150 const struct gm_grp_pending *b)
151{
152 return IPV6_ADDR_CMP(&a->grp, &b->grp);
153}
154
155DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm,
156 gm_grp_pending_cmp);
157
158/*
159 * interface -> ([S1,S2,...],G),pending
160 */
161
162static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a,
163 const struct gm_gsq_pending *b)
164{
165 if (a->s_bit != b->s_bit)
166 return numcmp(a->s_bit, b->s_bit);
167
168 return IPV6_ADDR_CMP(&a->grp, &b->grp);
169}
170
171static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a)
172{
173 uint32_t seed = a->s_bit ? 0x68f0eb5e : 0x156b7f19;
174
175 return jhash(&a->grp, sizeof(a->grp), seed);
176}
177
178DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp,
179 gm_gsq_pending_hash);
180
181/*
182 * interface -> (S,G)
183 */
184
185static int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b)
186{
187 return pim_sgaddr_cmp(a->sgaddr, b->sgaddr);
188}
189
190DECLARE_RBTREE_UNIQ(gm_sgs, struct gm_sg, itm, gm_sg_cmp);
191
192static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
193 pim_addr src)
194{
195 struct gm_sg ref = {};
196
197 ref.sgaddr.grp = grp;
198 ref.sgaddr.src = src;
199 return gm_sgs_find(gm_ifp->sgs, &ref);
200}
201
202static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
203 pim_addr src)
204{
205 struct gm_sg *ret, *prev;
206
207 ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
208 ret->sgaddr.grp = grp;
209 ret->sgaddr.src = src;
210 ret->iface = gm_ifp;
211 prev = gm_sgs_add(gm_ifp->sgs, ret);
212
213 if (prev) {
214 XFREE(MTYPE_GM_SG, ret);
215 ret = prev;
216 } else {
aa2f9349 217 monotime(&ret->created);
5e5034b0
DL
218 gm_packet_sg_subs_init(ret->subs_positive);
219 gm_packet_sg_subs_init(ret->subs_negative);
220 }
221 return ret;
222}
223
224/*
225 * interface -> packets, sorted by expiry (because add_tail insert order)
226 */
227
228DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm);
229
230/*
231 * subscriber -> packets
232 */
233
234DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm);
235
236/*
237 * interface -> subscriber
238 */
239
240static int gm_subscriber_cmp(const struct gm_subscriber *a,
241 const struct gm_subscriber *b)
242{
243 return IPV6_ADDR_CMP(&a->addr, &b->addr);
244}
245
246static uint32_t gm_subscriber_hash(const struct gm_subscriber *a)
247{
248 return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4);
249}
250
251DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp,
252 gm_subscriber_hash);
253
254static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp,
255 pim_addr addr)
256{
257 struct gm_subscriber ref = {}, *ret;
258
259 ref.addr = addr;
260 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
261 if (ret)
262 ret->refcount++;
263 return ret;
264}
265
266static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp,
267 pim_addr addr)
268{
269 struct gm_subscriber ref = {}, *ret;
270
271 ref.addr = addr;
272 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
273
274 if (!ret) {
275 ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret));
276 ret->iface = gm_ifp;
277 ret->addr = addr;
278 ret->refcount = 1;
aa2f9349 279 monotime(&ret->created);
5e5034b0
DL
280 gm_packets_init(ret->packets);
281
282 gm_subscribers_add(gm_ifp->subscribers, ret);
283 }
284 return ret;
285}
286
287static void gm_subscriber_drop(struct gm_subscriber **subp)
288{
289 struct gm_subscriber *sub = *subp;
290 struct gm_if *gm_ifp;
291
292 if (!sub)
293 return;
294 gm_ifp = sub->iface;
295
296 *subp = NULL;
297 sub->refcount--;
298
299 if (sub->refcount)
300 return;
301
302 gm_subscribers_del(gm_ifp->subscribers, sub);
303 XFREE(MTYPE_GM_SUBSCRIBER, sub);
304}
305
306/****************************************************************************/
307
/* bundle query timer values for combined v1/v2 handling */
struct gm_query_timers {
	/* QRV = robustness variable, i.e. expected retransmit count */
	unsigned int qrv;
	/* maximum response delay advertised in the query, milliseconds */
	unsigned int max_resp_ms;
	/* query (retransmit) interval, milliseconds */
	unsigned int qqic_ms;

	/* fuzz: extra slack added on top of the computed wait;
	 * expire_wait: output of gm_expiry_calc() - how long to keep
	 * state alive after sending/seeing a query
	 */
	struct timeval fuzz;
	struct timeval expire_wait;
};
317
318static void gm_expiry_calc(struct gm_query_timers *timers)
319{
320 unsigned int expire =
321 (timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms;
322 ldiv_t exp_div = ldiv(expire, 1000);
323
324 timers->expire_wait.tv_sec = exp_div.quot;
325 timers->expire_wait.tv_usec = exp_div.rem * 1000;
326 timeradd(&timers->expire_wait, &timers->fuzz, &timers->expire_wait);
327}
328
/* free an (S,G)/(*,G) entry; the caller is responsible for having
 * removed it from gm_ifp->sgs first (see gm_sg_update()).
 */
static void gm_sg_free(struct gm_sg *sg)
{
	/* t_sg_expiry is handled before this is reached */
	THREAD_OFF(sg->t_sg_query);
	gm_packet_sg_subs_fini(sg->subs_negative);
	gm_packet_sg_subs_fini(sg->subs_positive);
	XFREE(MTYPE_GM_SG, sg);
}
337
/* clang-format off */
/* printable names for enum gm_sg_state, indexed by state value; used in
 * debug/log output below
 */
static const char *const gm_states[] = {
	[GM_SG_NOINFO]			= "NOINFO",
	[GM_SG_JOIN]			= "JOIN",
	[GM_SG_JOIN_EXPIRING]		= "JOIN_EXPIRING",
	[GM_SG_PRUNE]			= "PRUNE",
	[GM_SG_NOPRUNE]			= "NOPRUNE",
	[GM_SG_NOPRUNE_EXPIRING]	= "NOPRUNE_EXPIRING",
};
/* clang-format on */
348
349CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported");
350/* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
351 * joined. Whether we actually want/need to support this is a separate
352 * question - it is almost never used. In fact this is exactly what RFC5790
353 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
354 */
355
/* Central (S,G)/(*,G) state machine.  Re-derives the desired state from
 * the positive/negative subscription counts, kicks off expiry & specific
 * queries on JOIN->EXPIRING transitions, propagates join/prune into the
 * TIB, and frees the entry once it reaches NOINFO.
 *
 * has_expired: true when called from expiry processing, i.e. the
 * "grace period" EXPIRING states must not be (re)entered.
 */
static void gm_sg_update(struct gm_sg *sg, bool has_expired)
{
	struct gm_if *gm_ifp = sg->iface;
	enum gm_sg_state prev, desired;
	bool new_join;
	struct gm_sg *grp = NULL;

	/* for S,G entries the *,G entry (if any) also matters below */
	if (!pim_addr_is_any(sg->sgaddr.src))
		grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY);
	else
		assert(sg->state != GM_SG_PRUNE);

	if (gm_packet_sg_subs_count(sg->subs_positive)) {
		desired = GM_SG_JOIN;
		assert(!sg->t_sg_expire);
	} else if ((sg->state == GM_SG_JOIN ||
		    sg->state == GM_SG_JOIN_EXPIRING) &&
		   !has_expired)
		desired = GM_SG_JOIN_EXPIRING;
	else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive))
		desired = GM_SG_NOINFO;
	else if (gm_packet_sg_subs_count(grp->subs_positive) ==
		 gm_packet_sg_subs_count(sg->subs_negative)) {
		/* every *,G subscriber excludes this source */
		if ((sg->state == GM_SG_NOPRUNE ||
		     sg->state == GM_SG_NOPRUNE_EXPIRING) &&
		    !has_expired)
			desired = GM_SG_NOPRUNE_EXPIRING;
		else
			desired = GM_SG_PRUNE;
	} else if (gm_packet_sg_subs_count(sg->subs_negative))
		desired = GM_SG_NOPRUNE;
	else
		desired = GM_SG_NOINFO;

	if (desired != sg->state && !gm_ifp->stopping) {
		if (PIM_DEBUG_GM_EVENTS)
			zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
				   gm_states[desired]);

		if (desired == GM_SG_JOIN_EXPIRING ||
		    desired == GM_SG_NOPRUNE_EXPIRING) {
			/* start expiry countdown + last-member queries */
			struct gm_query_timers timers;

			timers.qrv = gm_ifp->cur_qrv;
			timers.max_resp_ms = gm_ifp->cur_max_resp;
			timers.qqic_ms = gm_ifp->cur_query_intv_trig;
			timers.fuzz = gm_ifp->cfg_timing_fuzz;

			gm_expiry_calc(&timers);
			gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);

			THREAD_OFF(sg->t_sg_query);
			sg->n_query = gm_ifp->cur_lmqc;
			sg->query_sbit = false;
			gm_trigger_specific(sg);
		}
	}
	prev = sg->state;
	sg->state = desired;

	/* link-local-scope groups are never forwarded -> never joined */
	if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping)
		new_join = false;
	else
		new_join = gm_sg_state_want_join(desired);

	if (new_join && !sg->tib_joined) {
		/* this will retry if join previously failed */
		sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
						gm_ifp->ifp, &sg->oil);
		if (!sg->tib_joined)
			zlog_warn(
				"MLD join for %pSG%%%s not propagated into TIB",
				&sg->sgaddr, gm_ifp->ifp->name);
		else
			zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr,
				  gm_ifp->ifp->name);

	} else if (sg->tib_joined && !new_join) {
		tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil);

		sg->oil = NULL;
		sg->tib_joined = false;
	}

	if (desired == GM_SG_NOINFO) {
		/* multiple paths can lead to the last state going away;
		 * t_sg_expire can still be running if we're arriving from
		 * another path.
		 */
		if (has_expired)
			THREAD_OFF(sg->t_sg_expire);

		assertf((!sg->t_sg_expire &&
			 !gm_packet_sg_subs_count(sg->subs_positive) &&
			 !gm_packet_sg_subs_count(sg->subs_negative)),
			"%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
			&sg->sgaddr, gm_ifp->ifp->name, has_expired,
			sg->t_sg_expire, gm_states[prev], gm_states[desired],
			gm_packet_sg_subs_count(sg->subs_positive),
			gm_packet_sg_subs_count(sg->subs_negative), grp);

		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_sg(sg, "dropping"));

		/* NOINFO means no state left at all => free the entry */
		gm_sgs_del(gm_ifp->sgs, sg);
		gm_sg_free(sg);
	}
}
464
465/****************************************************************************/
466
467/* the following bunch of functions deals with transferring state from
468 * received packets into gm_packet_state. As a reminder, the querier is
469 * structured to keep all items received in one packet together, since they
470 * will share expiry timers and thus allows efficient handling.
471 */
472
/* unhook a packet state from the interface expiry list and its
 * subscriber (releasing the subscriber reference), then free it.
 * Callers must have dropped / unlinked all items first.
 */
static void gm_packet_free(struct gm_packet_state *pkt)
{
	gm_packet_expires_del(pkt->iface->expires, pkt);
	gm_packets_del(pkt->subscriber->packets, pkt);
	gm_subscriber_drop(&pkt->subscriber);
	XFREE(MTYPE_GM_STATE, pkt);
}
480
481static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt,
482 struct gm_sg *sg, bool is_excl,
483 bool is_src)
484{
485 struct gm_packet_sg *item;
486
487 assert(pkt->n_active < pkt->n_sg);
488
489 item = &pkt->items[pkt->n_active];
490 item->sg = sg;
491 item->is_excl = is_excl;
492 item->is_src = is_src;
493 item->offset = pkt->n_active;
494
495 pkt->n_active++;
496 return item;
497}
498
/* remove one item (and, for a *,G EXCLUDE item, its trailing S,G
 * exclude items) from its subscription lists.  Frees the whole packet
 * state when this was its last active item.
 *
 * Returns true if the packet state was freed (caller must not touch
 * other items of the same packet afterwards).
 */
static bool gm_packet_sg_drop(struct gm_packet_sg *item)
{
	struct gm_packet_state *pkt;
	size_t i;

	assert(item->sg);

	pkt = gm_packet_sg2state(item);
	if (item->sg->most_recent == item)
		item->sg->most_recent = NULL;

	/* exclude-list items immediately follow their *,G item */
	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *excl_item;

		excl_item = item + 1 + i;
		if (!excl_item->sg)
			continue;

		gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item);
		excl_item->sg = NULL;
		pkt->n_active--;

		/* the *,G item itself is still counted in n_active */
		assert(pkt->n_active > 0);
	}

	if (item->is_excl && item->is_src)
		gm_packet_sg_subs_del(item->sg->subs_negative, item);
	else
		gm_packet_sg_subs_del(item->sg->subs_positive, item);
	item->sg = NULL;
	pkt->n_active--;

	if (!pkt->n_active) {
		gm_packet_free(pkt);
		return true;
	}
	return false;
}
537
/* drop all remaining items of a packet state (used on expiry), updating
 * each affected (S,G) as we go.  gm_packet_sg_drop() frees the packet
 * itself once the last item is gone, hence the break on "deleted".
 *
 * trace: emit per-item debug logs (suppressed for bulk teardown paths).
 */
static void gm_packet_drop(struct gm_packet_state *pkt, bool trace)
{
	for (size_t i = 0; i < pkt->n_sg; i++) {
		struct gm_sg *sg = pkt->items[i].sg;
		bool deleted;

		/* already dropped earlier (e.g. exclude-list member) */
		if (!sg)
			continue;

		if (trace && PIM_DEBUG_GM_TRACE)
			zlog_debug(log_sg(sg, "general-dropping from %pPA"),
				   &pkt->subscriber->addr);
		deleted = gm_packet_sg_drop(&pkt->items[i]);

		/* has_expired=true: don't re-enter EXPIRING states */
		gm_sg_update(sg, true);
		if (deleted)
			break;
	}
}
557
558static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp,
559 struct gm_subscriber *subscriber,
560 pim_addr grp, pim_addr *srcs,
561 size_t n_src, enum gm_sub_sense sense)
562{
563 struct gm_sg *sg;
564 struct gm_packet_sg *old_src;
565 size_t i;
566
567 for (i = 0; i < n_src; i++) {
568 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
569 if (!sg)
570 continue;
571
572 old_src = gm_packet_sg_find(sg, sense, subscriber);
573 if (!old_src)
574 continue;
575
576 gm_packet_sg_drop(old_src);
577 gm_sg_update(sg, false);
578 }
579}
580
581static void gm_sg_expiry_cancel(struct gm_sg *sg)
582{
a96d64b0 583 if (sg->t_sg_expire && PIM_DEBUG_GM_TRACE)
5e5034b0
DL
584 zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
585 THREAD_OFF(sg->t_sg_expire);
586 sg->query_sbit = true;
587}
588
/* first pass: process all changes resulting in removal of state:
 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
 * note *replacing* state is NOT considered *removing* state here
 *
 * everything else is thrown into pkt for creation of state in pass 2
 */
static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
			       struct mld_v2_rec_hdr *rechdr)
{
	/* NB: pkt->subscriber can be NULL here if the subscriber was not
	 * previously seen!
	 */
	struct gm_subscriber *subscriber = pkt->subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp = NULL;
	struct gm_packet_sg *item;
	size_t n_src = ntohs(rechdr->n_src);
	size_t j;
	bool is_excl = false;

	grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY);
	if (grp && subscriber)
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);

	/* INCLUDE mode is represented as S,G entries only, so a *,G
	 * item for this subscriber can only be EXCLUDE state
	 */
	assert(old_grp == NULL || old_grp->is_excl);

	switch (rechdr->type) {
	case MLD_RECTYPE_IS_EXCLUDE:
	case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
		/* this always replaces or creates state */
		is_excl = true;
		if (!grp)
			grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);

		item = gm_packet_sg_setup(pkt, grp, is_excl, false);
		item->n_exclude = n_src;

		/* [EXCL_INCL_SG_NOTE] referenced below
		 *
		 * in theory, we should drop any S,G that the host may have
		 * previously added in INCLUDE mode. In practice, this is both
		 * incredibly rare and entirely irrelevant. It only makes any
		 * difference if an S,G that the host previously had on the
		 * INCLUDE list is now on the blocked list for EXCLUDE, which
		 * we can cover in processing the S,G list in pass2_excl().
		 *
		 * Other S,G from the host are simply left to expire
		 * "naturally" through general expiry.
		 */
		break;

	case MLD_RECTYPE_IS_INCLUDE:
	case MLD_RECTYPE_CHANGE_TO_INCLUDE:
		if (old_grp) {
			/* INCLUDE has no *,G state, so old_grp here refers to
			 * previous EXCLUDE => delete it
			 */
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);
			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
		}
		break;

	case MLD_RECTYPE_ALLOW_NEW_SOURCES:
		if (old_grp) {
			/* remove S,Gs from EXCLUDE, and then we're done */
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_NEG);
			return;
		}
		/* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
		 * identical to IS_INCLUDE (because the list of sources in
		 * IS_INCLUDE is not exhaustive)
		 */
		break;

	case MLD_RECTYPE_BLOCK_OLD_SOURCES:
		if (old_grp) {
			/* this is intentionally not implemented because it
			 * would be complicated as hell. we only take the list
			 * of blocked sources from full group state records
			 */
			return;
		}

		if (subscriber)
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_POS);
		return;
	}

	/* record the per-source items for pass 2; for EXCLUDE these are
	 * the trailing exclude-list of the *,G item set up above
	 */
	for (j = 0; j < n_src; j++) {
		struct gm_sg *sg;

		sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
		if (!sg)
			sg = gm_sg_make(pkt->iface, rechdr->grp,
					rechdr->srcs[j]);

		gm_packet_sg_setup(pkt, sg, is_excl, true);
	}
}
696
697/* second pass: creating/updating/refreshing state. All the items from the
698 * received packet have already been thrown into gm_packet_state.
699 */
700
/* pass 2 for one INCLUDE-mode S,G item: replace the subscriber's old
 * positive subscription (if any) with the new item, mark the S,G alive,
 * and re-run the state machine.
 */
static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i)
{
	struct gm_packet_sg *item = &pkt->items[i];
	struct gm_packet_sg *old = NULL;
	struct gm_sg *sg = item->sg;

	/* EXCLUDE state was already dropped in pass1 */
	assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber));

	old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber);
	if (old)
		gm_packet_sg_drop(old);

	/* n_active was reset before pass 2; count this item back in */
	pkt->n_active++;
	gm_packet_sg_subs_add(sg->subs_positive, item);

	sg->most_recent = item;
	gm_sg_expiry_cancel(sg);
	gm_sg_update(sg, false);
}
721
/* pass 2 for one EXCLUDE-mode *,G item at pkt->items[offs], whose
 * exclude-list S,G items follow at offs+1 .. offs+n_exclude.  Replaces
 * the subscriber's previous EXCLUDE state (if any), installs the new
 * positive *,G + negative S,G subscriptions, and updates all affected
 * (S,G) state machines.
 */
static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs)
{
	struct gm_packet_sg *item = &pkt->items[offs];
	struct gm_packet_sg *old_grp, *item_dup;
	struct gm_sg *sg_grp = item->sg;
	size_t i;

	old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber);
	if (old_grp) {
		for (i = 0; i < item->n_exclude; i++) {
			struct gm_packet_sg *item_src, *old_src;

			item_src = &pkt->items[offs + 1 + i];
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);

			/* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
			 * items left over if the host previously had INCLUDE
			 * mode going. Remove them here if we find any.
			 */
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);
		}

		/* the previous loop has removed the S,G entries which are
		 * still excluded after this update. So anything left on the
		 * old item was previously excluded but is now included
		 * => need to trigger update on S,G
		 */
		for (i = 0; i < old_grp->n_exclude; i++) {
			struct gm_packet_sg *old_src;
			struct gm_sg *old_sg_src;

			old_src = old_grp + 1 + i;
			old_sg_src = old_src->sg;
			if (!old_sg_src)
				continue;

			gm_packet_sg_drop(old_src);
			gm_sg_update(old_sg_src, false);
		}

		gm_packet_sg_drop(old_grp);
	}

	item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item);
	assert(!item_dup);
	pkt->n_active++;

	sg_grp->most_recent = item;
	gm_sg_expiry_cancel(sg_grp);

	/* install the new exclude list; a duplicate means this packet's
	 * S,G item is redundant and gets voided (sg = NULL)
	 */
	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *item_src;

		item_src = &pkt->items[offs + 1 + i];
		item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative,
						 item_src);

		if (item_dup)
			item_src->sg = NULL;
		else {
			pkt->n_active++;
			gm_sg_update(item_src->sg, false);
		}
	}

	/* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
	 * to get lower PIM churn/flapping
	 */
	gm_sg_update(sg_grp, false);
}
798
799CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state");
800/* on receiving a query, we need to update our robustness/query interval to
801 * match, so we correctly process group/source specific queries after last
802 * member leaves
803 */
804
/* parse and apply one received MLDv2 report.
 *
 * data/len cover the report past the ICMPv6 header.  Builds one
 * gm_packet_state covering all records in the packet (so they share
 * expiry), running pass 1 (state removal) per record while parsing, and
 * pass 2 (state creation) after validation.
 */
static void gm_handle_v2_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v2_report_hdr *hdr;
	size_t i, n_records, max_entries;
	struct gm_packet_state *pkt;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv2 report (truncated header)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	/* errors after this may at least partially process the packet */
	gm_ifp->stats.rx_new_report++;

	hdr = (struct mld_v2_report_hdr *)data;
	data += sizeof(*hdr);
	len -= sizeof(*hdr);

	/* can't have more *,G and S,G items than there is space for ipv6
	 * addresses, so just use this to allocate temporary buffer
	 */
	max_entries = len / sizeof(pim_addr);
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	/* may be NULL for a first-time subscriber; created below after
	 * we know the packet carries usable state
	 */
	pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);

	n_records = ntohs(hdr->n_records);

	/* validate & remove state in v2_pass1() */
	for (i = 0; i < n_records; i++) {
		struct mld_v2_rec_hdr *rechdr;
		size_t n_src, record_size;

		if (len < sizeof(*rechdr)) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated record header)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		rechdr = (struct mld_v2_rec_hdr *)data;
		data += sizeof(*rechdr);
		len -= sizeof(*rechdr);

		n_src = ntohs(rechdr->n_src);
		record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4;

		if (len < record_size) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated source list)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}
		if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) {
			/* NOTE(review): a non-multicast group address is
			 * counted as rx_trunc_report even though it isn't a
			 * truncation - confirm whether a separate counter
			 * is intended
			 */
			zlog_warn(
				log_pkt_src(
					"malformed MLDv2 report (invalid group %pI6)"),
				&rechdr->grp);
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		data += record_size;
		len -= record_size;

		gm_handle_v2_pass1(pkt, rechdr);
	}

	/* nothing in the packet resulted in state to create => done */
	if (!pkt->n_active) {
		gm_subscriber_drop(&pkt->subscriber);
		XFREE(MTYPE_GM_STATE, pkt);
		return;
	}

	/* shrink to the actually-used item count */
	pkt = XREALLOC(MTYPE_GM_STATE, pkt,
		       offsetof(struct gm_packet_state, items[pkt->n_active]));
	pkt->n_sg = pkt->n_active;
	/* pass 2 re-counts n_active for items that install state */
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	for (i = 0; i < pkt->n_sg; i++)
		if (!pkt->items[i].is_excl)
			gm_handle_v2_pass2_incl(pkt, i);
		else {
			gm_handle_v2_pass2_excl(pkt, i);
			/* skip over the trailing exclude-list items */
			i += pkt->items[i].n_exclude;
		}

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}
908
/* parse and apply one received MLDv1 report, by mapping it onto the v2
 * machinery as an IS_EXCLUDE{} (i.e. plain join) for the group, from
 * the untracked dummy subscriber (v1 report suppression makes real
 * per-host tracking impossible).
 */
static void gm_handle_v1_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_packet_state *pkt;
	struct gm_sg *grp;
	struct gm_packet_sg *item;
	size_t max_entries;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 report (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_report++;

	hdr = (struct mld_v1_pkt *)data;

	/* a v1 report describes exactly one group */
	max_entries = 1;
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);

	/* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */

	grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY);
	if (!grp)
		grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY);

	item = gm_packet_sg_setup(pkt, grp, true, false);
	item->n_exclude = 0;
	CPP_NOTICE("set v1-seen timer on grp here");

	/* } */

	/* pass2 will count n_active back up to 1. Also since a v1 report
	 * has exactly 1 group, we can skip the realloc() that v2 needs here.
	 */
	assert(pkt->n_active == 1);
	pkt->n_sg = pkt->n_active;
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	/* pass2 covers installing state & removing old state; all the v1
	 * compat is handled at this point.
	 *
	 * Note that "old state" may be v2; subscribers will switch from v2
	 * reports to v1 reports when the querier changes from v2 to v1. So,
	 * limiting this to v1 would be wrong.
	 */
	gm_handle_v2_pass2_excl(pkt, 0);

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}
975
/* parse and apply one received MLDv1 leave ("done") message: drop the
 * untracked dummy subscriber's *,G join for the group, if it exists.
 * Equivalent to a v2 CHANGE_TO_INCLUDE{} for the group.
 */
static void gm_handle_v1_leave(struct gm_if *gm_ifp,
			       const struct sockaddr_in6 *pkt_src, char *data,
			       size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_subscriber *subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 leave (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_leave++;

	hdr = (struct mld_v1_pkt *)data;

	/* no dummy subscriber => no v1 state to remove => done */
	subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
	if (!subscriber)
		return;

	/* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */

	grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY);
	if (grp) {
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
		if (old_grp) {
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);
			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
		}
	}

	/* } */

	/* nothing more to do here, pass2 is no-op for leaves */
	gm_subscriber_drop(&subscriber);
}
1018
1019/* for each general query received (or sent), a timer is started to expire
1020 * _everything_ at the appropriate time (including robustness multiplier).
1021 *
1022 * So when this timer hits, all packets - with all of their items - that were
1023 * received *before* the query are aged out, and state updated accordingly.
1024 * Note that when we receive a refresh/update, the previous/old packet is
1025 * already dropped and replaced with a new one, so in normal steady-state
1026 * operation, this timer won't be doing anything.
1027 *
1028 * Additionally, if a subscriber actively leaves a group, that goes through
1029 * its own path too and won't hit this. This is really only triggered when a
1030 * host straight up disappears.
1031 */
/* general expiry timer callback: for each pending general query whose
 * expiry time has been reached, age out all packets received before
 * that query; reschedule for the next pending entry if its expiry is
 * still in the future.  See block comment above for the big picture.
 */
static void gm_t_expire(struct event *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	struct gm_packet_state *pkt;

	zlog_info(log_ifp("general expiry timer"));

	while (gm_ifp->n_pending) {
		/* pending[] is ordered, oldest query first */
		struct gm_general_pending *pend = gm_ifp->pending;
		struct timeval remain;
		int64_t remain_ms;

		/* NOTE(review): despite the name, this looks like it holds
		 * microseconds (hence the /1000 to print ms below) -
		 * confirm against monotime_until()
		 */
		remain_ms = monotime_until(&pend->expiry, &remain);
		if (remain_ms > 0) {
			if (PIM_DEBUG_GM_EVENTS)
				zlog_debug(
					log_ifp("next general expiry in %" PRId64 "ms"),
					remain_ms / 1000);

			thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
					    &remain, &gm_ifp->t_expire);
			return;
		}

		/* expires list is insertion-ordered by receive time, so we
		 * can stop at the first packet newer than the query
		 */
		while ((pkt = gm_packet_expires_first(gm_ifp->expires))) {
			if (timercmp(&pkt->received, &pend->query, >=))
				break;

			if (PIM_DEBUG_GM_PACKETS)
				zlog_debug(log_ifp("expire packet %p"), pkt);
			gm_packet_drop(pkt, true);
		}

		/* pop the handled entry off the front of pending[] */
		gm_ifp->n_pending--;
		memmove(gm_ifp->pending, gm_ifp->pending + 1,
			gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
	}

	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("next general expiry waiting for query"));
}
1073
1074/* NB: the receive handlers will also run when sending packets, since we
1075 * receive our own packets back in.
1076 */
/* a general query was received (or sent by us - we see our own packets
 * too): queue up a pending general expiry at now + expire_wait, pruning
 * any queued entries this one supersedes, and arm the expiry timer if
 * this is the first pending entry.
 */
static void gm_handle_q_general(struct gm_if *gm_ifp,
				struct gm_query_timers *timers)
{
	struct timeval now, expiry;
	struct gm_general_pending *pend;

	monotime(&now);
	timeradd(&now, &timers->expire_wait, &expiry);

	while (gm_ifp->n_pending) {
		pend = &gm_ifp->pending[gm_ifp->n_pending - 1];

		if (timercmp(&pend->expiry, &expiry, <))
			break;

		/* if we end up here, the last item in pending[] has an expiry
		 * later than the expiry for this query. But our query time
		 * (now) is later than that of the item (because, well, that's
		 * how time works.) This makes this query meaningless since
		 * it's "supersetted" within the preexisting query
		 */

		if (PIM_DEBUG_GM_TRACE_DETAIL)
			zlog_debug(
				log_ifp("zapping supersetted general timer %pTVMu"),
				&pend->expiry);

		gm_ifp->n_pending--;
		if (!gm_ifp->n_pending)
			THREAD_OFF(gm_ifp->t_expire);
	}

	/* people might be messing with their configs or something */
	if (gm_ifp->n_pending == array_size(gm_ifp->pending))
		return;

	pend = &gm_ifp->pending[gm_ifp->n_pending];
	pend->query = now;
	pend->expiry = expiry;

	if (!gm_ifp->n_pending++) {
		/* first entry => timer not running yet, start it */
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(
				log_ifp("starting general timer @ 0: %pTVMu"),
				&pend->expiry);
		thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
				    &timers->expire_wait, &gm_ifp->t_expire);
	} else if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
			   gm_ifp->n_pending, &pend->expiry);
}
1128
/* Timer callback: a single S,G (or *,G) entry's query response window
 * closed without a refreshing report - drop its positive subscriptions
 * and re-evaluate the entry's state.
 */
static void gm_t_sg_expire(struct event *t)
{
	struct gm_sg *sg = THREAD_ARG(t);
	struct gm_if *gm_ifp = sg->iface;
	struct gm_packet_sg *item;

	/* this timer is only ever armed in the two *_EXPIRING states */
	assertf(sg->state == GM_SG_JOIN_EXPIRING ||
			sg->state == GM_SG_NOPRUNE_EXPIRING,
		"%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t);

	frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
		/* this will also drop EXCLUDE mode S,G lists together with
		 * the *,G entry
		 */
		gm_packet_sg_drop(item);

	/* subs_negative items are only timed out together with the *,G entry
	 * since we won't get any reports for a group-and-source query
	 */
	gm_sg_update(sg, true);
}
1150
/* Return true if sg has a positive subscription received more recently
 * than `ref` (with the configured timing fuzz added) - i.e. a report
 * already arrived for the query we would otherwise be expiring on.
 *
 * sg->most_recent is a cache; if it is unset, rescan subs_positive for
 * the newest packet.
 */
static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg,
			       struct timeval ref)
{
	struct gm_packet_state *pkt;

	if (!sg->most_recent) {
		struct gm_packet_state *best_pkt = NULL;
		struct gm_packet_sg *item;

		/* no cached most-recent entry - scan all positive subs */
		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
			pkt = gm_packet_sg2state(item);

			if (!best_pkt ||
			    timercmp(&pkt->received, &best_pkt->received, >)) {
				best_pkt = pkt;
				sg->most_recent = item;
			}
		}
	}
	if (sg->most_recent) {
		struct timeval fuzz;

		pkt = gm_packet_sg2state(sg->most_recent);

		/* this shouldn't happen on plain old real ethernet segment,
		 * but on something like a VXLAN or VPLS it is very possible
		 * that we get a report before the query that triggered it.
		 * (imagine a triangle scenario with 3 datacenters, it's very
		 * possible A->B + B->C is faster than A->C due to odd routing)
		 *
		 * This makes a little tolerance allowance to handle that case.
		 */
		timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz);

		if (timercmp(&fuzz, &ref, >))
			return true;
	}
	return false;
}
1190
/* Arm (or shorten) sg->t_sg_expire to fire after expire_wait.
 *
 * No-ops if sg is NULL, already pruned, or a sufficiently recent report
 * exists (gm_sg_check_recent).  An already-running timer is only
 * replaced if the new deadline is EARLIER - the timer may only ever be
 * tightened, never extended, by a new query.
 */
static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
			      struct timeval expire_wait)
{
	struct timeval now;

	if (!sg)
		return;
	if (sg->state == GM_SG_PRUNE)
		return;

	monotime(&now);
	if (gm_sg_check_recent(gm_ifp, sg, now))
		return;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait);

	if (sg->t_sg_expire) {
		struct timeval remain;

		remain = thread_timer_remain(sg->t_sg_expire);
		/* existing timer already fires sooner - keep it */
		if (timercmp(&remain, &expire_wait, <=))
			return;

		THREAD_OFF(sg->t_sg_expire);
	}

	thread_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
			    &sg->t_sg_expire);
}
1221
1222static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
1223 struct gm_query_timers *timers, pim_addr grp,
1224 const pim_addr *srcs, size_t n_src)
1225{
1226 struct gm_sg *sg;
1227 size_t i;
1228
1229 for (i = 0; i < n_src; i++) {
1230 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
1231 gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1232 }
1233}
1234
static void gm_t_grp_expire(struct event *t)
{
	/* if we're here, that means when we received the group-specific query
	 * there was one or more active S,G for this group. For *,G the timer
	 * in sg->t_sg_expire is running separately and gets cancelled when we
	 * receive a report, so that work is left to gm_t_sg_expire and we
	 * shouldn't worry about it here.
	 */
	struct gm_grp_pending *pend = THREAD_ARG(t);
	struct gm_if *gm_ifp = pend->iface;
	struct gm_sg *sg, *sg_start, sg_ref = {};

	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);

	/* gteq lookup - try to find *,G or S,G (S,G is > *,G)
	 * could technically be gt to skip a possible *,G
	 */
	sg_ref.sgaddr.grp = pend->grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	/* iterate all S,G entries belonging to pend->grp */
	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
		struct gm_packet_sg *item;

		if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
			break;
		if (pim_addr_is_any(sg->sgaddr.src))
			/* handled by gm_t_sg_expire / sg->t_sg_expire */
			continue;
		if (gm_sg_check_recent(gm_ifp, sg, pend->query))
			/* a report refreshed this S,G - keep it */
			continue;

		/* we may also have a group-source-specific query going on in
		 * parallel. But if we received nothing for the *,G query,
		 * the S,G query is kinda irrelevant.
		 */
		THREAD_OFF(sg->t_sg_expire);

		frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
			/* this will also drop the EXCLUDE S,G lists */
			gm_packet_sg_drop(item);

		gm_sg_update(sg, true);
	}

	/* one-shot: the pending entry is consumed here */
	gm_grp_pends_del(gm_ifp->grp_pends, pend);
	XFREE(MTYPE_GM_GRP_PENDING, pend);
}
1284
/* Handle a group-specific query for grp: start the *,G expiry timer if a
 * *,G entry exists, and - if the group also has S,G entries - create or
 * tighten a gm_grp_pending record whose timer (gm_t_grp_expire) will
 * sweep those S,G entries when the response window closes.
 */
static void gm_handle_q_group(struct gm_if *gm_ifp,
			      struct gm_query_timers *timers, pim_addr grp)
{
	struct gm_sg *sg, sg_ref = {};
	struct gm_grp_pending *pend, pend_ref = {};

	sg_ref.sgaddr.grp = grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	/* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
	sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
		/* we have nothing at all for this group - don't waste RAM */
		return;

	if (pim_addr_is_any(sg->sgaddr.src)) {
		/* actually found *,G entry here */
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
				   &grp);
		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);

		sg = gm_sgs_next(gm_ifp->sgs, sg);
		if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
			/* no S,G for this group */
			return;
	}

	pend_ref.grp = grp;
	pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);

	if (pend) {
		struct timeval remain;

		/* only replace an existing timer if it would fire LATER */
		remain = thread_timer_remain(pend->t_expire);
		if (timercmp(&remain, &timers->expire_wait, <=))
			return;

		THREAD_OFF(pend->t_expire);
	} else {
		pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
		pend->grp = grp;
		pend->iface = gm_ifp;
		gm_grp_pends_add(gm_ifp->grp_pends, pend);
	}

	monotime(&pend->query);
	thread_add_timer_tv(router->master, gm_t_grp_expire, pend,
			    &timers->expire_wait, &pend->t_expire);

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
			   pend->t_expire);
}
1339
1340static void gm_bump_querier(struct gm_if *gm_ifp)
1341{
1342 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1343
1344 THREAD_OFF(gm_ifp->t_query);
1345
1346 if (pim_addr_is_any(pim_ifp->ll_lowest))
1347 return;
1348 if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
1349 return;
1350
1351 gm_ifp->n_startup = gm_ifp->cur_qrv;
1352
1353 thread_execute(router->master, gm_t_query, gm_ifp, 0);
1354}
1355
/* Timer callback: the other (lower-addressed) querier went silent long
 * enough - take over querier duty ourselves and restart the startup
 * query burst.
 */
static void gm_t_other_querier(struct event *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;

	zlog_info(log_ifp("other querier timer expired"));

	gm_ifp->querier = pim_ifp->ll_lowest;
	gm_ifp->n_startup = gm_ifp->cur_qrv;

	thread_execute(router->master, gm_t_query, gm_ifp, 0);
}
1368
/* Process a received MLD query (MLDv1 or MLDv2).
 *
 * Validates size/group/source-list/destination, participates in querier
 * election (lowest link-local address wins), decodes the query timers,
 * and dispatches to the general / group / group-and-source handlers.
 * Malformed or misaddressed queries are dropped with a stats bump.
 */
static void gm_handle_query(struct gm_if *gm_ifp,
			    const struct sockaddr_in6 *pkt_src,
			    pim_addr *pkt_dst, char *data, size_t len)
{
	struct mld_v2_query_hdr *hdr;
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct gm_query_timers timers;
	bool general_query;

	/* only two valid sizes: exactly a v1 packet, or >= v2 header */
	if (len < sizeof(struct mld_v2_query_hdr) &&
	    len != sizeof(struct mld_v1_pkt)) {
		zlog_warn(log_pkt_src("invalid query size"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	/* v1 packet layout matches the leading fields of the v2 header,
	 * so this cast is used for both variants
	 */
	hdr = (struct mld_v2_query_hdr *)data;
	general_query = pim_addr_is_any(hdr->grp);

	if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) {
		zlog_warn(log_pkt_src(
				  "malformed MLDv2 query (invalid group %pI6)"),
			  &hdr->grp);
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	if (len >= sizeof(struct mld_v2_query_hdr)) {
		size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);

		if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 query (truncated source list)"));
			gm_ifp->stats.rx_drop_malformed++;
			return;
		}

		if (general_query && src_space) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 query (general query with non-empty source list)"));
			gm_ifp->stats.rx_drop_malformed++;
			return;
		}
	}

	/* accepting queries unicast to us (or addressed to a wrong group)
	 * can mess up querier election as well as cause us to terminate
	 * traffic (since after a unicast query no reports will be coming in)
	 */
	if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) {
		if (pim_addr_is_any(hdr->grp)) {
			zlog_warn(
				log_pkt_src(
					"wrong destination %pPA for general query"),
				pkt_dst);
			gm_ifp->stats.rx_drop_dstaddr++;
			return;
		}

		if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) {
			gm_ifp->stats.rx_drop_dstaddr++;
			zlog_warn(
				log_pkt_src(
					"wrong destination %pPA for group specific query"),
				pkt_dst);
			return;
		}
	}

	/* querier election: lower source address wins */
	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
		if (PIM_DEBUG_GM_EVENTS)
			zlog_debug(
				log_pkt_src("replacing elected querier %pPA"),
				&gm_ifp->querier);

		gm_ifp->querier = pkt_src->sin6_addr;
	}

	if (len == sizeof(struct mld_v1_pkt)) {
		/* v1 query: no QRV/QQIC on the wire; use our own config */
		timers.qrv = gm_ifp->cur_qrv;
		timers.max_resp_ms = hdr->max_resp_code;
		timers.qqic_ms = gm_ifp->cur_query_intv;
	} else {
		/* QRV of 0 means "more than 7" - treat as 8 */
		timers.qrv = (hdr->flags & 0x7) ?: 8;
		timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code);
		timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000;
	}
	timers.fuzz = gm_ifp->cfg_timing_fuzz;

	gm_expiry_calc(&timers);

	if (PIM_DEBUG_GM_TRACE_DETAIL)
		zlog_debug(
			log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
			timers.qrv, timers.max_resp_ms, timers.qqic_ms,
			&timers.expire_wait);

	/* a lower-addressed querier is active: stop sending queries
	 * ourselves and (re)start the other-querier-present timer
	 */
	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
		unsigned int other_ms;

		THREAD_OFF(gm_ifp->t_query);
		THREAD_OFF(gm_ifp->t_other_querier);

		other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
		thread_add_timer_msec(router->master, gm_t_other_querier,
				      gm_ifp, other_ms,
				      &gm_ifp->t_other_querier);
	}

	if (len == sizeof(struct mld_v1_pkt)) {
		if (general_query) {
			gm_handle_q_general(gm_ifp, &timers);
			gm_ifp->stats.rx_query_old_general++;
		} else {
			gm_handle_q_group(gm_ifp, &timers, hdr->grp);
			gm_ifp->stats.rx_query_old_group++;
		}
		return;
	}

	/* v2 query - [S]uppress bit */
	if (hdr->flags & 0x8) {
		gm_ifp->stats.rx_query_new_sbit++;
		return;
	}

	if (general_query) {
		gm_handle_q_general(gm_ifp, &timers);
		gm_ifp->stats.rx_query_new_general++;
	} else if (!ntohs(hdr->n_src)) {
		gm_handle_q_group(gm_ifp, &timers, hdr->grp);
		gm_ifp->stats.rx_query_new_group++;
	} else {
		gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
				     ntohs(hdr->n_src));
		gm_ifp->stats.rx_query_new_groupsrc++;
	}
}
1507
/* Verify the ICMPv6 checksum (over the IPv6 pseudo-header) and dispatch
 * the MLD message to the matching type handler.  Unknown ICMPv6 types
 * are silently ignored (the socket filter should already exclude them).
 */
static void gm_rx_process(struct gm_if *gm_ifp,
			  const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst,
			  void *data, size_t pktlen)
{
	struct icmp6_plain_hdr *icmp6 = data;
	uint16_t pkt_csum, ref_csum;
	struct ipv6_ph ph6 = {
		.src = pkt_src->sin6_addr,
		.dst = *pkt_dst,
		.ulpl = htons(pktlen),
		.next_hdr = IPPROTO_ICMPV6,
	};

	/* zero the checksum field in-place before recomputing */
	pkt_csum = icmp6->icmp6_cksum;
	icmp6->icmp6_cksum = 0;
	ref_csum = in_cksum_with_ph6(&ph6, data, pktlen);

	if (pkt_csum != ref_csum) {
		zlog_warn(
			log_pkt_src(
				"(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
			pkt_dst, pkt_csum, ref_csum);
		gm_ifp->stats.rx_drop_csum++;
		return;
	}

	/* payload starts right after the plain ICMPv6 header */
	data = (icmp6 + 1);
	pktlen -= sizeof(*icmp6);

	switch (icmp6->icmp6_type) {
	case ICMP6_MLD_QUERY:
		gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen);
		break;
	case ICMP6_MLD_V1_REPORT:
		gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V1_DONE:
		gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V2_REPORT:
		gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen);
		break;
	}
}
1552
/* Scan an IPv6 Hop-by-Hop Options header for a Router Alert option
 * carrying `alert_type` (0 = MLD).  Returns true if found; false on a
 * truncated/malformed header or if the option is absent.
 *
 * `hopopts` points at the raw HbH header (next-header byte, length in
 * 8-byte units minus one, then TLV options); `hopopt_len` is the number
 * of bytes actually available at that pointer.
 */
static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
				 uint16_t alert_type)
{
	uint8_t *pos, *end;

	/* header is at least one 8-byte unit ... */
	if (hopopt_len < 8)
		return false;
	/* ... and its declared length must fit in what we were given */
	if (hopopt_len < (hopopts[1] + 1U) * 8U)
		return false;

	end = hopopts + (hopopts[1] + 1) * 8;
	pos = hopopts + 2; /* skip next-header + length bytes */

	while (pos < end) {
		if (pos[0] == IP6OPT_PAD1) {
			/* Pad1 is a single byte, no length field */
			pos++;
			continue;
		}

		/* need type + length, and the option body must fit */
		if (pos + 2 > end || pos + 2 + pos[1] > end)
			break;

		if (pos[0] == IP6OPT_ROUTER_ALERT && pos[1] == 2) {
			uint16_t have_type =
				(uint16_t)((pos[2] << 8) | pos[3]);

			if (have_type == alert_type)
				return true;
		}

		pos += 2 + pos[1];
	}
	return false;
}
1588
/* Read callback on the per-VRF MLD raw socket.
 *
 * Reads one packet (falling back to a heap buffer if it exceeds the 2k
 * stack buffer), extracts IPV6_PKTINFO / IPV6_HOPOPTS / IPV6_HOPLIMIT
 * ancillary data, performs sanity checks (hop limit 1, Router Alert
 * option present, link-local source), then hands the payload to
 * gm_rx_process().  Re-arms itself first so a malformed packet cannot
 * stall reception.
 */
static void gm_t_recv(struct event *t)
{
	struct pim_instance *pim = THREAD_ARG(t);
	union {
		char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
			 CMSG_SPACE(256) /* hop options */ +
			 CMSG_SPACE(sizeof(int)) /* hopcount */];
		struct cmsghdr align;
	} cmsgbuf;
	struct cmsghdr *cmsg;
	struct in6_pktinfo *pktinfo = NULL;
	uint8_t *hopopts = NULL;
	size_t hopopt_len = 0;
	int *hoplimit = NULL;
	char rxbuf[2048];
	struct msghdr mh[1] = {};
	struct iovec iov[1];
	struct sockaddr_in6 pkt_src[1] = {};
	ssize_t nread;
	size_t pktlen;

	/* re-arm the read handler immediately */
	thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
			&pim->t_gm_recv);

	iov->iov_base = rxbuf;
	iov->iov_len = sizeof(rxbuf);

	mh->msg_name = pkt_src;
	mh->msg_namelen = sizeof(pkt_src);
	mh->msg_control = cmsgbuf.buf;
	mh->msg_controllen = sizeof(cmsgbuf.buf);
	mh->msg_iov = iov;
	mh->msg_iovlen = array_size(iov);
	mh->msg_flags = 0;

	/* PEEK+TRUNC first to learn the real size without consuming */
	nread = recvmsg(pim->gm_socket, mh, MSG_PEEK | MSG_TRUNC);
	if (nread <= 0) {
		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
		pim->gm_rx_drop_sys++;
		return;
	}

	if ((size_t)nread > sizeof(rxbuf)) {
		/* oversized packet - read it into a heap buffer instead */
		iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread);
		iov->iov_len = nread;
	}
	nread = recvmsg(pim->gm_socket, mh, 0);
	if (nread <= 0) {
		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
		pim->gm_rx_drop_sys++;
		goto out_free;
	}

	struct interface *ifp;

	/* sin6_scope_id carries the receiving ifindex for link-local src */
	ifp = if_lookup_by_index(pkt_src->sin6_scope_id, pim->vrf->vrf_id);
	if (!ifp || !ifp->info)
		goto out_free;

	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;

	if (!gm_ifp)
		/* MLD not running on this interface */
		goto out_free;

	for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) {
		if (cmsg->cmsg_level != SOL_IPV6)
			continue;

		switch (cmsg->cmsg_type) {
		case IPV6_PKTINFO:
			pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			break;
		case IPV6_HOPOPTS:
			hopopts = CMSG_DATA(cmsg);
			hopopt_len = cmsg->cmsg_len - sizeof(*cmsg);
			break;
		case IPV6_HOPLIMIT:
			hoplimit = (int *)CMSG_DATA(cmsg);
			break;
		}
	}

	if (!pktinfo || !hoplimit) {
		/* we requested both via setsockopt - missing = kernel bug */
		zlog_err(log_ifp(
			"BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
		pim->gm_rx_drop_sys++;
		goto out_free;
	}

	/* MLD packets must be link-local scoped, hop limit exactly 1 */
	if (*hoplimit != 1) {
		zlog_err(log_pkt_src("packet with hop limit != 1"));
		/* spoofing attempt => count on srcaddr counter */
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) {
		zlog_err(log_pkt_src(
			"packet without IPv6 Router Alert MLD option"));
		gm_ifp->stats.rx_drop_ra++;
		goto out_free;
	}

	if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr))
		/* reports from :: happen in normal operation for DAD, so
		 * don't spam log messages about this
		 */
		goto out_free;

	if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) {
		zlog_warn(log_pkt_src("packet from invalid source address"));
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	pktlen = nread;
	if (pktlen < sizeof(struct icmp6_plain_hdr)) {
		zlog_warn(log_pkt_src("truncated packet"));
		gm_ifp->stats.rx_drop_malformed++;
		goto out_free;
	}

	gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base,
		      pktlen);

out_free:
	if (iov->iov_base != rxbuf)
		XFREE(MTYPE_GM_PACKET, iov->iov_base);
}
1719
/* Build and send an MLD query on gm_ifp.
 *
 * grp == :: sends a general query (to ff02::1); otherwise a group (or,
 * with srcs/n_srcs, group-and-source) specific query to the group
 * address itself.  The version (MLDv1 vs v2) follows cur_version; s_bit
 * sets the v2 [S]uppress router-side processing flag.  The Router Alert
 * hop-by-hop option and source address are attached via ancillary data.
 */
static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
			  const pim_addr *srcs, size_t n_srcs, bool s_bit)
{
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct sockaddr_in6 dstaddr = {
		.sin6_family = AF_INET6,
		.sin6_scope_id = gm_ifp->ifp->ifindex,
	};
	struct {
		struct icmp6_plain_hdr hdr;
		struct mld_v2_query_hdr v2_query;
	} query = {
		/* clang-format off */
		.hdr = {
			.icmp6_type = ICMP6_MLD_QUERY,
			.icmp6_code = 0,
		},
		.v2_query = {
			.grp = grp,
		},
		/* clang-format on */
	};
	struct ipv6_ph ph6 = {
		.src = pim_ifp->ll_lowest,
		.ulpl = htons(sizeof(query)),
		.next_hdr = IPPROTO_ICMPV6,
	};
	union {
		char buf[CMSG_SPACE(8) /* hop options */ +
			 CMSG_SPACE(sizeof(struct in6_pktinfo))];
		struct cmsghdr align;
	} cmsg = {};
	struct cmsghdr *cmh;
	struct msghdr mh[1] = {};
	struct iovec iov[3];
	size_t iov_len;
	ssize_t ret, expect_ret;
	uint8_t *dp;
	struct in6_pktinfo *pktinfo;

	if (if_is_loopback(gm_ifp->ifp)) {
		/* Linux is a bit odd with multicast on loopback */
		ph6.src = in6addr_loopback;
		dstaddr.sin6_addr = in6addr_loopback;
	} else if (pim_addr_is_any(grp))
		dstaddr.sin6_addr = gm_all_hosts;
	else
		dstaddr.sin6_addr = grp;

	query.v2_query.max_resp_code =
		mld_max_resp_encode(gm_ifp->cur_max_resp);
	/* QRV > 7 cannot be encoded; field is set to 0 in that case */
	query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? gm_ifp->cur_qrv : 0;
	if (s_bit)
		query.v2_query.flags |= 0x08;
	query.v2_query.qqic =
		igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000);
	query.v2_query.n_src = htons(n_srcs);

	ph6.dst = dstaddr.sin6_addr;

	/* ph6 not included in sendmsg */
	iov[0].iov_base = &ph6;
	iov[0].iov_len = sizeof(ph6);
	iov[1].iov_base = &query;
	if (gm_ifp->cur_version == GM_MLDV1) {
		/* v1 query is just the short header, no source list */
		iov_len = 2;
		iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt);
	} else if (!n_srcs) {
		iov_len = 2;
		iov[1].iov_len = sizeof(query);
	} else {
		iov[1].iov_len = sizeof(query);
		iov[2].iov_base = (void *)srcs;
		iov[2].iov_len = n_srcs * sizeof(srcs[0]);
		iov_len = 3;
	}

	/* checksum covers pseudo-header (iov[0]) + payload */
	query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);

	if (PIM_DEBUG_GM_PACKETS)
		zlog_debug(
			log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
			&pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);

	mh->msg_name = &dstaddr;
	mh->msg_namelen = sizeof(dstaddr);
	mh->msg_iov = iov + 1;
	mh->msg_iovlen = iov_len - 1;
	mh->msg_control = &cmsg;
	mh->msg_controllen = sizeof(cmsg.buf);

	/* hop-by-hop options: one 8-byte unit with the Router Alert TLV */
	cmh = CMSG_FIRSTHDR(mh);
	cmh->cmsg_level = IPPROTO_IPV6;
	cmh->cmsg_type = IPV6_HOPOPTS;
	cmh->cmsg_len = CMSG_LEN(8);
	dp = CMSG_DATA(cmh);
	*dp++ = 0; /* next header */
	*dp++ = 0; /* length (8-byte blocks, minus 1) */
	*dp++ = IP6OPT_ROUTER_ALERT; /* router alert */
	*dp++ = 2; /* length */
	*dp++ = 0; /* value (2 bytes) */
	*dp++ = 0; /* value (2 bytes) (0 = MLD) */
	*dp++ = 0; /* pad0 */
	*dp++ = 0; /* pad0 */

	/* pktinfo: pin the outgoing interface and source address */
	cmh = CMSG_NXTHDR(mh, cmh);
	cmh->cmsg_level = IPPROTO_IPV6;
	cmh->cmsg_type = IPV6_PKTINFO;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
	pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmh);
	pktinfo->ipi6_ifindex = gm_ifp->ifp->ifindex;
	pktinfo->ipi6_addr = gm_ifp->cur_ll_lowest;

	expect_ret = iov[1].iov_len;
	if (iov_len == 3)
		expect_ret += iov[2].iov_len;

	frr_with_privs (&pimd_privs) {
		ret = sendmsg(gm_ifp->pim->gm_socket, mh, 0);
	}

	if (ret != expect_ret) {
		zlog_warn(log_ifp("failed to send query: %m"));
		gm_ifp->stats.tx_query_fail++;
	} else {
		if (gm_ifp->cur_version == GM_MLDV1) {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_old_general++;
			else
				gm_ifp->stats.tx_query_old_group++;
		} else {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_new_general++;
			else if (!n_srcs)
				gm_ifp->stats.tx_query_new_group++;
			else
				gm_ifp->stats.tx_query_new_groupsrc++;
		}
	}
}
1860
e6685141 1861static void gm_t_query(struct event *t)
5e5034b0
DL
1862{
1863 struct gm_if *gm_ifp = THREAD_ARG(t);
1864 unsigned int timer_ms = gm_ifp->cur_query_intv;
1865
1866 if (gm_ifp->n_startup) {
1867 timer_ms /= 4;
1868 gm_ifp->n_startup--;
1869 }
1870
1871 thread_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
1872 &gm_ifp->t_query);
1873
1874 gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
1875}
1876
/* Timer trampoline: fires the next retransmission of an S,G (or *,G)
 * specific query via gm_trigger_specific().
 */
static void gm_t_sg_query(struct event *t)
{
	struct gm_sg *sg = THREAD_ARG(t);

	gm_trigger_specific(sg);
}
1883
/* S,G specific queries (triggered by a member leaving) get a little slack
 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
 */
static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
{
	struct gm_if *gm_ifp = pend_gsq->iface;

	gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
		      pend_gsq->s_bit);

	/* one-shot: the pending aggregate is consumed here */
	gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
	XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
}
1897
/* Timer trampoline: the bundling slack for a pending group-and-source
 * query has elapsed - send the aggregated query now.
 */
static void gm_t_gsq_pend(struct event *t)
{
	struct gm_gsq_pending *pend_gsq = THREAD_ARG(t);

	gm_send_specific(pend_gsq);
}
1904
/* Send (or schedule) a specific query for sg after a member left.
 *
 * Decrements sg->n_query and re-arms the retransmit timer while more
 * transmissions remain.  Only the elected querier actually transmits.
 * *,G queries go out immediately; S,G sources are collected into a
 * gm_gsq_pending aggregate (keyed by group + s_bit) and flushed either
 * after a short fuzz delay or when the source array fills up.
 */
static void gm_trigger_specific(struct gm_sg *sg)
{
	struct gm_if *gm_ifp = sg->iface;
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct gm_gsq_pending *pend_gsq, ref = {};

	sg->n_query--;
	if (sg->n_query)
		thread_add_timer_msec(router->master, gm_t_sg_query, sg,
				      gm_ifp->cur_query_intv_trig,
				      &sg->t_sg_query);

	/* not the elected querier, or socket gone - nothing to send */
	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
		return;
	if (gm_ifp->pim->gm_socket == -1)
		return;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "triggered query"));

	if (pim_addr_is_any(sg->sgaddr.src)) {
		/* *,G: plain group-specific query, no bundling needed */
		gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
		return;
	}

	ref.grp = sg->sgaddr.grp;
	ref.s_bit = sg->query_sbit;

	pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
	if (!pend_gsq) {
		pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
		pend_gsq->grp = sg->sgaddr.grp;
		pend_gsq->s_bit = sg->query_sbit;
		pend_gsq->iface = gm_ifp;
		gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);

		/* short delay so parallel leaves for the same group get
		 * bundled into one query
		 */
		thread_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
				    &gm_ifp->cfg_timing_fuzz,
				    &pend_gsq->t_send);
	}

	assert(pend_gsq->n_src < array_size(pend_gsq->srcs));

	pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
	pend_gsq->n_src++;

	if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
		/* source array full - flush immediately */
		THREAD_OFF(pend_gsq->t_send);
		gm_send_specific(pend_gsq);
		pend_gsq = NULL;
	}
}
1957
/* Reference-counted open of the per-VRF MLD raw socket.
 *
 * The socket is shared by all MLD-enabled interfaces in the VRF; only
 * the first caller (or a caller finding the socket closed) actually
 * creates and configures it.  Individual setsockopt failures are logged
 * but not fatal - see inline comments for why each option is set.
 */
static void gm_vrf_socket_incref(struct pim_instance *pim)
{
	struct vrf *vrf = pim->vrf;
	int ret, intval;
	struct icmp6_filter filter[1];

	/* already open and referenced - just bump the count */
	if (pim->gm_socket_if_count++ && pim->gm_socket != -1)
		return;

	/* kernel-side filter: only the four MLD message types */
	ICMP6_FILTER_SETBLOCKALL(filter);
	ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter);
	ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter);
	ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter);
	ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter);

	frr_with_privs (&pimd_privs) {
		pim->gm_socket = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
					    vrf->vrf_id, vrf->name);
		if (pim->gm_socket < 0) {
			zlog_err("(VRF %s) could not create MLD socket: %m",
				 vrf->name);
			return;
		}

		ret = setsockopt(pim->gm_socket, SOL_ICMPV6, ICMP6_FILTER,
				 filter, sizeof(filter));
		if (ret)
			zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
				 vrf->name);

		/* gm_t_recv needs the destination address per packet */
		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVPKTINFO,
				 &intval, sizeof(intval));
		if (ret)
			zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
				 vrf->name);

		/* ... and the hop-by-hop options (Router Alert check) */
		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPOPTS,
				 &intval, sizeof(intval));
		if (ret)
			zlog_err("(VRF %s) failed to set IPV6_HOPOPTS: %m",
				 vrf->name);

		/* ... and the hop limit (must be 1 for MLD) */
		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPLIMIT,
				 &intval, sizeof(intval));
		if (ret)
			zlog_err("(VRF %s) failed to set IPV6_HOPLIMIT: %m",
				 vrf->name);

		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_LOOP,
				 &intval, sizeof(intval));
		if (ret)
			zlog_err(
				"(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
				vrf->name);

		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_HOPS,
				 &intval, sizeof(intval));
		if (ret)
			zlog_err(
				"(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
				vrf->name);

		/* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
		 * RX filtering in Linux. It only means "receive all groups
		 * that something on the system has joined". To actually
		 * receive *all* MLD packets - which is what we need -
		 * multicast routing must be enabled on the interface. And
		 * this only works for MLD packets specifically.
		 *
		 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
		 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
		 *
		 * Also note that the code there explicitly checks for the IPv6
		 * router alert MLD option (which is required by the RFC to be
		 * on MLD packets.) That implies trying to support hosts which
		 * erroneously don't add that option is just not possible.
		 */
		intval = 1;
		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_ALL,
				 &intval, sizeof(intval));
		if (ret)
			zlog_info(
				"(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
				vrf->name);
	}

	thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
			&pim->t_gm_recv);
}
2052
2053static void gm_vrf_socket_decref(struct pim_instance *pim)
2054{
2055 if (--pim->gm_socket_if_count)
2056 return;
2057
2058 THREAD_OFF(pim->t_gm_recv);
2059 close(pim->gm_socket);
2060 pim->gm_socket = -1;
2061}
2062
/* Start MLD operation on an interface: allocate and initialize the
 * gm_if state from the interface's configuration, take a reference on
 * the VRF socket, and join the all-MLDv2-routers group (ff02::16).
 *
 * Preconditions (asserted): PIM is set up on the interface, it has a
 * valid mroute VIF, and MLD is not already running.
 */
static void gm_start(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;

	assert(pim_ifp);
	assert(pim_ifp->pim);
	assert(pim_ifp->mroute_vif_index >= 0);
	assert(!pim_ifp->mld);

	gm_vrf_socket_incref(pim_ifp->pim);

	gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp));
	gm_ifp->ifp = ifp;
	pim_ifp->mld = gm_ifp;
	gm_ifp->pim = pim_ifp->pim;
	monotime(&gm_ifp->started);

	zlog_info(log_ifp("starting MLD"));

	if (pim_ifp->mld_version == 1)
		gm_ifp->cur_version = GM_MLDV1;
	else
		gm_ifp->cur_version = GM_MLDV2;

	/* pull current timer config; note conversions to milliseconds
	 * (query interval is in seconds, response times in deciseconds)
	 */
	gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable;
	gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
	gm_ifp->cur_query_intv_trig =
		pim_ifp->gm_specific_query_max_response_time_dsec * 100;
	gm_ifp->cur_max_resp = pim_ifp->gm_query_max_response_time_dsec * 100;
	gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;

	/* 10ms report-before-query tolerance (see gm_sg_check_recent) */
	gm_ifp->cfg_timing_fuzz.tv_sec = 0;
	gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;

	gm_sgs_init(gm_ifp->sgs);
	gm_subscribers_init(gm_ifp->subscribers);
	gm_packet_expires_init(gm_ifp->expires);
	gm_grp_pends_init(gm_ifp->grp_pends);
	gm_gsq_pends_init(gm_ifp->gsq_pends);

	frr_with_privs (&pimd_privs) {
		struct ipv6_mreq mreq;
		int ret;

		/* all-MLDv2 group */
		mreq.ipv6mr_multiaddr = gm_all_routers;
		mreq.ipv6mr_interface = ifp->ifindex;
		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
				 IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
		if (ret)
			zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
				 ifp->name);
	}
}
2118
e309780f 2119void gm_group_delete(struct gm_if *gm_ifp)
5e5034b0 2120{
e309780f 2121 struct gm_sg *sg;
5e5034b0
DL
2122 struct gm_packet_state *pkt;
2123 struct gm_grp_pending *pend_grp;
2124 struct gm_gsq_pending *pend_gsq;
2125 struct gm_subscriber *subscriber;
e309780f
SP
2126
2127 while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
2128 gm_packet_drop(pkt, false);
2129
2130 while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
2131 THREAD_OFF(pend_grp->t_expire);
2132 XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
2133 }
2134
2135 while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
2136 THREAD_OFF(pend_gsq->t_send);
2137 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
2138 }
2139
2140 while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
2141 THREAD_OFF(sg->t_sg_expire);
2142 assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
2143 &sg->sgaddr);
2144 assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
2145 &sg->sgaddr);
2146
2147 gm_sg_free(sg);
2148 }
2149 while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
2150 assertf(!gm_packets_count(subscriber->packets), "%pPA",
2151 &subscriber->addr);
2152 XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
2153 }
2154}
2155
2156void gm_ifp_teardown(struct interface *ifp)
2157{
2158 struct pim_interface *pim_ifp = ifp->info;
2159 struct gm_if *gm_ifp;
5e5034b0
DL
2160
2161 if (!pim_ifp || !pim_ifp->mld)
2162 return;
2163
2164 gm_ifp = pim_ifp->mld;
2165 gm_ifp->stopping = true;
95b13dc5 2166 if (PIM_DEBUG_GM_EVENTS)
5e5034b0
DL
2167 zlog_debug(log_ifp("MLD stop"));
2168
2169 THREAD_OFF(gm_ifp->t_query);
2170 THREAD_OFF(gm_ifp->t_other_querier);
5e5034b0
DL
2171 THREAD_OFF(gm_ifp->t_expire);
2172
df655593
DL
2173 frr_with_privs (&pimd_privs) {
2174 struct ipv6_mreq mreq;
2175 int ret;
2176
2177 /* all-MLDv2 group */
2178 mreq.ipv6mr_multiaddr = gm_all_routers;
2179 mreq.ipv6mr_interface = ifp->ifindex;
2180 ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
2181 IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
2182 if (ret)
2183 zlog_err(
2184 "(%s) failed to leave ff02::16 (all-MLDv2): %m",
2185 ifp->name);
5e5034b0
DL
2186 }
2187
df655593
DL
2188 gm_vrf_socket_decref(gm_ifp->pim);
2189
e309780f 2190 gm_group_delete(gm_ifp);
5e5034b0
DL
2191
2192 gm_grp_pends_fini(gm_ifp->grp_pends);
2193 gm_packet_expires_fini(gm_ifp->expires);
2194 gm_subscribers_fini(gm_ifp->subscribers);
2195 gm_sgs_fini(gm_ifp->sgs);
2196
2197 XFREE(MTYPE_GM_IFACE, gm_ifp);
2198 pim_ifp->mld = NULL;
2199}
2200
2201static void gm_update_ll(struct interface *ifp)
2202{
2203 struct pim_interface *pim_ifp = ifp->info;
f4e8f5d4 2204 struct gm_if *gm_ifp = pim_ifp->mld;
5e5034b0
DL
2205 bool was_querier;
2206
2207 was_querier =
2208 !IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
2209 !pim_addr_is_any(gm_ifp->querier);
2210
2211 gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
2212 if (was_querier)
2213 gm_ifp->querier = pim_ifp->ll_lowest;
2214 THREAD_OFF(gm_ifp->t_query);
2215
2216 if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
2217 if (was_querier)
2218 zlog_info(log_ifp(
2219 "lost link-local address, stopping querier"));
2220 return;
2221 }
2222
2223 if (was_querier)
2224 zlog_info(log_ifp("new link-local %pPA while querier"),
2225 &gm_ifp->cur_ll_lowest);
2226 else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
2227 pim_addr_is_any(gm_ifp->querier)) {
2228 zlog_info(log_ifp("new link-local %pPA, becoming querier"),
2229 &gm_ifp->cur_ll_lowest);
2230 gm_ifp->querier = gm_ifp->cur_ll_lowest;
2231 } else
2232 return;
2233
5e5034b0
DL
2234 gm_ifp->n_startup = gm_ifp->cur_qrv;
2235 thread_execute(router->master, gm_t_query, gm_ifp, 0);
2236}
2237
2238void gm_ifp_update(struct interface *ifp)
2239{
2240 struct pim_interface *pim_ifp = ifp->info;
2241 struct gm_if *gm_ifp;
2242 bool changed = false;
2243
2244 if (!pim_ifp)
2245 return;
2246 if (!if_is_operative(ifp) || !pim_ifp->pim ||
2247 pim_ifp->mroute_vif_index < 0) {
2248 gm_ifp_teardown(ifp);
2249 return;
2250 }
2251
5c1b3cd2
A
2252 /*
2253 * If ipv6 mld is not enabled on interface, do not start mld activites.
2254 */
2255 if (!pim_ifp->gm_enable)
2256 return;
2257
2258 if (!pim_ifp->mld) {
2259 changed = true;
5e5034b0 2260 gm_start(ifp);
5c1b3cd2 2261 }
5e5034b0
DL
2262
2263 gm_ifp = pim_ifp->mld;
2264 if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
2265 gm_update_ll(ifp);
2266
2267 unsigned int cfg_query_intv = pim_ifp->gm_default_query_interval * 1000;
2268
2269 if (gm_ifp->cur_query_intv != cfg_query_intv) {
2270 gm_ifp->cur_query_intv = cfg_query_intv;
707a9e9a
A
2271 changed = true;
2272 }
2273
2274 unsigned int cfg_query_intv_trig =
2275 pim_ifp->gm_specific_query_max_response_time_dsec * 100;
2276
2277 if (gm_ifp->cur_query_intv_trig != cfg_query_intv_trig) {
2278 gm_ifp->cur_query_intv_trig = cfg_query_intv_trig;
5e5034b0
DL
2279 changed = true;
2280 }
2281
914710d7
A
2282 unsigned int cfg_max_response =
2283 pim_ifp->gm_query_max_response_time_dsec * 100;
2284
2285 if (gm_ifp->cur_max_resp != cfg_max_response)
2286 gm_ifp->cur_max_resp = cfg_max_response;
2287
51b4991f
A
2288 if (gm_ifp->cur_lmqc != pim_ifp->gm_last_member_query_count)
2289 gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;
2290
5e5034b0
DL
2291 enum gm_version cfg_version;
2292
2293 if (pim_ifp->mld_version == 1)
2294 cfg_version = GM_MLDV1;
2295 else
2296 cfg_version = GM_MLDV2;
2297 if (gm_ifp->cur_version != cfg_version) {
2298 gm_ifp->cur_version = cfg_version;
2299 changed = true;
2300 }
2301
2302 if (changed) {
a96d64b0 2303 if (PIM_DEBUG_GM_TRACE)
5e5034b0
DL
2304 zlog_debug(log_ifp(
2305 "MLD querier config changed, querying"));
2306 gm_bump_querier(gm_ifp);
2307 }
2308}
2309
d2951219
DL
2310/*
2311 * CLI (show commands only)
2312 */
5e5034b0
DL
2313
2314#include "lib/command.h"
2315
5e5034b0 2316#include "pimd/pim6_mld_clippy.c"
5e5034b0 2317
d2951219
DL
2318static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
2319 int *err)
2320{
2321 struct vrf *ret;
2322
2323 if (!vrf_str)
2324 return vrf_lookup_by_id(VRF_DEFAULT);
2325 if (!strcmp(vrf_str, "all"))
2326 return NULL;
2327 ret = vrf_lookup_by_name(vrf_str);
2328 if (ret)
2329 return ret;
2330
2331 vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str);
2332 *err = CMD_WARNING;
2333 return NULL;
2334}
2335
2336static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp)
2337{
2338 struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
2339 struct gm_if *gm_ifp;
2340 bool querier;
2341 size_t i;
2342
2343 if (!pim_ifp) {
2344 vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name);
2345 return;
2346 }
2347
2348 gm_ifp = pim_ifp->mld;
2349 if (!gm_ifp) {
2350 vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name);
2351 return;
2352 }
2353
2354 querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
2355
2356 vty_out(vty, "Interface %s: MLD running\n", ifp->name);
2357 vty_out(vty, " Uptime: %pTVMs\n", &gm_ifp->started);
2358 vty_out(vty, " MLD version: %d\n", gm_ifp->cur_version);
2359 vty_out(vty, " Querier: %pPA%s\n", &gm_ifp->querier,
2360 querier ? " (this system)" : "");
2361 vty_out(vty, " Query timer: %pTH\n", gm_ifp->t_query);
2362 vty_out(vty, " Other querier timer: %pTH\n",
2363 gm_ifp->t_other_querier);
2364 vty_out(vty, " Robustness value: %u\n", gm_ifp->cur_qrv);
2365 vty_out(vty, " Query interval: %ums\n",
2366 gm_ifp->cur_query_intv);
2367 vty_out(vty, " Query response timer: %ums\n", gm_ifp->cur_max_resp);
2368 vty_out(vty, " Last member query intv.: %ums\n",
2369 gm_ifp->cur_query_intv_trig);
2370 vty_out(vty, " %u expiry timers from general queries:\n",
2371 gm_ifp->n_pending);
2372 for (i = 0; i < gm_ifp->n_pending; i++) {
2373 struct gm_general_pending *p = &gm_ifp->pending[i];
2374
2375 vty_out(vty, " %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
2376 &p->query, &p->expiry);
2377 }
2378 vty_out(vty, " %zu expiry timers from *,G queries\n",
2379 gm_grp_pends_count(gm_ifp->grp_pends));
2380 vty_out(vty, " %zu expiry timers from S,G queries\n",
2381 gm_gsq_pends_count(gm_ifp->gsq_pends));
2382 vty_out(vty, " %zu total *,G/S,G from %zu hosts in %zu bundles\n",
2383 gm_sgs_count(gm_ifp->sgs),
2384 gm_subscribers_count(gm_ifp->subscribers),
2385 gm_packet_expires_count(gm_ifp->expires));
2386 vty_out(vty, "\n");
2387}
2388
2389static void gm_show_if_one(struct vty *vty, struct interface *ifp,
cbb1e513 2390 json_object *js_if, struct ttable *tt)
d2951219
DL
2391{
2392 struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
2393 struct gm_if *gm_ifp = pim_ifp->mld;
2394 bool querier;
2395
d2951219
DL
2396 querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
2397
2398 if (js_if) {
cb406d5c 2399 json_object_string_add(js_if, "name", ifp->name);
3ab119a4
SP
2400 json_object_string_addf(js_if, "address", "%pPA",
2401 &pim_ifp->primary_address);
d2951219
DL
2402 json_object_string_add(js_if, "state", "up");
2403 json_object_string_addf(js_if, "version", "%d",
2404 gm_ifp->cur_version);
2405 json_object_string_addf(js_if, "upTime", "%pTVMs",
2406 &gm_ifp->started);
2407 json_object_boolean_add(js_if, "querier", querier);
2408 json_object_string_addf(js_if, "querierIp", "%pPA",
2409 &gm_ifp->querier);
2410 if (querier)
2411 json_object_string_addf(js_if, "queryTimer", "%pTH",
2412 gm_ifp->t_query);
2413 else
2414 json_object_string_addf(js_if, "otherQuerierTimer",
2415 "%pTH",
2416 gm_ifp->t_other_querier);
cb406d5c
A
2417 json_object_int_add(js_if, "timerRobustnessValue",
2418 gm_ifp->cur_qrv);
6b94500d
A
2419 json_object_int_add(js_if, "lastMemberQueryCount",
2420 gm_ifp->cur_lmqc);
cb406d5c
A
2421 json_object_int_add(js_if, "timerQueryIntervalMsec",
2422 gm_ifp->cur_query_intv);
2423 json_object_int_add(js_if, "timerQueryResponseTimerMsec",
2424 gm_ifp->cur_max_resp);
2425 json_object_int_add(js_if, "timerLastMemberQueryIntervalMsec",
2426 gm_ifp->cur_query_intv_trig);
d2951219 2427 } else {
cbb1e513
SP
2428 ttable_add_row(tt, "%s|%s|%pPAs|%d|%s|%pPAs|%pTH|%pTVMs",
2429 ifp->name, "up", &pim_ifp->primary_address,
2430 gm_ifp->cur_version, querier ? "local" : "other",
2431 &gm_ifp->querier, gm_ifp->t_query,
2432 &gm_ifp->started);
d2951219
DL
2433 }
2434}
2435
2436static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname,
2437 bool detail, json_object *js)
2438{
2439 struct interface *ifp;
cbb1e513 2440 json_object *js_vrf = NULL;
fe4db62c 2441 struct pim_interface *pim_ifp;
cbb1e513
SP
2442 struct ttable *tt = NULL;
2443 char *table = NULL;
d2951219
DL
2444
2445 if (js) {
2446 js_vrf = json_object_new_object();
2447 json_object_object_add(js, vrf->name, js_vrf);
2448 }
2449
cbb1e513
SP
2450 if (!js && !detail) {
2451 /* Prepare table. */
2452 tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
2453 ttable_add_row(
2454 tt,
2455 "Interface|State|Address|V|Querier|QuerierIp|Query Timer|Uptime");
2456 tt->style.cell.rpad = 2;
2457 tt->style.corner = '+';
2458 ttable_restyle(tt);
2459 }
2460
d2951219
DL
2461 FOR_ALL_INTERFACES (vrf, ifp) {
2462 json_object *js_if = NULL;
2463
2464 if (ifname && strcmp(ifp->name, ifname))
2465 continue;
2466 if (detail && !js) {
2467 gm_show_if_one_detail(vty, ifp);
2468 continue;
2469 }
2470
cbb1e513 2471 pim_ifp = ifp->info;
fe4db62c
SP
2472
2473 if (!pim_ifp || !pim_ifp->mld)
d2951219 2474 continue;
fe4db62c 2475
d2951219
DL
2476 if (js) {
2477 js_if = json_object_new_object();
2478 json_object_object_add(js_vrf, ifp->name, js_if);
2479 }
2480
cbb1e513
SP
2481 gm_show_if_one(vty, ifp, js_if, tt);
2482 }
2483
2484 /* Dump the generated table. */
2485 if (!js && !detail) {
2486 table = ttable_dump(tt, "\n");
2487 vty_out(vty, "%s\n", table);
2488 XFREE(MTYPE_TMP, table);
2489 ttable_del(tt);
d2951219
DL
2490 }
2491}
2492
2493static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname,
2494 bool detail, json_object *js)
2495{
d2951219
DL
2496 if (vrf)
2497 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2498 else
2499 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2500 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2501}
2502
2503DEFPY(gm_show_interface,
2504 gm_show_interface_cmd,
ad56b07c 2505 "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME | detail$detail] [json$json]",
d2951219
DL
2506 SHOW_STR
2507 IPV6_STR
2508 MLD_STR
2509 VRF_FULL_CMD_HELP_STR
2510 "MLD interface information\n"
ad56b07c 2511 "Interface name\n"
d2951219
DL
2512 "Detailed output\n"
2513 JSON_STR)
2514{
2515 int ret = CMD_SUCCESS;
2516 struct vrf *vrf;
2517 json_object *js = NULL;
2518
2519 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2520 if (ret != CMD_SUCCESS)
2521 return ret;
2522
2523 if (json)
2524 js = json_object_new_object();
2525 gm_show_if(vty, vrf, ifname, !!detail, js);
2526 return vty_json(vty, js);
2527}
2528
2529static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp,
2530 json_object *js_if)
2531{
2532 struct gm_if_stats *stats = &gm_ifp->stats;
2533 /* clang-format off */
2534 struct {
2535 const char *text;
2536 const char *js_key;
2537 uint64_t *val;
2538 } *item, items[] = {
2539 { "v2 reports received", "rxV2Reports", &stats->rx_new_report },
2540 { "v1 reports received", "rxV1Reports", &stats->rx_old_report },
2541 { "v1 done received", "rxV1Done", &stats->rx_old_leave },
2542
2543 { "v2 *,* queries received", "rxV2QueryGeneral", &stats->rx_query_new_general },
2544 { "v2 *,G queries received", "rxV2QueryGroup", &stats->rx_query_new_group },
2545 { "v2 S,G queries received", "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc },
2546 { "v2 S-bit queries received", "rxV2QuerySBit", &stats->rx_query_new_sbit },
2547 { "v1 *,* queries received", "rxV1QueryGeneral", &stats->rx_query_old_general },
2548 { "v1 *,G queries received", "rxV1QueryGroup", &stats->rx_query_old_group },
2549
2550 { "v2 *,* queries sent", "txV2QueryGeneral", &stats->tx_query_new_general },
2551 { "v2 *,G queries sent", "txV2QueryGroup", &stats->tx_query_new_group },
2552 { "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc },
2553 { "v1 *,* queries sent", "txV1QueryGeneral", &stats->tx_query_old_general },
2554 { "v1 *,G queries sent", "txV1QueryGroup", &stats->tx_query_old_group },
2555 { "TX errors", "txErrors", &stats->tx_query_fail },
2556
d2951219
DL
2557 { "RX dropped (checksum error)", "rxDropChecksum", &stats->rx_drop_csum },
2558 { "RX dropped (invalid source)", "rxDropSrcAddr", &stats->rx_drop_srcaddr },
2559 { "RX dropped (invalid dest.)", "rxDropDstAddr", &stats->rx_drop_dstaddr },
2560 { "RX dropped (missing alert)", "rxDropRtrAlert", &stats->rx_drop_ra },
2561 { "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed },
2562 { "RX truncated reports", "rxTruncatedRep", &stats->rx_trunc_report },
2563 };
2564 /* clang-format on */
2565
2566 for (item = items; item < items + array_size(items); item++) {
2567 if (js_if)
2568 json_object_int_add(js_if, item->js_key, *item->val);
2569 else
2570 vty_out(vty, " %-30s %" PRIu64 "\n", item->text,
2571 *item->val);
2572 }
2573}
2574
2575static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf,
2576 const char *ifname, json_object *js)
2577{
2578 struct interface *ifp;
2579 json_object *js_vrf;
2580
2581 if (js) {
2582 js_vrf = json_object_new_object();
2583 json_object_object_add(js, vrf->name, js_vrf);
2584 }
2585
2586 FOR_ALL_INTERFACES (vrf, ifp) {
2587 struct pim_interface *pim_ifp;
2588 struct gm_if *gm_ifp;
2589 json_object *js_if = NULL;
2590
2591 if (ifname && strcmp(ifp->name, ifname))
2592 continue;
2593
2594 if (!ifp->info)
2595 continue;
2596 pim_ifp = ifp->info;
2597 if (!pim_ifp->mld)
2598 continue;
2599 gm_ifp = pim_ifp->mld;
2600
2601 if (js) {
2602 js_if = json_object_new_object();
2603 json_object_object_add(js_vrf, ifp->name, js_if);
2604 } else {
2605 vty_out(vty, "Interface: %s\n", ifp->name);
2606 }
2607 gm_show_stats_one(vty, gm_ifp, js_if);
2608 if (!js)
2609 vty_out(vty, "\n");
2610 }
2611}
2612
2613DEFPY(gm_show_interface_stats,
2614 gm_show_interface_stats_cmd,
2615 "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
2616 SHOW_STR
2617 IPV6_STR
2618 MLD_STR
2619 VRF_FULL_CMD_HELP_STR
2620 "MLD statistics\n"
2621 INTERFACE_STR
2622 "Interface name\n"
2623 JSON_STR)
2624{
2625 int ret = CMD_SUCCESS;
2626 struct vrf *vrf;
2627 json_object *js = NULL;
2628
2629 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2630 if (ret != CMD_SUCCESS)
2631 return ret;
2632
2633 if (json)
2634 js = json_object_new_object();
2635
2636 if (vrf)
2637 gm_show_stats_vrf(vty, vrf, ifname, js);
2638 else
2639 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2640 gm_show_stats_vrf(vty, vrf, ifname, js);
2641 return vty_json(vty, js);
2642}
2643
2644static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp,
2645 const struct prefix_ipv6 *groups,
2646 const struct prefix_ipv6 *sources, bool detail,
2647 json_object *js_if)
2648{
2649 struct gm_sg *sg, *sg_start;
2650 json_object *js_group = NULL;
2651 pim_addr js_grpaddr = PIMADDR_ANY;
2652 struct gm_subscriber sub_ref = {}, *sub_untracked;
2653
2654 if (groups) {
2655 struct gm_sg sg_ref = {};
2656
2657 sg_ref.sgaddr.grp = pim_addr_from_prefix(groups);
2658 sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
2659 } else
2660 sg_start = gm_sgs_first(gm_ifp->sgs);
2661
2662 sub_ref.addr = gm_dummy_untracked;
2663 sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref);
2664 /* NB: sub_untracked may be NULL if no untracked joins exist */
2665
2666 frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
2667 struct timeval *recent = NULL, *untracked = NULL;
2668 json_object *js_src;
2669
2670 if (groups) {
2671 struct prefix grp_p;
2672
2673 pim_addr_to_prefix(&grp_p, sg->sgaddr.grp);
2674 if (!prefix_match(groups, &grp_p))
2675 break;
2676 }
2677
2678 if (sources) {
2679 struct prefix src_p;
2680
2681 pim_addr_to_prefix(&src_p, sg->sgaddr.src);
2682 if (!prefix_match(sources, &src_p))
2683 continue;
2684 }
2685
2686 if (sg->most_recent) {
2687 struct gm_packet_state *packet;
2688
2689 packet = gm_packet_sg2state(sg->most_recent);
2690 recent = &packet->received;
2691 }
2692
2693 if (sub_untracked) {
2694 struct gm_packet_state *packet;
2695 struct gm_packet_sg *item;
2696
2697 item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked);
2698 if (item) {
2699 packet = gm_packet_sg2state(item);
2700 untracked = &packet->received;
2701 }
2702 }
2703
2704 if (!js_if) {
2705 FMT_NSTD_BEGIN; /* %.0p */
2706 vty_out(vty,
2707 "%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
2708 &sg->sgaddr.grp, &sg->sgaddr.src,
2709 gm_states[sg->state], recent, untracked,
2710 &sg->created);
2711
2712 if (!detail)
2713 continue;
2714
2715 struct gm_packet_sg *item;
2716 struct gm_packet_state *packet;
2717
2718 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2719 packet = gm_packet_sg2state(item);
2720
2721 if (packet->subscriber == sub_untracked)
2722 continue;
2723 vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
2724 &packet->subscriber->addr, "(JOIN)",
2725 &packet->received);
2726 }
2727 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2728 packet = gm_packet_sg2state(item);
2729
2730 if (packet->subscriber == sub_untracked)
2731 continue;
2732 vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
2733 &packet->subscriber->addr, "(PRUNE)",
2734 &packet->received);
2735 }
2736 FMT_NSTD_END; /* %.0p */
2737 continue;
2738 }
2739 /* if (js_if) */
2740
2741 if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) {
2742 js_group = json_object_new_object();
2743 json_object_object_addf(js_if, js_group, "%pPA",
2744 &sg->sgaddr.grp);
2745 js_grpaddr = sg->sgaddr.grp;
2746 }
2747
2748 js_src = json_object_new_object();
58971e15 2749 json_object_object_addf(js_group, js_src, "%pPAs",
d2951219
DL
2750 &sg->sgaddr.src);
2751
2752 json_object_string_add(js_src, "state", gm_states[sg->state]);
2753 json_object_string_addf(js_src, "created", "%pTVMs",
2754 &sg->created);
2755 json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent);
2756
2757 if (untracked)
2758 json_object_string_addf(js_src, "untrackedLastSeen",
2759 "%pTVMs", untracked);
2760 if (!detail)
2761 continue;
2762
2763 json_object *js_subs;
2764 struct gm_packet_sg *item;
2765 struct gm_packet_state *packet;
2766
2767 js_subs = json_object_new_object();
2768 json_object_object_add(js_src, "joinedBy", js_subs);
2769 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2770 packet = gm_packet_sg2state(item);
2771 if (packet->subscriber == sub_untracked)
2772 continue;
2773
2774 json_object *js_sub;
2775
2776 js_sub = json_object_new_object();
2777 json_object_object_addf(js_subs, js_sub, "%pPA",
2778 &packet->subscriber->addr);
2779 json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
2780 &packet->received);
2781 }
2782
2783 js_subs = json_object_new_object();
2784 json_object_object_add(js_src, "prunedBy", js_subs);
2785 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2786 packet = gm_packet_sg2state(item);
2787 if (packet->subscriber == sub_untracked)
2788 continue;
2789
2790 json_object *js_sub;
2791
2792 js_sub = json_object_new_object();
2793 json_object_object_addf(js_subs, js_sub, "%pPA",
2794 &packet->subscriber->addr);
2795 json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
2796 &packet->received);
2797 }
2798 }
2799}
2800
2801static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf,
2802 const char *ifname,
2803 const struct prefix_ipv6 *groups,
2804 const struct prefix_ipv6 *sources, bool detail,
2805 json_object *js)
2806{
2807 struct interface *ifp;
2808 json_object *js_vrf;
2809
2810 if (js) {
2811 js_vrf = json_object_new_object();
58971e15 2812 json_object_string_add(js_vrf, "vrf", vrf->name);
d2951219
DL
2813 json_object_object_add(js, vrf->name, js_vrf);
2814 }
2815
2816 FOR_ALL_INTERFACES (vrf, ifp) {
2817 struct pim_interface *pim_ifp;
2818 struct gm_if *gm_ifp;
2819 json_object *js_if = NULL;
2820
2821 if (ifname && strcmp(ifp->name, ifname))
2822 continue;
2823
2824 if (!ifp->info)
2825 continue;
2826 pim_ifp = ifp->info;
2827 if (!pim_ifp->mld)
2828 continue;
2829 gm_ifp = pim_ifp->mld;
2830
2831 if (js) {
2832 js_if = json_object_new_object();
2833 json_object_object_add(js_vrf, ifp->name, js_if);
2834 }
2835
2836 if (!js && !ifname)
2837 vty_out(vty, "\nOn interface %s:\n", ifp->name);
2838
2839 gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if);
2840 }
2841}
2842
2843DEFPY(gm_show_interface_joins,
2844 gm_show_interface_joins_cmd,
2845 "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
2846 SHOW_STR
2847 IPV6_STR
2848 MLD_STR
2849 VRF_FULL_CMD_HELP_STR
2850 "MLD joined groups & sources\n"
2851 INTERFACE_STR
2852 "Interface name\n"
2853 "Limit output to group range\n"
2854 "Show groups covered by this prefix\n"
2855 "Limit output to source range\n"
2856 "Show sources covered by this prefix\n"
2857 "Show details, including tracked receivers\n"
2858 JSON_STR)
2859{
2860 int ret = CMD_SUCCESS;
2861 struct vrf *vrf;
2862 json_object *js = NULL;
2863
2864 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2865 if (ret != CMD_SUCCESS)
2866 return ret;
2867
2868 if (json)
2869 js = json_object_new_object();
2870 else
2871 vty_out(vty, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
2872 "Source", "State", "LastSeen", "NonTrkSeen", "Created");
2873
2874 if (vrf)
2875 gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail,
2876 js);
2877 else
2878 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2879 gm_show_joins_vrf(vty, vrf, ifname, groups, sources,
2880 !!detail, js);
2881 return vty_json(vty, js);
2882}
2883
cdc1b770
SG
2884static void gm_show_groups(struct vty *vty, struct vrf *vrf, bool uj)
2885{
2886 struct interface *ifp;
2887 struct ttable *tt = NULL;
2888 char *table;
2889 json_object *json = NULL;
2890 json_object *json_iface = NULL;
2891 json_object *json_group = NULL;
2892 json_object *json_groups = NULL;
2893 struct pim_instance *pim = vrf->info;
2894
2895 if (uj) {
2896 json = json_object_new_object();
2897 json_object_int_add(json, "totalGroups", pim->gm_group_count);
2898 json_object_int_add(json, "watermarkLimit",
2899 pim->gm_watermark_limit);
2900 } else {
2901 /* Prepare table. */
2902 tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
2903 ttable_add_row(tt, "Interface|Group|Version|Uptime");
2904 tt->style.cell.rpad = 2;
2905 tt->style.corner = '+';
2906 ttable_restyle(tt);
2907
2908 vty_out(vty, "Total MLD groups: %u\n", pim->gm_group_count);
2909 vty_out(vty, "Watermark warn limit(%s): %u\n",
2910 pim->gm_watermark_limit ? "Set" : "Not Set",
2911 pim->gm_watermark_limit);
2912 }
2913
2914 /* scan interfaces */
2915 FOR_ALL_INTERFACES (vrf, ifp) {
2916
2917 struct pim_interface *pim_ifp = ifp->info;
2918 struct gm_if *gm_ifp;
2919 struct gm_sg *sg;
2920
2921 if (!pim_ifp)
2922 continue;
2923
2924 gm_ifp = pim_ifp->mld;
2925 if (!gm_ifp)
2926 continue;
2927
2928 /* scan mld groups */
2929 frr_each (gm_sgs, gm_ifp->sgs, sg) {
2930
2931 if (uj) {
2932 json_object_object_get_ex(json, ifp->name,
2933 &json_iface);
2934
2935 if (!json_iface) {
2936 json_iface = json_object_new_object();
2937 json_object_pim_ifp_add(json_iface,
2938 ifp);
2939 json_object_object_add(json, ifp->name,
2940 json_iface);
2941 json_groups = json_object_new_array();
2942 json_object_object_add(json_iface,
2943 "groups",
2944 json_groups);
2945 }
2946
2947 json_group = json_object_new_object();
2948 json_object_string_addf(json_group, "group",
2949 "%pPAs",
2950 &sg->sgaddr.grp);
2951
2952 json_object_int_add(json_group, "version",
2953 pim_ifp->mld_version);
2954 json_object_string_addf(json_group, "uptime",
2955 "%pTVMs", &sg->created);
2956 json_object_array_add(json_groups, json_group);
2957 } else {
2958 ttable_add_row(tt, "%s|%pPAs|%d|%pTVMs",
2959 ifp->name, &sg->sgaddr.grp,
2960 pim_ifp->mld_version,
2961 &sg->created);
2962 }
2963 } /* scan gm groups */
2964 } /* scan interfaces */
2965
2966 if (uj)
2967 vty_json(vty, json);
2968 else {
2969 /* Dump the generated table. */
2970 table = ttable_dump(tt, "\n");
2971 vty_out(vty, "%s\n", table);
2972 XFREE(MTYPE_TMP, table);
2973 ttable_del(tt);
2974 }
2975}
2976
2977DEFPY(gm_show_mld_groups,
2978 gm_show_mld_groups_cmd,
2979 "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
2980 SHOW_STR
2981 IPV6_STR
2982 MLD_STR
2983 VRF_FULL_CMD_HELP_STR
2984 MLD_GROUP_STR
2985 JSON_STR)
2986{
2987 int ret = CMD_SUCCESS;
2988 struct vrf *vrf;
2989
2990 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2991 if (ret != CMD_SUCCESS)
2992 return ret;
2993
2994 if (vrf)
2995 gm_show_groups(vty, vrf, !!json);
2996 else
2997 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2998 gm_show_groups(vty, vrf, !!json);
2999
3000 return CMD_SUCCESS;
3001}
3002
5e5034b0
DL
3003DEFPY(gm_debug_show,
3004 gm_debug_show_cmd,
3005 "debug show mld interface IFNAME",
3006 DEBUG_STR
3007 SHOW_STR
a0dfca37 3008 MLD_STR
5e5034b0 3009 INTERFACE_STR
a0dfca37 3010 "interface name\n")
5e5034b0
DL
3011{
3012 struct interface *ifp;
3013 struct pim_interface *pim_ifp;
3014 struct gm_if *gm_ifp;
3015
3016 ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
3017 if (!ifp) {
3018 vty_out(vty, "%% no such interface: %pSQq\n", ifname);
3019 return CMD_WARNING;
3020 }
3021
3022 pim_ifp = ifp->info;
3023 if (!pim_ifp) {
3024 vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname);
3025 return CMD_WARNING;
3026 }
3027
3028 gm_ifp = pim_ifp->mld;
3029 if (!gm_ifp) {
3030 vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname);
3031 return CMD_WARNING;
3032 }
3033
3034 vty_out(vty, "querier: %pPA\n", &gm_ifp->querier);
3035 vty_out(vty, "ll_lowest: %pPA\n\n", &pim_ifp->ll_lowest);
3036 vty_out(vty, "t_query: %pTHD\n", gm_ifp->t_query);
3037 vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier);
5e5034b0
DL
3038 vty_out(vty, "t_expire: %pTHD\n", gm_ifp->t_expire);
3039
3040 vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending);
3041 for (size_t i = 0; i < gm_ifp->n_pending; i++) {
3042 int64_t query, expiry;
3043
3044 query = monotime_since(&gm_ifp->pending[i].query, NULL);
3045 expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL);
3046
3047 vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n",
3048 i, query / 1000, expiry / 1000);
3049 }
3050
3051 struct gm_sg *sg;
3052 struct gm_packet_state *pkt;
3053 struct gm_packet_sg *item;
3054 struct gm_subscriber *subscriber;
3055
3056 vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs));
3057 frr_each (gm_sgs, gm_ifp->sgs, sg) {
3058 vty_out(vty, "\t%pSG t_expire=%pTHD\n", &sg->sgaddr,
3059 sg->t_sg_expire);
3060
3061 vty_out(vty, "\t @pos:%zu\n",
3062 gm_packet_sg_subs_count(sg->subs_positive));
3063 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
3064 pkt = gm_packet_sg2state(item);
3065
3066 vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n",
3067 item->is_src ? "S" : "",
3068 item->is_excl ? "E" : "",
3069 &pkt->subscriber->addr, pkt->subscriber, pkt,
3070 item->offset);
3071
3072 assert(item->sg == sg);
3073 }
3074 vty_out(vty, "\t @neg:%zu\n",
3075 gm_packet_sg_subs_count(sg->subs_negative));
3076 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
3077 pkt = gm_packet_sg2state(item);
3078
3079 vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n",
3080 item->is_src ? "S" : "",
3081 item->is_excl ? "E" : "",
3082 &pkt->subscriber->addr, pkt->subscriber, pkt,
3083 item->offset);
3084
3085 assert(item->sg == sg);
3086 }
3087 }
3088
3089 vty_out(vty, "\n%zu subscribers:\n",
3090 gm_subscribers_count(gm_ifp->subscribers));
3091 frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) {
3092 vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr,
3093 subscriber, gm_packets_count(subscriber->packets));
3094
3095 frr_each (gm_packets, subscriber->packets, pkt) {
3096 vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n",
3097 pkt,
3098 monotime_since(&pkt->received, NULL) *
3099 0.000001f,
3100 pkt->n_active, pkt->n_sg);
3101
3102 for (size_t i = 0; i < pkt->n_sg; i++) {
3103 item = pkt->items + i;
3104
3105 vty_out(vty, "\t\t[%zu]", i);
3106
3107 if (!item->sg) {
3108 vty_out(vty, " inactive\n");
3109 continue;
3110 }
3111
3112 vty_out(vty, " %s%s %pSG nE=%u\n",
3113 item->is_src ? "S" : "",
3114 item->is_excl ? "E" : "",
3115 &item->sg->sgaddr, item->n_exclude);
3116 }
3117 }
3118 }
3119
3120 return CMD_SUCCESS;
3121}
3122
3123DEFPY(gm_debug_iface_cfg,
3124 gm_debug_iface_cfg_cmd,
3125 "debug ipv6 mld {"
3126 "robustness (0-7)|"
3127 "query-max-response-time (1-8387584)"
3128 "}",
3129 DEBUG_STR
3130 IPV6_STR
3131 "Multicast Listener Discovery\n"
3132 "QRV\nQRV\n"
3133 "maxresp\nmaxresp\n")
3134{
3135 VTY_DECLVAR_CONTEXT(interface, ifp);
3136 struct pim_interface *pim_ifp;
3137 struct gm_if *gm_ifp;
3138 bool changed = false;
3139
3140 pim_ifp = ifp->info;
3141 if (!pim_ifp) {
3142 vty_out(vty, "%% no PIM state for interface %pSQq\n",
3143 ifp->name);
3144 return CMD_WARNING;
3145 }
3146 gm_ifp = pim_ifp->mld;
3147 if (!gm_ifp) {
3148 vty_out(vty, "%% no MLD state for interface %pSQq\n",
3149 ifp->name);
3150 return CMD_WARNING;
3151 }
3152
3153 if (robustness_str && gm_ifp->cur_qrv != robustness) {
3154 gm_ifp->cur_qrv = robustness;
3155 changed = true;
3156 }
3157 if (query_max_response_time_str &&
3158 gm_ifp->cur_max_resp != (unsigned int)query_max_response_time) {
3159 gm_ifp->cur_max_resp = query_max_response_time;
3160 changed = true;
3161 }
3162
3163 if (changed) {
3164 vty_out(vty, "%% MLD querier config changed, bumping\n");
3165 gm_bump_querier(gm_ifp);
3166 }
3167 return CMD_SUCCESS;
3168}
3169
3170void gm_cli_init(void);
3171
3172void gm_cli_init(void)
3173{
d2951219
DL
3174 install_element(VIEW_NODE, &gm_show_interface_cmd);
3175 install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
3176 install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
cdc1b770 3177 install_element(VIEW_NODE, &gm_show_mld_groups_cmd);
d2951219 3178
5e5034b0
DL
3179 install_element(VIEW_NODE, &gm_debug_show_cmd);
3180 install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
3181}