pimd/pim6_mld.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * PIMv6 MLD querier
4 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
5 */
6
7 /*
8 * keep pim6_mld.h open when working on this code. Most data structures are
9 * commented in the header.
10 *
11 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
12 * that this code will replace the old IGMP querier at some point.
13 */
14
15 #include <zebra.h>
16 #include <netinet/ip6.h>
17
18 #include "lib/memory.h"
19 #include "lib/jhash.h"
20 #include "lib/prefix.h"
21 #include "lib/checksum.h"
22 #include "lib/thread.h"
23 #include "termtable.h"
24
25 #include "pimd/pim6_mld.h"
26 #include "pimd/pim6_mld_protocol.h"
27 #include "pimd/pim_memory.h"
28 #include "pimd/pim_instance.h"
29 #include "pimd/pim_iface.h"
30 #include "pimd/pim6_cmd.h"
31 #include "pimd/pim_cmd_common.h"
32 #include "pimd/pim_util.h"
33 #include "pimd/pim_tib.h"
34 #include "pimd/pimd.h"
35
36 #ifndef IPV6_MULTICAST_ALL
37 #define IPV6_MULTICAST_ALL 29
38 #endif
39
40 DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface");
41 DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet");
42 DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber");
43 DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state");
44 DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)");
45 DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
46 DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");
47
48 static void gm_t_query(struct thread *t);
49 static void gm_trigger_specific(struct gm_sg *sg);
50 static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
51 struct timeval expire_wait);
52
53 /* shorthand for log messages */
54 #define log_ifp(msg) \
55 "[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
56 #define log_pkt_src(msg) \
57 "[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
58 &pkt_src->sin6_addr
59 #define log_sg(sg, msg) \
60 "[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
61 sg->iface->ifp->name, &sg->sgaddr
62
63 /* clang-format off */
64 #if PIM_IPV == 6
65 static const pim_addr gm_all_hosts = {
66 .s6_addr = {
67 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
69 },
70 };
71 static const pim_addr gm_all_routers = {
72 .s6_addr = {
73 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
75 },
76 };
 77 /* MLDv1 does not allow subscriber tracking due to report suppression;
78 * hence, the source address is replaced with ffff:...:ffff
79 */
80 static const pim_addr gm_dummy_untracked = {
81 .s6_addr = {
82 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
83 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
84 },
85 };
86 #else
87 /* 224.0.0.1 */
88 static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), };
89 /* 224.0.0.22 */
90 static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), };
91 static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, };
92 #endif
93 /* clang-format on */
94
95 #define IPV6_MULTICAST_SCOPE_LINK 2
96
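/* the scope of an IPv6 multicast address is the low nibble of its second
 * byte.  Interface- and link-local groups (scope <= 2, e.g. ff02::...) are
 * never forwarded, so gm_sg_update() below skips the TIB join for them.
 */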
97 static inline uint8_t in6_multicast_scope(const pim_addr *addr)
98 {
99 return addr->s6_addr[1] & 0xf;
100 }
101
102 static inline bool in6_multicast_nofwd(const pim_addr *addr)
103 {
104 return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK;
105 }
106
107 /*
108 * (S,G) -> subscriber,(S,G)
109 */
110
111 static int gm_packet_sg_cmp(const struct gm_packet_sg *a,
112 const struct gm_packet_sg *b)
113 {
114 const struct gm_packet_state *s_a, *s_b;
115
116 s_a = gm_packet_sg2state(a);
117 s_b = gm_packet_sg2state(b);
118 return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr);
119 }
120
121 DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm,
122 gm_packet_sg_cmp);
123
124 static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg,
125 enum gm_sub_sense sense,
126 struct gm_subscriber *sub)
127 {
128 struct {
129 struct gm_packet_state hdr;
130 struct gm_packet_sg item;
131 } ref = {
132 /* clang-format off */
133 .hdr = {
134 .subscriber = sub,
135 },
136 .item = {
137 .offset = 0,
138 },
139 /* clang-format on */
140 };
141
142 return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item);
143 }
144
145 /*
146 * interface -> (*,G),pending
147 */
148
149 static int gm_grp_pending_cmp(const struct gm_grp_pending *a,
150 const struct gm_grp_pending *b)
151 {
152 return IPV6_ADDR_CMP(&a->grp, &b->grp);
153 }
154
155 DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm,
156 gm_grp_pending_cmp);
157
158 /*
159 * interface -> ([S1,S2,...],G),pending
160 */
161
162 static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a,
163 const struct gm_gsq_pending *b)
164 {
165 if (a->s_bit != b->s_bit)
166 return numcmp(a->s_bit, b->s_bit);
167
168 return IPV6_ADDR_CMP(&a->grp, &b->grp);
169 }
170
171 static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a)
172 {
173 uint32_t seed = a->s_bit ? 0x68f0eb5e : 0x156b7f19;
174
175 return jhash(&a->grp, sizeof(a->grp), seed);
176 }
177
178 DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp,
179 gm_gsq_pending_hash);
180
181 /*
182 * interface -> (S,G)
183 */
184
185 static int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b)
186 {
187 return pim_sgaddr_cmp(a->sgaddr, b->sgaddr);
188 }
189
190 DECLARE_RBTREE_UNIQ(gm_sgs, struct gm_sg, itm, gm_sg_cmp);
191
192 static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
193 pim_addr src)
194 {
195 struct gm_sg ref = {};
196
197 ref.sgaddr.grp = grp;
198 ref.sgaddr.src = src;
199 return gm_sgs_find(gm_ifp->sgs, &ref);
200 }
201
202 static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
203 pim_addr src)
204 {
205 struct gm_sg *ret, *prev;
206
207 ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
208 ret->sgaddr.grp = grp;
209 ret->sgaddr.src = src;
210 ret->iface = gm_ifp;
211 prev = gm_sgs_add(gm_ifp->sgs, ret);
212
213 if (prev) {
214 XFREE(MTYPE_GM_SG, ret);
215 ret = prev;
216 } else {
217 monotime(&ret->created);
218 gm_packet_sg_subs_init(ret->subs_positive);
219 gm_packet_sg_subs_init(ret->subs_negative);
220 }
221 return ret;
222 }
223
224 /*
225 * interface -> packets, sorted by expiry (because add_tail insert order)
226 */
227
228 DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm);
229
230 /*
231 * subscriber -> packets
232 */
233
234 DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm);
235
236 /*
237 * interface -> subscriber
238 */
239
240 static int gm_subscriber_cmp(const struct gm_subscriber *a,
241 const struct gm_subscriber *b)
242 {
243 return IPV6_ADDR_CMP(&a->addr, &b->addr);
244 }
245
246 static uint32_t gm_subscriber_hash(const struct gm_subscriber *a)
247 {
248 return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4);
249 }
250
251 DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp,
252 gm_subscriber_hash);
253
254 static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp,
255 pim_addr addr)
256 {
257 struct gm_subscriber ref = {}, *ret;
258
259 ref.addr = addr;
260 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
261 if (ret)
262 ret->refcount++;
263 return ret;
264 }
265
266 static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp,
267 pim_addr addr)
268 {
269 struct gm_subscriber ref = {}, *ret;
270
271 ref.addr = addr;
272 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
273
274 if (!ret) {
275 ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret));
276 ret->iface = gm_ifp;
277 ret->addr = addr;
278 ret->refcount = 1;
279 monotime(&ret->created);
280 gm_packets_init(ret->packets);
281
282 gm_subscribers_add(gm_ifp->subscribers, ret);
283 }
284 return ret;
285 }
286
287 static void gm_subscriber_drop(struct gm_subscriber **subp)
288 {
289 struct gm_subscriber *sub = *subp;
290 struct gm_if *gm_ifp;
291
292 if (!sub)
293 return;
294 gm_ifp = sub->iface;
295
296 *subp = NULL;
297 sub->refcount--;
298
299 if (sub->refcount)
300 return;
301
302 gm_subscribers_del(gm_ifp->subscribers, sub);
303 XFREE(MTYPE_GM_SUBSCRIBER, sub);
304 }
305
306 /****************************************************************************/
307
308 /* bundle query timer values for combined v1/v2 handling */
309 struct gm_query_timers {
310 unsigned int qrv;
311 unsigned int max_resp_ms;
312 unsigned int qqic_ms;
313
314 struct timeval fuzz;
315 struct timeval expire_wait;
316 };
317
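/* expire_wait = (robustness - 1) additional query intervals plus the maximum
 * response delay, converted to a timeval and padded with the caller-supplied
 * timing fuzz.
 */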
318 static void gm_expiry_calc(struct gm_query_timers *timers)
319 {
320 unsigned int expire =
321 (timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms;
322 ldiv_t exp_div = ldiv(expire, 1000);
323
324 timers->expire_wait.tv_sec = exp_div.quot;
325 timers->expire_wait.tv_usec = exp_div.rem * 1000;
326 timeradd(&timers->expire_wait, &timers->fuzz, &timers->expire_wait);
327 }
328
329 static void gm_sg_free(struct gm_sg *sg)
330 {
 331 /* t_sg_expire is handled before this is reached */
332 THREAD_OFF(sg->t_sg_query);
333 gm_packet_sg_subs_fini(sg->subs_negative);
334 gm_packet_sg_subs_fini(sg->subs_positive);
335 XFREE(MTYPE_GM_SG, sg);
336 }
337
338 /* clang-format off */
339 static const char *const gm_states[] = {
340 [GM_SG_NOINFO] = "NOINFO",
341 [GM_SG_JOIN] = "JOIN",
342 [GM_SG_JOIN_EXPIRING] = "JOIN_EXPIRING",
343 [GM_SG_PRUNE] = "PRUNE",
344 [GM_SG_NOPRUNE] = "NOPRUNE",
345 [GM_SG_NOPRUNE_EXPIRING] = "NOPRUNE_EXPIRING",
346 };
347 /* clang-format on */
348
349 CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported");
350 /* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
351 * joined. Whether we actually want/need to support this is a separate
352 * question - it is almost never used. In fact this is exactly what RFC5790
353 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
354 */
355
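/* recompute the desired state of one *,G or S,G entry after its subscriber
 * lists changed.  has_expired is true when called from an expiry path, so
 * the *_EXPIRING grace states are not re-entered.  The result is propagated
 * into the TIB as a join or prune, and an entry that ends up at NOINFO is
 * freed.
 */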
356 static void gm_sg_update(struct gm_sg *sg, bool has_expired)
357 {
358 struct gm_if *gm_ifp = sg->iface;
359 enum gm_sg_state prev, desired;
360 bool new_join;
361 struct gm_sg *grp = NULL;
362
363 if (!pim_addr_is_any(sg->sgaddr.src))
364 grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY);
365 else
366 assert(sg->state != GM_SG_PRUNE);
367
368 if (gm_packet_sg_subs_count(sg->subs_positive)) {
369 desired = GM_SG_JOIN;
370 assert(!sg->t_sg_expire);
371 } else if ((sg->state == GM_SG_JOIN ||
372 sg->state == GM_SG_JOIN_EXPIRING) &&
373 !has_expired)
374 desired = GM_SG_JOIN_EXPIRING;
375 else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive))
376 desired = GM_SG_NOINFO;
377 else if (gm_packet_sg_subs_count(grp->subs_positive) ==
378 gm_packet_sg_subs_count(sg->subs_negative)) {
379 if ((sg->state == GM_SG_NOPRUNE ||
380 sg->state == GM_SG_NOPRUNE_EXPIRING) &&
381 !has_expired)
382 desired = GM_SG_NOPRUNE_EXPIRING;
383 else
384 desired = GM_SG_PRUNE;
385 } else if (gm_packet_sg_subs_count(sg->subs_negative))
386 desired = GM_SG_NOPRUNE;
387 else
388 desired = GM_SG_NOINFO;
389
390 if (desired != sg->state && !gm_ifp->stopping) {
391 if (PIM_DEBUG_GM_EVENTS)
392 zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
393 gm_states[desired]);
394
395 if (desired == GM_SG_JOIN_EXPIRING ||
396 desired == GM_SG_NOPRUNE_EXPIRING) {
397 struct gm_query_timers timers;
398
399 timers.qrv = gm_ifp->cur_qrv;
400 timers.max_resp_ms = gm_ifp->cur_max_resp;
401 timers.qqic_ms = gm_ifp->cur_query_intv_trig;
402 timers.fuzz = gm_ifp->cfg_timing_fuzz;
403
404 gm_expiry_calc(&timers);
405 gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);
406
407 THREAD_OFF(sg->t_sg_query);
408 sg->n_query = gm_ifp->cur_lmqc;
409 sg->query_sbit = false;
410 gm_trigger_specific(sg);
411 }
412 }
413 prev = sg->state;
414 sg->state = desired;
415
416 if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping)
417 new_join = false;
418 else
419 new_join = gm_sg_state_want_join(desired);
420
421 if (new_join && !sg->tib_joined) {
422 /* this will retry if join previously failed */
423 sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
424 gm_ifp->ifp, &sg->oil);
425 if (!sg->tib_joined)
426 zlog_warn(
427 "MLD join for %pSG%%%s not propagated into TIB",
428 &sg->sgaddr, gm_ifp->ifp->name);
429 else
430 zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr,
431 gm_ifp->ifp->name);
432
433 } else if (sg->tib_joined && !new_join) {
434 tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil);
435
436 sg->oil = NULL;
437 sg->tib_joined = false;
438 }
439
440 if (desired == GM_SG_NOINFO) {
441 assertf((!sg->t_sg_expire &&
442 !gm_packet_sg_subs_count(sg->subs_positive) &&
443 !gm_packet_sg_subs_count(sg->subs_negative)),
444 "%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
445 &sg->sgaddr, gm_ifp->ifp->name, has_expired,
446 sg->t_sg_expire, gm_states[prev], gm_states[desired],
447 gm_packet_sg_subs_count(sg->subs_positive),
448 gm_packet_sg_subs_count(sg->subs_negative), grp);
449
450 if (PIM_DEBUG_GM_TRACE)
451 zlog_debug(log_sg(sg, "dropping"));
452
453 gm_sgs_del(gm_ifp->sgs, sg);
454 gm_sg_free(sg);
455 }
456 }
457
458 /****************************************************************************/
459
460 /* the following bunch of functions deals with transferring state from
461 * received packets into gm_packet_state. As a reminder, the querier is
462 * structured to keep all items received in one packet together, since they
 463 * will share expiry timers and thus allow efficient handling.
464 */
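/* layout reminder (details in pim6_mld.h): one gm_packet_state per received
 * report, with items[] holding a gm_packet_sg for each *,G or S,G it
 * carried.  For an EXCLUDE record, the *,G item is immediately followed by
 * its n_exclude blocked S,G items - that is what the "item + 1 + i"
 * arithmetic below relies on.
 */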
465
466 static void gm_packet_free(struct gm_packet_state *pkt)
467 {
468 gm_packet_expires_del(pkt->iface->expires, pkt);
469 gm_packets_del(pkt->subscriber->packets, pkt);
470 gm_subscriber_drop(&pkt->subscriber);
471 XFREE(MTYPE_GM_STATE, pkt);
472 }
473
474 static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt,
475 struct gm_sg *sg, bool is_excl,
476 bool is_src)
477 {
478 struct gm_packet_sg *item;
479
480 assert(pkt->n_active < pkt->n_sg);
481
482 item = &pkt->items[pkt->n_active];
483 item->sg = sg;
484 item->is_excl = is_excl;
485 item->is_src = is_src;
486 item->offset = pkt->n_active;
487
488 pkt->n_active++;
489 return item;
490 }
491
492 static bool gm_packet_sg_drop(struct gm_packet_sg *item)
493 {
494 struct gm_packet_state *pkt;
495 size_t i;
496
497 assert(item->sg);
498
499 pkt = gm_packet_sg2state(item);
500 if (item->sg->most_recent == item)
501 item->sg->most_recent = NULL;
502
503 for (i = 0; i < item->n_exclude; i++) {
504 struct gm_packet_sg *excl_item;
505
506 excl_item = item + 1 + i;
507 if (!excl_item->sg)
508 continue;
509
510 gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item);
511 excl_item->sg = NULL;
512 pkt->n_active--;
513
514 assert(pkt->n_active > 0);
515 }
516
517 if (item->is_excl && item->is_src)
518 gm_packet_sg_subs_del(item->sg->subs_negative, item);
519 else
520 gm_packet_sg_subs_del(item->sg->subs_positive, item);
521 item->sg = NULL;
522 pkt->n_active--;
523
524 if (!pkt->n_active) {
525 gm_packet_free(pkt);
526 return true;
527 }
528 return false;
529 }
530
531 static void gm_packet_drop(struct gm_packet_state *pkt, bool trace)
532 {
533 for (size_t i = 0; i < pkt->n_sg; i++) {
534 struct gm_sg *sg = pkt->items[i].sg;
535 bool deleted;
536
537 if (!sg)
538 continue;
539
540 if (trace && PIM_DEBUG_GM_TRACE)
541 zlog_debug(log_sg(sg, "general-dropping from %pPA"),
542 &pkt->subscriber->addr);
543 deleted = gm_packet_sg_drop(&pkt->items[i]);
544
545 gm_sg_update(sg, true);
546 if (deleted)
547 break;
548 }
549 }
550
551 static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp,
552 struct gm_subscriber *subscriber,
553 pim_addr grp, pim_addr *srcs,
554 size_t n_src, enum gm_sub_sense sense)
555 {
556 struct gm_sg *sg;
557 struct gm_packet_sg *old_src;
558 size_t i;
559
560 for (i = 0; i < n_src; i++) {
561 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
562 if (!sg)
563 continue;
564
565 old_src = gm_packet_sg_find(sg, sense, subscriber);
566 if (!old_src)
567 continue;
568
569 gm_packet_sg_drop(old_src);
570 gm_sg_update(sg, false);
571 }
572 }
573
574 static void gm_sg_expiry_cancel(struct gm_sg *sg)
575 {
576 if (sg->t_sg_expire && PIM_DEBUG_GM_TRACE)
577 zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
578 THREAD_OFF(sg->t_sg_expire);
579 sg->query_sbit = true;
580 }
581
582 /* first pass: process all changes resulting in removal of state:
583 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
584 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
585 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
586 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
587 * note *replacing* state is NOT considered *removing* state here
588 *
589 * everything else is thrown into pkt for creation of state in pass 2
590 */
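/* e.g. a TO_INCLUDE record with an empty source list (the MLDv2 equivalent
 * of a leave) only drops the host's previous *,G EXCLUDE state here in
 * pass 1 and contributes nothing to pass 2; any group-specific query that
 * becomes necessary is kicked off from gm_sg_update() via
 * gm_trigger_specific().
 */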
591 static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
592 struct mld_v2_rec_hdr *rechdr)
593 {
594 /* NB: pkt->subscriber can be NULL here if the subscriber was not
595 * previously seen!
596 */
597 struct gm_subscriber *subscriber = pkt->subscriber;
598 struct gm_sg *grp;
599 struct gm_packet_sg *old_grp = NULL;
600 struct gm_packet_sg *item;
601 size_t n_src = ntohs(rechdr->n_src);
602 size_t j;
603 bool is_excl = false;
604
605 grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY);
606 if (grp && subscriber)
607 old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
608
609 assert(old_grp == NULL || old_grp->is_excl);
610
611 switch (rechdr->type) {
612 case MLD_RECTYPE_IS_EXCLUDE:
613 case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
614 /* this always replaces or creates state */
615 is_excl = true;
616 if (!grp)
617 grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);
618
619 item = gm_packet_sg_setup(pkt, grp, is_excl, false);
620 item->n_exclude = n_src;
621
622 /* [EXCL_INCL_SG_NOTE] referenced below
623 *
624 * in theory, we should drop any S,G that the host may have
625 * previously added in INCLUDE mode. In practice, this is both
626 * incredibly rare and entirely irrelevant. It only makes any
627 * difference if an S,G that the host previously had on the
628 * INCLUDE list is now on the blocked list for EXCLUDE, which
629 * we can cover in processing the S,G list in pass2_excl().
630 *
631 * Other S,G from the host are simply left to expire
632 * "naturally" through general expiry.
633 */
634 break;
635
636 case MLD_RECTYPE_IS_INCLUDE:
637 case MLD_RECTYPE_CHANGE_TO_INCLUDE:
638 if (old_grp) {
639 /* INCLUDE has no *,G state, so old_grp here refers to
640 * previous EXCLUDE => delete it
641 */
642 gm_packet_sg_drop(old_grp);
643 gm_sg_update(grp, false);
644 CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
645 }
646 break;
647
648 case MLD_RECTYPE_ALLOW_NEW_SOURCES:
649 if (old_grp) {
650 /* remove S,Gs from EXCLUDE, and then we're done */
651 gm_packet_sg_remove_sources(pkt->iface, subscriber,
652 rechdr->grp, rechdr->srcs,
653 n_src, GM_SUB_NEG);
654 return;
655 }
656 /* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
 657 * identical to IS_INCLUDE (because the list of sources in
658 * IS_INCLUDE is not exhaustive)
659 */
660 break;
661
662 case MLD_RECTYPE_BLOCK_OLD_SOURCES:
663 if (old_grp) {
664 /* this is intentionally not implemented because it
665 * would be complicated as hell. we only take the list
666 * of blocked sources from full group state records
667 */
668 return;
669 }
670
671 if (subscriber)
672 gm_packet_sg_remove_sources(pkt->iface, subscriber,
673 rechdr->grp, rechdr->srcs,
674 n_src, GM_SUB_POS);
675 return;
676 }
677
678 for (j = 0; j < n_src; j++) {
679 struct gm_sg *sg;
680
681 sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
682 if (!sg)
683 sg = gm_sg_make(pkt->iface, rechdr->grp,
684 rechdr->srcs[j]);
685
686 gm_packet_sg_setup(pkt, sg, is_excl, true);
687 }
688 }
689
690 /* second pass: creating/updating/refreshing state. All the items from the
691 * received packet have already been thrown into gm_packet_state.
692 */
693
694 static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i)
695 {
696 struct gm_packet_sg *item = &pkt->items[i];
697 struct gm_packet_sg *old = NULL;
698 struct gm_sg *sg = item->sg;
699
700 /* EXCLUDE state was already dropped in pass1 */
701 assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber));
702
703 old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber);
704 if (old)
705 gm_packet_sg_drop(old);
706
707 pkt->n_active++;
708 gm_packet_sg_subs_add(sg->subs_positive, item);
709
710 sg->most_recent = item;
711 gm_sg_expiry_cancel(sg);
712 gm_sg_update(sg, false);
713 }
714
715 static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs)
716 {
717 struct gm_packet_sg *item = &pkt->items[offs];
718 struct gm_packet_sg *old_grp, *item_dup;
719 struct gm_sg *sg_grp = item->sg;
720 size_t i;
721
722 old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber);
723 if (old_grp) {
724 for (i = 0; i < item->n_exclude; i++) {
725 struct gm_packet_sg *item_src, *old_src;
726
727 item_src = &pkt->items[offs + 1 + i];
728 old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG,
729 pkt->subscriber);
730 if (old_src)
731 gm_packet_sg_drop(old_src);
732
733 /* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
734 * items left over if the host previously had INCLUDE
735 * mode going. Remove them here if we find any.
736 */
737 old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS,
738 pkt->subscriber);
739 if (old_src)
740 gm_packet_sg_drop(old_src);
741 }
742
743 /* the previous loop has removed the S,G entries which are
744 * still excluded after this update. So anything left on the
745 * old item was previously excluded but is now included
746 * => need to trigger update on S,G
747 */
748 for (i = 0; i < old_grp->n_exclude; i++) {
749 struct gm_packet_sg *old_src;
750 struct gm_sg *old_sg_src;
751
752 old_src = old_grp + 1 + i;
753 old_sg_src = old_src->sg;
754 if (!old_sg_src)
755 continue;
756
757 gm_packet_sg_drop(old_src);
758 gm_sg_update(old_sg_src, false);
759 }
760
761 gm_packet_sg_drop(old_grp);
762 }
763
764 item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item);
765 assert(!item_dup);
766 pkt->n_active++;
767
768 sg_grp->most_recent = item;
769 gm_sg_expiry_cancel(sg_grp);
770
771 for (i = 0; i < item->n_exclude; i++) {
772 struct gm_packet_sg *item_src;
773
774 item_src = &pkt->items[offs + 1 + i];
775 item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative,
776 item_src);
777
778 if (item_dup)
779 item_src->sg = NULL;
780 else {
781 pkt->n_active++;
782 gm_sg_update(item_src->sg, false);
783 }
784 }
785
786 /* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
787 * to get lower PIM churn/flapping
788 */
789 gm_sg_update(sg_grp, false);
790 }
791
792 CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state");
793 /* on receiving a query, we need to update our robustness/query interval to
794 * match, so we correctly process group/source specific queries after last
795 * member leaves
796 */
797
798 static void gm_handle_v2_report(struct gm_if *gm_ifp,
799 const struct sockaddr_in6 *pkt_src, char *data,
800 size_t len)
801 {
802 struct mld_v2_report_hdr *hdr;
803 size_t i, n_records, max_entries;
804 struct gm_packet_state *pkt;
805
806 if (len < sizeof(*hdr)) {
807 if (PIM_DEBUG_GM_PACKETS)
808 zlog_debug(log_pkt_src(
809 "malformed MLDv2 report (truncated header)"));
810 gm_ifp->stats.rx_drop_malformed++;
811 return;
812 }
813
 814 /* errors beyond this point may still leave the packet partially processed */
815 gm_ifp->stats.rx_new_report++;
816
817 hdr = (struct mld_v2_report_hdr *)data;
818 data += sizeof(*hdr);
819 len -= sizeof(*hdr);
820
 821 /* can't have more *,G and S,G items than there is space for IPv6
 822 * addresses, so just use this to allocate a temporary buffer
823 */
824 max_entries = len / sizeof(pim_addr);
825 pkt = XCALLOC(MTYPE_GM_STATE,
826 offsetof(struct gm_packet_state, items[max_entries]));
827 pkt->n_sg = max_entries;
828 pkt->iface = gm_ifp;
829 pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);
830
831 n_records = ntohs(hdr->n_records);
832
833 /* validate & remove state in v2_pass1() */
834 for (i = 0; i < n_records; i++) {
835 struct mld_v2_rec_hdr *rechdr;
836 size_t n_src, record_size;
837
838 if (len < sizeof(*rechdr)) {
839 zlog_warn(log_pkt_src(
840 "malformed MLDv2 report (truncated record header)"));
841 gm_ifp->stats.rx_trunc_report++;
842 break;
843 }
844
845 rechdr = (struct mld_v2_rec_hdr *)data;
846 data += sizeof(*rechdr);
847 len -= sizeof(*rechdr);
848
849 n_src = ntohs(rechdr->n_src);
850 record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4;
851
852 if (len < record_size) {
853 zlog_warn(log_pkt_src(
854 "malformed MLDv2 report (truncated source list)"));
855 gm_ifp->stats.rx_trunc_report++;
856 break;
857 }
858 if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) {
859 zlog_warn(
860 log_pkt_src(
861 "malformed MLDv2 report (invalid group %pI6)"),
862 &rechdr->grp);
863 gm_ifp->stats.rx_trunc_report++;
864 break;
865 }
866
867 data += record_size;
868 len -= record_size;
869
870 gm_handle_v2_pass1(pkt, rechdr);
871 }
872
873 if (!pkt->n_active) {
874 gm_subscriber_drop(&pkt->subscriber);
875 XFREE(MTYPE_GM_STATE, pkt);
876 return;
877 }
878
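/* shrink the over-allocated packet state down to the items pass 1 kept */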
879 pkt = XREALLOC(MTYPE_GM_STATE, pkt,
880 offsetof(struct gm_packet_state, items[pkt->n_active]));
881 pkt->n_sg = pkt->n_active;
882 pkt->n_active = 0;
883
884 monotime(&pkt->received);
885 if (!pkt->subscriber)
886 pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr);
887 gm_packets_add_tail(pkt->subscriber->packets, pkt);
888 gm_packet_expires_add_tail(gm_ifp->expires, pkt);
889
890 for (i = 0; i < pkt->n_sg; i++)
891 if (!pkt->items[i].is_excl)
892 gm_handle_v2_pass2_incl(pkt, i);
893 else {
894 gm_handle_v2_pass2_excl(pkt, i);
895 i += pkt->items[i].n_exclude;
896 }
897
898 if (pkt->n_active == 0)
899 gm_packet_free(pkt);
900 }
901
902 static void gm_handle_v1_report(struct gm_if *gm_ifp,
903 const struct sockaddr_in6 *pkt_src, char *data,
904 size_t len)
905 {
906 struct mld_v1_pkt *hdr;
907 struct gm_packet_state *pkt;
908 struct gm_sg *grp;
909 struct gm_packet_sg *item;
910 size_t max_entries;
911
912 if (len < sizeof(*hdr)) {
913 if (PIM_DEBUG_GM_PACKETS)
914 zlog_debug(log_pkt_src(
915 "malformed MLDv1 report (truncated)"));
916 gm_ifp->stats.rx_drop_malformed++;
917 return;
918 }
919
920 gm_ifp->stats.rx_old_report++;
921
922 hdr = (struct mld_v1_pkt *)data;
923
924 max_entries = 1;
925 pkt = XCALLOC(MTYPE_GM_STATE,
926 offsetof(struct gm_packet_state, items[max_entries]));
927 pkt->n_sg = max_entries;
928 pkt->iface = gm_ifp;
929 pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
930
931 /* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */
932
933 grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY);
934 if (!grp)
935 grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY);
936
937 item = gm_packet_sg_setup(pkt, grp, true, false);
938 item->n_exclude = 0;
939 CPP_NOTICE("set v1-seen timer on grp here");
940
941 /* } */
942
943 /* pass2 will count n_active back up to 1. Also since a v1 report
944 * has exactly 1 group, we can skip the realloc() that v2 needs here.
945 */
946 assert(pkt->n_active == 1);
947 pkt->n_sg = pkt->n_active;
948 pkt->n_active = 0;
949
950 monotime(&pkt->received);
951 if (!pkt->subscriber)
952 pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked);
953 gm_packets_add_tail(pkt->subscriber->packets, pkt);
954 gm_packet_expires_add_tail(gm_ifp->expires, pkt);
955
956 /* pass2 covers installing state & removing old state; all the v1
957 * compat is handled at this point.
958 *
959 * Note that "old state" may be v2; subscribers will switch from v2
960 * reports to v1 reports when the querier changes from v2 to v1. So,
961 * limiting this to v1 would be wrong.
962 */
963 gm_handle_v2_pass2_excl(pkt, 0);
964
965 if (pkt->n_active == 0)
966 gm_packet_free(pkt);
967 }
968
969 static void gm_handle_v1_leave(struct gm_if *gm_ifp,
970 const struct sockaddr_in6 *pkt_src, char *data,
971 size_t len)
972 {
973 struct mld_v1_pkt *hdr;
974 struct gm_subscriber *subscriber;
975 struct gm_sg *grp;
976 struct gm_packet_sg *old_grp;
977
978 if (len < sizeof(*hdr)) {
979 if (PIM_DEBUG_GM_PACKETS)
980 zlog_debug(log_pkt_src(
981 "malformed MLDv1 leave (truncated)"));
982 gm_ifp->stats.rx_drop_malformed++;
983 return;
984 }
985
986 gm_ifp->stats.rx_old_leave++;
987
988 hdr = (struct mld_v1_pkt *)data;
989
990 subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
991 if (!subscriber)
992 return;
993
994 /* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */
995
996 grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY);
997 if (grp) {
998 old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
999 if (old_grp) {
1000 gm_packet_sg_drop(old_grp);
1001 gm_sg_update(grp, false);
1002 CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
1003 }
1004 }
1005
1006 /* } */
1007
1008 /* nothing more to do here, pass2 is no-op for leaves */
1009 gm_subscriber_drop(&subscriber);
1010 }
1011
1012 /* for each general query received (or sent), a timer is started to expire
1013 * _everything_ at the appropriate time (including robustness multiplier).
1014 *
1015 * So when this timer hits, all packets - with all of their items - that were
1016 * received *before* the query are aged out, and state updated accordingly.
1017 * Note that when we receive a refresh/update, the previous/old packet is
1018 * already dropped and replaced with a new one, so in normal steady-state
1019 * operation, this timer won't be doing anything.
1020 *
1021 * Additionally, if a subscriber actively leaves a group, that goes through
1022 * its own path too and won't hit this. This is really only triggered when a
1023 * host straight up disappears.
1024 */
1025 static void gm_t_expire(struct thread *t)
1026 {
1027 struct gm_if *gm_ifp = THREAD_ARG(t);
1028 struct gm_packet_state *pkt;
1029
1030 zlog_info(log_ifp("general expiry timer"));
1031
1032 while (gm_ifp->n_pending) {
1033 struct gm_general_pending *pend = gm_ifp->pending;
1034 struct timeval remain;
1035 int64_t remain_ms;
1036
1037 remain_ms = monotime_until(&pend->expiry, &remain);
1038 if (remain_ms > 0) {
1039 if (PIM_DEBUG_GM_EVENTS)
1040 zlog_debug(
1041 log_ifp("next general expiry in %" PRId64 "ms"),
1042 remain_ms / 1000);
1043
1044 thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
1045 &remain, &gm_ifp->t_expire);
1046 return;
1047 }
1048
1049 while ((pkt = gm_packet_expires_first(gm_ifp->expires))) {
1050 if (timercmp(&pkt->received, &pend->query, >=))
1051 break;
1052
1053 if (PIM_DEBUG_GM_PACKETS)
1054 zlog_debug(log_ifp("expire packet %p"), pkt);
1055 gm_packet_drop(pkt, true);
1056 }
1057
1058 gm_ifp->n_pending--;
1059 memmove(gm_ifp->pending, gm_ifp->pending + 1,
1060 gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
1061 }
1062
1063 if (PIM_DEBUG_GM_EVENTS)
1064 zlog_debug(log_ifp("next general expiry waiting for query"));
1065 }
1066
1067 /* NB: the receive handlers will also run when sending packets, since we
1068 * receive our own packets back in.
1069 */
1070 static void gm_handle_q_general(struct gm_if *gm_ifp,
1071 struct gm_query_timers *timers)
1072 {
1073 struct timeval now, expiry;
1074 struct gm_general_pending *pend;
1075
1076 monotime(&now);
1077 timeradd(&now, &timers->expire_wait, &expiry);
1078
1079 while (gm_ifp->n_pending) {
1080 pend = &gm_ifp->pending[gm_ifp->n_pending - 1];
1081
1082 if (timercmp(&pend->expiry, &expiry, <))
1083 break;
1084
1085 /* if we end up here, the last item in pending[] has an expiry
1086 * later than the expiry for this query. But our query time
1087 * (now) is later than that of the item (because, well, that's
1088 * how time works.) This makes this query meaningless since
1089 * it's "supersetted" within the preexisting query
1090 */
1091
1092 if (PIM_DEBUG_GM_TRACE_DETAIL)
1093 zlog_debug(
1094 log_ifp("zapping supersetted general timer %pTVMu"),
1095 &pend->expiry);
1096
1097 gm_ifp->n_pending--;
1098 if (!gm_ifp->n_pending)
1099 THREAD_OFF(gm_ifp->t_expire);
1100 }
1101
1102 /* people might be messing with their configs or something */
1103 if (gm_ifp->n_pending == array_size(gm_ifp->pending))
1104 return;
1105
1106 pend = &gm_ifp->pending[gm_ifp->n_pending];
1107 pend->query = now;
1108 pend->expiry = expiry;
1109
1110 if (!gm_ifp->n_pending++) {
1111 if (PIM_DEBUG_GM_TRACE)
1112 zlog_debug(
1113 log_ifp("starting general timer @ 0: %pTVMu"),
1114 &pend->expiry);
1115 thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
1116 &timers->expire_wait, &gm_ifp->t_expire);
1117 } else if (PIM_DEBUG_GM_TRACE)
1118 zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
1119 gm_ifp->n_pending, &pend->expiry);
1120 }
1121
1122 static void gm_t_sg_expire(struct thread *t)
1123 {
1124 struct gm_sg *sg = THREAD_ARG(t);
1125 struct gm_if *gm_ifp = sg->iface;
1126 struct gm_packet_sg *item;
1127
1128 assertf(sg->state == GM_SG_JOIN_EXPIRING ||
1129 sg->state == GM_SG_NOPRUNE_EXPIRING,
1130 "%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t);
1131
1132 frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
1133 /* this will also drop EXCLUDE mode S,G lists together with
1134 * the *,G entry
1135 */
1136 gm_packet_sg_drop(item);
1137
1138 /* subs_negative items are only timed out together with the *,G entry
1139 * since we won't get any reports for a group-and-source query
1140 */
1141 gm_sg_update(sg, true);
1142 }
1143
1144 static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg,
1145 struct timeval ref)
1146 {
1147 struct gm_packet_state *pkt;
1148
1149 if (!sg->most_recent) {
1150 struct gm_packet_state *best_pkt = NULL;
1151 struct gm_packet_sg *item;
1152
1153 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
1154 pkt = gm_packet_sg2state(item);
1155
1156 if (!best_pkt ||
1157 timercmp(&pkt->received, &best_pkt->received, >)) {
1158 best_pkt = pkt;
1159 sg->most_recent = item;
1160 }
1161 }
1162 }
1163 if (sg->most_recent) {
1164 struct timeval fuzz;
1165
1166 pkt = gm_packet_sg2state(sg->most_recent);
1167
 1168 /* this shouldn't happen on a plain old real Ethernet segment,
1169 * but on something like a VXLAN or VPLS it is very possible
1170 * that we get a report before the query that triggered it.
1171 * (imagine a triangle scenario with 3 datacenters, it's very
1172 * possible A->B + B->C is faster than A->C due to odd routing)
1173 *
1174 * This makes a little tolerance allowance to handle that case.
1175 */
1176 timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz);
1177
1178 if (timercmp(&fuzz, &ref, >))
1179 return true;
1180 }
1181 return false;
1182 }
1183
1184 static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
1185 struct timeval expire_wait)
1186 {
1187 struct timeval now;
1188
1189 if (!sg)
1190 return;
1191 if (sg->state == GM_SG_PRUNE)
1192 return;
1193
1194 monotime(&now);
1195 if (gm_sg_check_recent(gm_ifp, sg, now))
1196 return;
1197
1198 if (PIM_DEBUG_GM_TRACE)
1199 zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait);
1200
1201 if (sg->t_sg_expire) {
1202 struct timeval remain;
1203
1204 remain = thread_timer_remain(sg->t_sg_expire);
1205 if (timercmp(&remain, &expire_wait, <=))
1206 return;
1207
1208 THREAD_OFF(sg->t_sg_expire);
1209 }
1210
1211 thread_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
1212 &sg->t_sg_expire);
1213 }
1214
1215 static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
1216 struct gm_query_timers *timers, pim_addr grp,
1217 const pim_addr *srcs, size_t n_src)
1218 {
1219 struct gm_sg *sg;
1220 size_t i;
1221
1222 for (i = 0; i < n_src; i++) {
1223 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
1224 gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1225 }
1226 }
1227
1228 static void gm_t_grp_expire(struct thread *t)
1229 {
1230 /* if we're here, that means when we received the group-specific query
 1231 * there were one or more active S,G entries for this group. For *,G the timer
1232 * in sg->t_sg_expire is running separately and gets cancelled when we
1233 * receive a report, so that work is left to gm_t_sg_expire and we
1234 * shouldn't worry about it here.
1235 */
1236 struct gm_grp_pending *pend = THREAD_ARG(t);
1237 struct gm_if *gm_ifp = pend->iface;
1238 struct gm_sg *sg, *sg_start, sg_ref = {};
1239
1240 if (PIM_DEBUG_GM_EVENTS)
1241 zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);
1242
1243 /* gteq lookup - try to find *,G or S,G (S,G is > *,G)
1244 * could technically be gt to skip a possible *,G
1245 */
1246 sg_ref.sgaddr.grp = pend->grp;
1247 sg_ref.sgaddr.src = PIMADDR_ANY;
1248 sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
1249
1250 frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
1251 struct gm_packet_sg *item;
1252
1253 if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
1254 break;
1255 if (pim_addr_is_any(sg->sgaddr.src))
1256 /* handled by gm_t_sg_expire / sg->t_sg_expire */
1257 continue;
1258 if (gm_sg_check_recent(gm_ifp, sg, pend->query))
1259 continue;
1260
1261 /* we may also have a group-source-specific query going on in
1262 * parallel. But if we received nothing for the *,G query,
1263 * the S,G query is kinda irrelevant.
1264 */
1265 THREAD_OFF(sg->t_sg_expire);
1266
1267 frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
1268 /* this will also drop the EXCLUDE S,G lists */
1269 gm_packet_sg_drop(item);
1270
1271 gm_sg_update(sg, true);
1272 }
1273
1274 gm_grp_pends_del(gm_ifp->grp_pends, pend);
1275 XFREE(MTYPE_GM_GRP_PENDING, pend);
1276 }
1277
1278 static void gm_handle_q_group(struct gm_if *gm_ifp,
1279 struct gm_query_timers *timers, pim_addr grp)
1280 {
1281 struct gm_sg *sg, sg_ref = {};
1282 struct gm_grp_pending *pend, pend_ref = {};
1283
1284 sg_ref.sgaddr.grp = grp;
1285 sg_ref.sgaddr.src = PIMADDR_ANY;
1286 /* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
1287 sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
1288
1289 if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
1290 /* we have nothing at all for this group - don't waste RAM */
1291 return;
1292
1293 if (pim_addr_is_any(sg->sgaddr.src)) {
1294 /* actually found *,G entry here */
1295 if (PIM_DEBUG_GM_TRACE)
1296 zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
1297 &grp);
1298 gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1299
1300 sg = gm_sgs_next(gm_ifp->sgs, sg);
1301 if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
1302 /* no S,G for this group */
1303 return;
1304 }
1305
1306 pend_ref.grp = grp;
1307 pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);
1308
1309 if (pend) {
1310 struct timeval remain;
1311
1312 remain = thread_timer_remain(pend->t_expire);
1313 if (timercmp(&remain, &timers->expire_wait, <=))
1314 return;
1315
1316 THREAD_OFF(pend->t_expire);
1317 } else {
1318 pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
1319 pend->grp = grp;
1320 pend->iface = gm_ifp;
1321 gm_grp_pends_add(gm_ifp->grp_pends, pend);
1322 }
1323
1324 monotime(&pend->query);
1325 thread_add_timer_tv(router->master, gm_t_grp_expire, pend,
1326 &timers->expire_wait, &pend->t_expire);
1327
1328 if (PIM_DEBUG_GM_TRACE)
1329 zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
1330 pend->t_expire);
1331 }
1332
1333 static void gm_bump_querier(struct gm_if *gm_ifp)
1334 {
1335 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1336
1337 THREAD_OFF(gm_ifp->t_query);
1338
1339 if (pim_addr_is_any(pim_ifp->ll_lowest))
1340 return;
1341 if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
1342 return;
1343
1344 gm_ifp->n_startup = gm_ifp->cur_qrv;
1345
1346 thread_execute(router->master, gm_t_query, gm_ifp, 0);
1347 }
1348
1349 static void gm_t_other_querier(struct thread *t)
1350 {
1351 struct gm_if *gm_ifp = THREAD_ARG(t);
1352 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1353
1354 zlog_info(log_ifp("other querier timer expired"));
1355
1356 gm_ifp->querier = pim_ifp->ll_lowest;
1357 gm_ifp->n_startup = gm_ifp->cur_qrv;
1358
1359 thread_execute(router->master, gm_t_query, gm_ifp, 0);
1360 }
1361
1362 static void gm_handle_query(struct gm_if *gm_ifp,
1363 const struct sockaddr_in6 *pkt_src,
1364 pim_addr *pkt_dst, char *data, size_t len)
1365 {
1366 struct mld_v2_query_hdr *hdr;
1367 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1368 struct gm_query_timers timers;
1369 bool general_query;
1370
1371 if (len < sizeof(struct mld_v2_query_hdr) &&
1372 len != sizeof(struct mld_v1_pkt)) {
1373 zlog_warn(log_pkt_src("invalid query size"));
1374 gm_ifp->stats.rx_drop_malformed++;
1375 return;
1376 }
1377
1378 hdr = (struct mld_v2_query_hdr *)data;
1379 general_query = pim_addr_is_any(hdr->grp);
1380
1381 if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) {
1382 zlog_warn(log_pkt_src(
1383 "malformed MLDv2 query (invalid group %pI6)"),
1384 &hdr->grp);
1385 gm_ifp->stats.rx_drop_malformed++;
1386 return;
1387 }
1388
1389 if (len >= sizeof(struct mld_v2_query_hdr)) {
1390 size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);
1391
1392 if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
1393 zlog_warn(log_pkt_src(
1394 "malformed MLDv2 query (truncated source list)"));
1395 gm_ifp->stats.rx_drop_malformed++;
1396 return;
1397 }
1398
1399 if (general_query && src_space) {
1400 zlog_warn(log_pkt_src(
1401 "malformed MLDv2 query (general query with non-empty source list)"));
1402 gm_ifp->stats.rx_drop_malformed++;
1403 return;
1404 }
1405 }
1406
1407 /* accepting queries unicast to us (or addressed to a wrong group)
1408 * can mess up querier election as well as cause us to terminate
1409 * traffic (since after a unicast query no reports will be coming in)
1410 */
1411 if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) {
1412 if (pim_addr_is_any(hdr->grp)) {
1413 zlog_warn(
1414 log_pkt_src(
1415 "wrong destination %pPA for general query"),
1416 pkt_dst);
1417 gm_ifp->stats.rx_drop_dstaddr++;
1418 return;
1419 }
1420
1421 if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) {
1422 gm_ifp->stats.rx_drop_dstaddr++;
1423 zlog_warn(
1424 log_pkt_src(
1425 "wrong destination %pPA for group specific query"),
1426 pkt_dst);
1427 return;
1428 }
1429 }
1430
1431 if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
1432 if (PIM_DEBUG_GM_EVENTS)
1433 zlog_debug(
1434 log_pkt_src("replacing elected querier %pPA"),
1435 &gm_ifp->querier);
1436
1437 gm_ifp->querier = pkt_src->sin6_addr;
1438 }
1439
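/* MLDv1 queries carry the maximum response delay directly in milliseconds
 * and have no QRV/QQIC fields, so fall back to our configured values for
 * those; MLDv2 has QRV in the low 3 flag bits and encoded max_resp/QQIC
 * values that need decoding first.
 */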
1440 if (len == sizeof(struct mld_v1_pkt)) {
1441 timers.qrv = gm_ifp->cur_qrv;
1442 timers.max_resp_ms = hdr->max_resp_code;
1443 timers.qqic_ms = gm_ifp->cur_query_intv;
1444 } else {
1445 timers.qrv = (hdr->flags & 0x7) ?: 8;
1446 timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code);
1447 timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000;
1448 }
1449 timers.fuzz = gm_ifp->cfg_timing_fuzz;
1450
1451 gm_expiry_calc(&timers);
1452
1453 if (PIM_DEBUG_GM_TRACE_DETAIL)
1454 zlog_debug(
1455 log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
1456 timers.qrv, timers.max_resp_ms, timers.qqic_ms,
1457 &timers.expire_wait);
1458
1459 if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
1460 unsigned int other_ms;
1461
1462 THREAD_OFF(gm_ifp->t_query);
1463 THREAD_OFF(gm_ifp->t_other_querier);
1464
1465 other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
1466 thread_add_timer_msec(router->master, gm_t_other_querier,
1467 gm_ifp, other_ms,
1468 &gm_ifp->t_other_querier);
1469 }
1470
1471 if (len == sizeof(struct mld_v1_pkt)) {
1472 if (general_query) {
1473 gm_handle_q_general(gm_ifp, &timers);
1474 gm_ifp->stats.rx_query_old_general++;
1475 } else {
1476 gm_handle_q_group(gm_ifp, &timers, hdr->grp);
1477 gm_ifp->stats.rx_query_old_group++;
1478 }
1479 return;
1480 }
1481
1482 /* v2 query - [S]uppress bit */
1483 if (hdr->flags & 0x8) {
1484 gm_ifp->stats.rx_query_new_sbit++;
1485 return;
1486 }
1487
1488 if (general_query) {
1489 gm_handle_q_general(gm_ifp, &timers);
1490 gm_ifp->stats.rx_query_new_general++;
1491 } else if (!ntohs(hdr->n_src)) {
1492 gm_handle_q_group(gm_ifp, &timers, hdr->grp);
1493 gm_ifp->stats.rx_query_new_group++;
1494 } else {
1495 gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
1496 ntohs(hdr->n_src));
1497 gm_ifp->stats.rx_query_new_groupsrc++;
1498 }
1499 }
1500
1501 static void gm_rx_process(struct gm_if *gm_ifp,
1502 const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst,
1503 void *data, size_t pktlen)
1504 {
1505 struct icmp6_plain_hdr *icmp6 = data;
1506 uint16_t pkt_csum, ref_csum;
1507 struct ipv6_ph ph6 = {
1508 .src = pkt_src->sin6_addr,
1509 .dst = *pkt_dst,
1510 .ulpl = htons(pktlen),
1511 .next_hdr = IPPROTO_ICMPV6,
1512 };
1513
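/* the ICMPv6 checksum is computed over an IPv6 pseudo-header (source,
 * destination, upper-layer length, next header) plus the message itself,
 * so rebuild the pseudo-header from the receive metadata to verify it.
 */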
1514 pkt_csum = icmp6->icmp6_cksum;
1515 icmp6->icmp6_cksum = 0;
1516 ref_csum = in_cksum_with_ph6(&ph6, data, pktlen);
1517
1518 if (pkt_csum != ref_csum) {
1519 zlog_warn(
1520 log_pkt_src(
1521 "(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
1522 pkt_dst, pkt_csum, ref_csum);
1523 gm_ifp->stats.rx_drop_csum++;
1524 return;
1525 }
1526
1527 data = (icmp6 + 1);
1528 pktlen -= sizeof(*icmp6);
1529
1530 switch (icmp6->icmp6_type) {
1531 case ICMP6_MLD_QUERY:
1532 gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen);
1533 break;
1534 case ICMP6_MLD_V1_REPORT:
1535 gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen);
1536 break;
1537 case ICMP6_MLD_V1_DONE:
1538 gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen);
1539 break;
1540 case ICMP6_MLD_V2_REPORT:
1541 gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen);
1542 break;
1543 }
1544 }
1545
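/* scan a received IPv6 Hop-by-Hop options header for a Router Alert option
 * carrying the given 16-bit value (0 = MLD, cf. gm_send_query() below).
 * hopopts points at the raw header: next-header byte, a length byte in
 * 8-byte units minus one, then TLV options where PAD1 is a single byte.
 */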
1546 static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
1547 uint16_t alert_type)
1548 {
1549 uint8_t *hopopt_end;
1550
1551 if (hopopt_len < 8)
1552 return false;
1553 if (hopopt_len < (hopopts[1] + 1U) * 8U)
1554 return false;
1555
1556 hopopt_end = hopopts + (hopopts[1] + 1) * 8;
1557 hopopts += 2;
1558
1559 while (hopopts < hopopt_end) {
1560 if (hopopts[0] == IP6OPT_PAD1) {
1561 hopopts++;
1562 continue;
1563 }
1564
1565 if (hopopts > hopopt_end - 2)
1566 break;
1567 if (hopopts > hopopt_end - 2 - hopopts[1])
1568 break;
1569
1570 if (hopopts[0] == IP6OPT_ROUTER_ALERT && hopopts[1] == 2) {
1571 uint16_t have_type = (hopopts[2] << 8) | hopopts[3];
1572
1573 if (have_type == alert_type)
1574 return true;
1575 }
1576
1577 hopopts += 2 + hopopts[1];
1578 }
1579 return false;
1580 }
1581
1582 static void gm_t_recv(struct thread *t)
1583 {
1584 struct pim_instance *pim = THREAD_ARG(t);
1585 union {
1586 char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
1587 CMSG_SPACE(256) /* hop options */ +
1588 CMSG_SPACE(sizeof(int)) /* hopcount */];
1589 struct cmsghdr align;
1590 } cmsgbuf;
1591 struct cmsghdr *cmsg;
1592 struct in6_pktinfo *pktinfo = NULL;
1593 uint8_t *hopopts = NULL;
1594 size_t hopopt_len = 0;
1595 int *hoplimit = NULL;
1596 char rxbuf[2048];
1597 struct msghdr mh[1] = {};
1598 struct iovec iov[1];
1599 struct sockaddr_in6 pkt_src[1] = {};
1600 ssize_t nread;
1601 size_t pktlen;
1602
1603 thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
1604 &pim->t_gm_recv);
1605
1606 iov->iov_base = rxbuf;
1607 iov->iov_len = sizeof(rxbuf);
1608
1609 mh->msg_name = pkt_src;
1610 mh->msg_namelen = sizeof(pkt_src);
1611 mh->msg_control = cmsgbuf.buf;
1612 mh->msg_controllen = sizeof(cmsgbuf.buf);
1613 mh->msg_iov = iov;
1614 mh->msg_iovlen = array_size(iov);
1615 mh->msg_flags = 0;
1616
1617 nread = recvmsg(pim->gm_socket, mh, MSG_PEEK | MSG_TRUNC);
1618 if (nread <= 0) {
1619 zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
1620 pim->gm_rx_drop_sys++;
1621 return;
1622 }
1623
1624 if ((size_t)nread > sizeof(rxbuf)) {
1625 iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread);
1626 iov->iov_len = nread;
1627 }
1628 nread = recvmsg(pim->gm_socket, mh, 0);
1629 if (nread <= 0) {
1630 zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
1631 pim->gm_rx_drop_sys++;
1632 goto out_free;
1633 }
1634
1635 struct interface *ifp;
1636
1637 ifp = if_lookup_by_index(pkt_src->sin6_scope_id, pim->vrf->vrf_id);
1638 if (!ifp || !ifp->info)
1639 goto out_free;
1640
1641 struct pim_interface *pim_ifp = ifp->info;
1642 struct gm_if *gm_ifp = pim_ifp->mld;
1643
1644 if (!gm_ifp)
1645 goto out_free;
1646
1647 for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) {
1648 if (cmsg->cmsg_level != SOL_IPV6)
1649 continue;
1650
1651 switch (cmsg->cmsg_type) {
1652 case IPV6_PKTINFO:
1653 pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
1654 break;
1655 case IPV6_HOPOPTS:
1656 hopopts = CMSG_DATA(cmsg);
1657 hopopt_len = cmsg->cmsg_len - sizeof(*cmsg);
1658 break;
1659 case IPV6_HOPLIMIT:
1660 hoplimit = (int *)CMSG_DATA(cmsg);
1661 break;
1662 }
1663 }
1664
1665 if (!pktinfo || !hoplimit) {
1666 zlog_err(log_ifp(
1667 "BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
1668 pim->gm_rx_drop_sys++;
1669 goto out_free;
1670 }
1671
1672 if (*hoplimit != 1) {
1673 zlog_err(log_pkt_src("packet with hop limit != 1"));
1674 /* spoofing attempt => count on srcaddr counter */
1675 gm_ifp->stats.rx_drop_srcaddr++;
1676 goto out_free;
1677 }
1678
1679 if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) {
1680 zlog_err(log_pkt_src(
1681 "packet without IPv6 Router Alert MLD option"));
1682 gm_ifp->stats.rx_drop_ra++;
1683 goto out_free;
1684 }
1685
1686 if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr))
1687 /* reports from :: happen in normal operation for DAD, so
1688 * don't spam log messages about this
1689 */
1690 goto out_free;
1691
1692 if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) {
1693 zlog_warn(log_pkt_src("packet from invalid source address"));
1694 gm_ifp->stats.rx_drop_srcaddr++;
1695 goto out_free;
1696 }
1697
1698 pktlen = nread;
1699 if (pktlen < sizeof(struct icmp6_plain_hdr)) {
1700 zlog_warn(log_pkt_src("truncated packet"));
1701 gm_ifp->stats.rx_drop_malformed++;
1702 goto out_free;
1703 }
1704
1705 gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base,
1706 pktlen);
1707
1708 out_free:
1709 if (iov->iov_base != rxbuf)
1710 XFREE(MTYPE_GM_PACKET, iov->iov_base);
1711 }
1712
1713 static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
1714 const pim_addr *srcs, size_t n_srcs, bool s_bit)
1715 {
1716 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1717 struct sockaddr_in6 dstaddr = {
1718 .sin6_family = AF_INET6,
1719 .sin6_scope_id = gm_ifp->ifp->ifindex,
1720 };
1721 struct {
1722 struct icmp6_plain_hdr hdr;
1723 struct mld_v2_query_hdr v2_query;
1724 } query = {
1725 /* clang-format off */
1726 .hdr = {
1727 .icmp6_type = ICMP6_MLD_QUERY,
1728 .icmp6_code = 0,
1729 },
1730 .v2_query = {
1731 .grp = grp,
1732 },
1733 /* clang-format on */
1734 };
1735 struct ipv6_ph ph6 = {
1736 .src = pim_ifp->ll_lowest,
1737 .ulpl = htons(sizeof(query)),
1738 .next_hdr = IPPROTO_ICMPV6,
1739 };
1740 union {
1741 char buf[CMSG_SPACE(8) /* hop options */ +
1742 CMSG_SPACE(sizeof(struct in6_pktinfo))];
1743 struct cmsghdr align;
1744 } cmsg = {};
1745 struct cmsghdr *cmh;
1746 struct msghdr mh[1] = {};
1747 struct iovec iov[3];
1748 size_t iov_len;
1749 ssize_t ret, expect_ret;
1750 uint8_t *dp;
1751 struct in6_pktinfo *pktinfo;
1752
1753 if (if_is_loopback(gm_ifp->ifp)) {
1754 /* Linux is a bit odd with multicast on loopback */
1755 ph6.src = in6addr_loopback;
1756 dstaddr.sin6_addr = in6addr_loopback;
1757 } else if (pim_addr_is_any(grp))
1758 dstaddr.sin6_addr = gm_all_hosts;
1759 else
1760 dstaddr.sin6_addr = grp;
1761
1762 query.v2_query.max_resp_code =
1763 mld_max_resp_encode(gm_ifp->cur_max_resp);
1764 query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? gm_ifp->cur_qrv : 0;
1765 if (s_bit)
1766 query.v2_query.flags |= 0x08;
1767 query.v2_query.qqic =
1768 igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000);
1769 query.v2_query.n_src = htons(n_srcs);
1770
1771 ph6.dst = dstaddr.sin6_addr;
1772
1773 /* ph6 not included in sendmsg */
1774 iov[0].iov_base = &ph6;
1775 iov[0].iov_len = sizeof(ph6);
1776 iov[1].iov_base = &query;
1777 if (gm_ifp->cur_version == GM_MLDV1) {
1778 iov_len = 2;
1779 iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt);
1780 } else if (!n_srcs) {
1781 iov_len = 2;
1782 iov[1].iov_len = sizeof(query);
1783 } else {
1784 iov[1].iov_len = sizeof(query);
1785 iov[2].iov_base = (void *)srcs;
1786 iov[2].iov_len = n_srcs * sizeof(srcs[0]);
1787 iov_len = 3;
1788 }
1789
1790 query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);
1791
1792 if (PIM_DEBUG_GM_PACKETS)
1793 zlog_debug(
1794 log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
1795 &pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);
1796
1797 mh->msg_name = &dstaddr;
1798 mh->msg_namelen = sizeof(dstaddr);
1799 mh->msg_iov = iov + 1;
1800 mh->msg_iovlen = iov_len - 1;
1801 mh->msg_control = &cmsg;
1802 mh->msg_controllen = sizeof(cmsg.buf);
1803
1804 cmh = CMSG_FIRSTHDR(mh);
1805 cmh->cmsg_level = IPPROTO_IPV6;
1806 cmh->cmsg_type = IPV6_HOPOPTS;
1807 cmh->cmsg_len = CMSG_LEN(8);
1808 dp = CMSG_DATA(cmh);
1809 *dp++ = 0; /* next header */
1810 *dp++ = 0; /* length (8-byte blocks, minus 1) */
1811 *dp++ = IP6OPT_ROUTER_ALERT; /* router alert */
1812 *dp++ = 2; /* length */
1813 *dp++ = 0; /* value (2 bytes) */
1814 *dp++ = 0; /* value (2 bytes) (0 = MLD) */
1815 *dp++ = 0; /* pad0 */
1816 *dp++ = 0; /* pad0 */
1817
1818 cmh = CMSG_NXTHDR(mh, cmh);
1819 cmh->cmsg_level = IPPROTO_IPV6;
1820 cmh->cmsg_type = IPV6_PKTINFO;
1821 cmh->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
1822 pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmh);
1823 pktinfo->ipi6_ifindex = gm_ifp->ifp->ifindex;
1824 pktinfo->ipi6_addr = gm_ifp->cur_ll_lowest;
1825
1826 expect_ret = iov[1].iov_len;
1827 if (iov_len == 3)
1828 expect_ret += iov[2].iov_len;
1829
1830 frr_with_privs (&pimd_privs) {
1831 ret = sendmsg(gm_ifp->pim->gm_socket, mh, 0);
1832 }
1833
1834 if (ret != expect_ret) {
1835 zlog_warn(log_ifp("failed to send query: %m"));
1836 gm_ifp->stats.tx_query_fail++;
1837 } else {
1838 if (gm_ifp->cur_version == GM_MLDV1) {
1839 if (pim_addr_is_any(grp))
1840 gm_ifp->stats.tx_query_old_general++;
1841 else
1842 gm_ifp->stats.tx_query_old_group++;
1843 } else {
1844 if (pim_addr_is_any(grp))
1845 gm_ifp->stats.tx_query_new_general++;
1846 else if (!n_srcs)
1847 gm_ifp->stats.tx_query_new_group++;
1848 else
1849 gm_ifp->stats.tx_query_new_groupsrc++;
1850 }
1851 }
1852 }
1853
1854 static void gm_t_query(struct thread *t)
1855 {
1856 struct gm_if *gm_ifp = THREAD_ARG(t);
1857 unsigned int timer_ms = gm_ifp->cur_query_intv;
1858
1859 if (gm_ifp->n_startup) {
1860 timer_ms /= 4;
1861 gm_ifp->n_startup--;
1862 }
1863
1864 thread_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
1865 &gm_ifp->t_query);
1866
1867 gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
1868 }
1869
1870 static void gm_t_sg_query(struct thread *t)
1871 {
1872 struct gm_sg *sg = THREAD_ARG(t);
1873
1874 gm_trigger_specific(sg);
1875 }
1876
1877 /* S,G specific queries (triggered by a member leaving) get a little slack
1878 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
1879 */
1880 static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
1881 {
1882 struct gm_if *gm_ifp = pend_gsq->iface;
1883
1884 gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
1885 pend_gsq->s_bit);
1886
1887 gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
1888 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
1889 }
1890
1891 static void gm_t_gsq_pend(struct thread *t)
1892 {
1893 struct gm_gsq_pending *pend_gsq = THREAD_ARG(t);
1894
1895 gm_send_specific(pend_gsq);
1896 }
1897
1898 static void gm_trigger_specific(struct gm_sg *sg)
1899 {
1900 struct gm_if *gm_ifp = sg->iface;
1901 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1902 struct gm_gsq_pending *pend_gsq, ref = {};
1903
1904 sg->n_query--;
1905 if (sg->n_query)
1906 thread_add_timer_msec(router->master, gm_t_sg_query, sg,
1907 gm_ifp->cur_query_intv_trig,
1908 &sg->t_sg_query);
1909
1910 if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
1911 return;
1912 if (gm_ifp->pim->gm_socket == -1)
1913 return;
1914
1915 if (PIM_DEBUG_GM_TRACE)
1916 zlog_debug(log_sg(sg, "triggered query"));
1917
1918 if (pim_addr_is_any(sg->sgaddr.src)) {
1919 gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
1920 return;
1921 }
1922
1923 ref.grp = sg->sgaddr.grp;
1924 ref.s_bit = sg->query_sbit;
1925
1926 pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
1927 if (!pend_gsq) {
1928 pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
1929 pend_gsq->grp = sg->sgaddr.grp;
1930 pend_gsq->s_bit = sg->query_sbit;
1931 pend_gsq->iface = gm_ifp;
1932 gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);
1933
1934 thread_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
1935 &gm_ifp->cfg_timing_fuzz,
1936 &pend_gsq->t_send);
1937 }
1938
1939 assert(pend_gsq->n_src < array_size(pend_gsq->srcs));
1940
1941 pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
1942 pend_gsq->n_src++;
1943
1944 if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
1945 THREAD_OFF(pend_gsq->t_send);
1946 gm_send_specific(pend_gsq);
1947 pend_gsq = NULL;
1948 }
1949 }
1950
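/* One raw ICMPv6 socket is shared by all MLD-enabled interfaces in a VRF
 * and reference-counted per interface. The ICMP6_FILTER restricts RX to
 * the four MLD message types; the IPV6_RECV* options request the
 * ancillary data (pktinfo, hop-by-hop options, hop limit) needed to
 * validate and demultiplex received packets.
 */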
1951 static void gm_vrf_socket_incref(struct pim_instance *pim)
1952 {
1953 struct vrf *vrf = pim->vrf;
1954 int ret, intval;
1955 struct icmp6_filter filter[1];
1956
1957 if (pim->gm_socket_if_count++ && pim->gm_socket != -1)
1958 return;
1959
1960 ICMP6_FILTER_SETBLOCKALL(filter);
1961 ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter);
1962 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter);
1963 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter);
1964 ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter);
1965
1966 frr_with_privs (&pimd_privs) {
1967 pim->gm_socket = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
1968 vrf->vrf_id, vrf->name);
1969 if (pim->gm_socket < 0) {
1970 zlog_err("(VRF %s) could not create MLD socket: %m",
1971 vrf->name);
1972 return;
1973 }
1974
1975 ret = setsockopt(pim->gm_socket, SOL_ICMPV6, ICMP6_FILTER,
1976 filter, sizeof(filter));
1977 if (ret)
1978 zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
1979 vrf->name);
1980
1981 intval = 1;
1982 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVPKTINFO,
1983 &intval, sizeof(intval));
1984 if (ret)
1985 zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
1986 vrf->name);
1987
1988 intval = 1;
1989 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPOPTS,
1990 &intval, sizeof(intval));
1991 if (ret)
1992 zlog_err("(VRF %s) failed to set IPV6_RECVHOPOPTS: %m",
1993 vrf->name);
1994
1995 intval = 1;
1996 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPLIMIT,
1997 &intval, sizeof(intval));
1998 if (ret)
1999 zlog_err("(VRF %s) failed to set IPV6_RECVHOPLIMIT: %m",
2000 vrf->name);
2001
2002 intval = 1;
2003 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_LOOP,
2004 &intval, sizeof(intval));
2005 if (ret)
2006 zlog_err(
2007 "(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
2008 vrf->name);
2009
2010 intval = 0;
2011 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_HOPS,
2012 &intval, sizeof(intval));
2013 if (ret)
2014 zlog_err(
2015 "(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
2016 vrf->name);
2017
2018 /* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
2019 * RX filtering in Linux. It only means "receive all groups
2020 * that something on the system has joined". To actually
2021 * receive *all* MLD packets - which is what we need -
2022 * multicast routing must be enabled on the interface. And
2023 * this only works for MLD packets specifically.
2024 *
2025 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
2026 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
2027 *
2028 * Also note that the code there explicitly checks for the IPv6
2029 * router alert MLD option (which the RFC requires to be present
2030 * on MLD packets). That implies trying to support hosts which
2031 * erroneously omit that option is just not possible.
2032 */
2033 intval = 1;
2034 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_ALL,
2035 &intval, sizeof(intval));
2036 if (ret)
2037 zlog_info(
2038 "(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
2039 vrf->name);
2040 }
2041
2042 thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
2043 &pim->t_gm_recv);
2044 }
2045
2046 static void gm_vrf_socket_decref(struct pim_instance *pim)
2047 {
2048 if (--pim->gm_socket_if_count)
2049 return;
2050
2051 THREAD_OFF(pim->t_gm_recv);
2052 close(pim->gm_socket);
2053 pim->gm_socket = -1;
2054 }
2055
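/* Bring up MLD on one interface: allocate the per-interface state, convert
 * the configured timing values (seconds / deciseconds) into milliseconds,
 * initialize the lookup structures and join ff02::16 (all-MLDv2-capable
 * routers) so that reports are delivered to the VRF socket.
 */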
2056 static void gm_start(struct interface *ifp)
2057 {
2058 struct pim_interface *pim_ifp = ifp->info;
2059 struct gm_if *gm_ifp;
2060
2061 assert(pim_ifp);
2062 assert(pim_ifp->pim);
2063 assert(pim_ifp->mroute_vif_index >= 0);
2064 assert(!pim_ifp->mld);
2065
2066 gm_vrf_socket_incref(pim_ifp->pim);
2067
2068 gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp));
2069 gm_ifp->ifp = ifp;
2070 pim_ifp->mld = gm_ifp;
2071 gm_ifp->pim = pim_ifp->pim;
2072 monotime(&gm_ifp->started);
2073
2074 zlog_info(log_ifp("starting MLD"));
2075
2076 if (pim_ifp->mld_version == 1)
2077 gm_ifp->cur_version = GM_MLDV1;
2078 else
2079 gm_ifp->cur_version = GM_MLDV2;
2080
2081 gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable;
2082 gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
2083 gm_ifp->cur_query_intv_trig =
2084 pim_ifp->gm_specific_query_max_response_time_dsec * 100;
2085 gm_ifp->cur_max_resp = pim_ifp->gm_query_max_response_time_dsec * 100;
2086 gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;
2087
2088 gm_ifp->cfg_timing_fuzz.tv_sec = 0;
2089 gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;
2090
2091 gm_sgs_init(gm_ifp->sgs);
2092 gm_subscribers_init(gm_ifp->subscribers);
2093 gm_packet_expires_init(gm_ifp->expires);
2094 gm_grp_pends_init(gm_ifp->grp_pends);
2095 gm_gsq_pends_init(gm_ifp->gsq_pends);
2096
2097 frr_with_privs (&pimd_privs) {
2098 struct ipv6_mreq mreq;
2099 int ret;
2100
2101 /* all-MLDv2 group */
2102 mreq.ipv6mr_multiaddr = gm_all_routers;
2103 mreq.ipv6mr_interface = ifp->ifindex;
2104 ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
2105 IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
2106 if (ret)
2107 zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
2108 ifp->name);
2109 }
2110 }
2111
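/* Flush all dynamic state on an interface: drop pending report packets,
 * cancel outstanding *,G and S,G query aggregates, free the S,G entries
 * (asserting that no positive or negative subscriptions remain) and
 * finally free the subscriber records.
 */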
2112 void gm_group_delete(struct gm_if *gm_ifp)
2113 {
2114 struct gm_sg *sg;
2115 struct gm_packet_state *pkt;
2116 struct gm_grp_pending *pend_grp;
2117 struct gm_gsq_pending *pend_gsq;
2118 struct gm_subscriber *subscriber;
2119
2120 while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
2121 gm_packet_drop(pkt, false);
2122
2123 while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
2124 THREAD_OFF(pend_grp->t_expire);
2125 XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
2126 }
2127
2128 while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
2129 THREAD_OFF(pend_gsq->t_send);
2130 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
2131 }
2132
2133 while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
2134 THREAD_OFF(sg->t_sg_expire);
2135 assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
2136 &sg->sgaddr);
2137 assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
2138 &sg->sgaddr);
2139
2140 gm_sg_free(sg);
2141 }
2142 while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
2143 assertf(!gm_packets_count(subscriber->packets), "%pPA",
2144 &subscriber->addr);
2145 XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
2146 }
2147 }
2148
2149 void gm_ifp_teardown(struct interface *ifp)
2150 {
2151 struct pim_interface *pim_ifp = ifp->info;
2152 struct gm_if *gm_ifp;
2153
2154 if (!pim_ifp || !pim_ifp->mld)
2155 return;
2156
2157 gm_ifp = pim_ifp->mld;
2158 gm_ifp->stopping = true;
2159 if (PIM_DEBUG_GM_EVENTS)
2160 zlog_debug(log_ifp("MLD stop"));
2161
2162 THREAD_OFF(gm_ifp->t_query);
2163 THREAD_OFF(gm_ifp->t_other_querier);
2164 THREAD_OFF(gm_ifp->t_expire);
2165
2166 frr_with_privs (&pimd_privs) {
2167 struct ipv6_mreq mreq;
2168 int ret;
2169
2170 /* all-MLDv2 group */
2171 mreq.ipv6mr_multiaddr = gm_all_routers;
2172 mreq.ipv6mr_interface = ifp->ifindex;
2173 ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
2174 IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
2175 if (ret)
2176 zlog_err(
2177 "(%s) failed to leave ff02::16 (all-MLDv2): %m",
2178 ifp->name);
2179 }
2180
2181 gm_vrf_socket_decref(gm_ifp->pim);
2182
2183 gm_group_delete(gm_ifp);
2184
2185 gm_grp_pends_fini(gm_ifp->grp_pends);
2186 gm_packet_expires_fini(gm_ifp->expires);
2187 gm_subscribers_fini(gm_ifp->subscribers);
2188 gm_sgs_fini(gm_ifp->sgs);
2189
2190 XFREE(MTYPE_GM_IFACE, gm_ifp);
2191 pim_ifp->mld = NULL;
2192 }
2193
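/* Handle a change of the interface's lowest link-local address. The MLD
 * querier election is won by the numerically lowest link-local address:
 * if we were the querier, follow the address change; if the new address
 * sorts below the current querier (or there is no querier yet), take over
 * and restart the startup query sequence.
 */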
2194 static void gm_update_ll(struct interface *ifp)
2195 {
2196 struct pim_interface *pim_ifp = ifp->info;
2197 struct gm_if *gm_ifp = pim_ifp->mld;
2198 bool was_querier;
2199
2200 was_querier =
2201 !IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
2202 !pim_addr_is_any(gm_ifp->querier);
2203
2204 gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
2205 if (was_querier)
2206 gm_ifp->querier = pim_ifp->ll_lowest;
2207 THREAD_OFF(gm_ifp->t_query);
2208
2209 if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
2210 if (was_querier)
2211 zlog_info(log_ifp(
2212 "lost link-local address, stopping querier"));
2213 return;
2214 }
2215
2216 if (was_querier)
2217 zlog_info(log_ifp("new link-local %pPA while querier"),
2218 &gm_ifp->cur_ll_lowest);
2219 else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
2220 pim_addr_is_any(gm_ifp->querier)) {
2221 zlog_info(log_ifp("new link-local %pPA, becoming querier"),
2222 &gm_ifp->cur_ll_lowest);
2223 gm_ifp->querier = gm_ifp->cur_ll_lowest;
2224 } else
2225 return;
2226
2227 gm_ifp->n_startup = gm_ifp->cur_qrv;
2228 thread_execute(router->master, gm_t_query, gm_ifp, 0);
2229 }
2230
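/* Apply configuration to the running MLD state of an interface: start or
 * tear down MLD as needed, pick up link-local address changes, and copy
 * the configured query interval, triggered query interval, max response
 * time, last member query count and MLD version. If anything relevant to
 * the querier changed, re-query via gm_bump_querier().
 */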
2231 void gm_ifp_update(struct interface *ifp)
2232 {
2233 struct pim_interface *pim_ifp = ifp->info;
2234 struct gm_if *gm_ifp;
2235 bool changed = false;
2236
2237 if (!pim_ifp)
2238 return;
2239 if (!if_is_operative(ifp) || !pim_ifp->pim ||
2240 pim_ifp->mroute_vif_index < 0) {
2241 gm_ifp_teardown(ifp);
2242 return;
2243 }
2244
2245 /*
2246 * If IPv6 MLD is not enabled on the interface, do not start MLD activities.
2247 */
2248 if (!pim_ifp->gm_enable)
2249 return;
2250
2251 if (!pim_ifp->mld) {
2252 changed = true;
2253 gm_start(ifp);
2254 }
2255
2256 gm_ifp = pim_ifp->mld;
2257 if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
2258 gm_update_ll(ifp);
2259
2260 unsigned int cfg_query_intv = pim_ifp->gm_default_query_interval * 1000;
2261
2262 if (gm_ifp->cur_query_intv != cfg_query_intv) {
2263 gm_ifp->cur_query_intv = cfg_query_intv;
2264 changed = true;
2265 }
2266
2267 unsigned int cfg_query_intv_trig =
2268 pim_ifp->gm_specific_query_max_response_time_dsec * 100;
2269
2270 if (gm_ifp->cur_query_intv_trig != cfg_query_intv_trig) {
2271 gm_ifp->cur_query_intv_trig = cfg_query_intv_trig;
2272 changed = true;
2273 }
2274
2275 unsigned int cfg_max_response =
2276 pim_ifp->gm_query_max_response_time_dsec * 100;
2277
2278 if (gm_ifp->cur_max_resp != cfg_max_response)
2279 gm_ifp->cur_max_resp = cfg_max_response;
2280
2281 if (gm_ifp->cur_lmqc != pim_ifp->gm_last_member_query_count)
2282 gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;
2283
2284 enum gm_version cfg_version;
2285
2286 if (pim_ifp->mld_version == 1)
2287 cfg_version = GM_MLDV1;
2288 else
2289 cfg_version = GM_MLDV2;
2290 if (gm_ifp->cur_version != cfg_version) {
2291 gm_ifp->cur_version = cfg_version;
2292 changed = true;
2293 }
2294
2295 if (changed) {
2296 if (PIM_DEBUG_GM_TRACE)
2297 zlog_debug(log_ifp(
2298 "MLD querier config changed, querying"));
2299 gm_bump_querier(gm_ifp);
2300 }
2301 }
2302
2303 /*
2304 * CLI (show commands only)
2305 */
2306
2307 #include "lib/command.h"
2308
2309 #include "pimd/pim6_mld_clippy.c"
2310
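/* Resolve the optional "vrf <NAME|all>" CLI argument. No argument selects
 * the default VRF; "all" returns NULL with *err untouched (callers then
 * iterate over all VRFs); an unknown name returns NULL with *err set to
 * CMD_WARNING after printing an error to the vty.
 */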
2311 static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
2312 int *err)
2313 {
2314 struct vrf *ret;
2315
2316 if (!vrf_str)
2317 return vrf_lookup_by_id(VRF_DEFAULT);
2318 if (!strcmp(vrf_str, "all"))
2319 return NULL;
2320 ret = vrf_lookup_by_name(vrf_str);
2321 if (ret)
2322 return ret;
2323
2324 vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str);
2325 *err = CMD_WARNING;
2326 return NULL;
2327 }
2328
2329 static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp)
2330 {
2331 struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
2332 struct gm_if *gm_ifp;
2333 bool querier;
2334 size_t i;
2335
2336 if (!pim_ifp) {
2337 vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name);
2338 return;
2339 }
2340
2341 gm_ifp = pim_ifp->mld;
2342 if (!gm_ifp) {
2343 vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name);
2344 return;
2345 }
2346
2347 querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
2348
2349 vty_out(vty, "Interface %s: MLD running\n", ifp->name);
2350 vty_out(vty, " Uptime: %pTVMs\n", &gm_ifp->started);
2351 vty_out(vty, " MLD version: %d\n", gm_ifp->cur_version);
2352 vty_out(vty, " Querier: %pPA%s\n", &gm_ifp->querier,
2353 querier ? " (this system)" : "");
2354 vty_out(vty, " Query timer: %pTH\n", gm_ifp->t_query);
2355 vty_out(vty, " Other querier timer: %pTH\n",
2356 gm_ifp->t_other_querier);
2357 vty_out(vty, " Robustness value: %u\n", gm_ifp->cur_qrv);
2358 vty_out(vty, " Query interval: %ums\n",
2359 gm_ifp->cur_query_intv);
2360 vty_out(vty, " Query response timer: %ums\n", gm_ifp->cur_max_resp);
2361 vty_out(vty, " Last member query intv.: %ums\n",
2362 gm_ifp->cur_query_intv_trig);
2363 vty_out(vty, " %u expiry timers from general queries:\n",
2364 gm_ifp->n_pending);
2365 for (i = 0; i < gm_ifp->n_pending; i++) {
2366 struct gm_general_pending *p = &gm_ifp->pending[i];
2367
2368 vty_out(vty, " %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
2369 &p->query, &p->expiry);
2370 }
2371 vty_out(vty, " %zu expiry timers from *,G queries\n",
2372 gm_grp_pends_count(gm_ifp->grp_pends));
2373 vty_out(vty, " %zu expiry timers from S,G queries\n",
2374 gm_gsq_pends_count(gm_ifp->gsq_pends));
2375 vty_out(vty, " %zu total *,G/S,G from %zu hosts in %zu bundles\n",
2376 gm_sgs_count(gm_ifp->sgs),
2377 gm_subscribers_count(gm_ifp->subscribers),
2378 gm_packet_expires_count(gm_ifp->expires));
2379 vty_out(vty, "\n");
2380 }
2381
2382 static void gm_show_if_one(struct vty *vty, struct interface *ifp,
2383 json_object *js_if, struct ttable *tt)
2384 {
2385 struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
2386 struct gm_if *gm_ifp = pim_ifp->mld;
2387 bool querier;
2388
2389 querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
2390
2391 if (js_if) {
2392 json_object_string_add(js_if, "name", ifp->name);
2393 json_object_string_addf(js_if, "address", "%pPA",
2394 &pim_ifp->primary_address);
2395 json_object_string_add(js_if, "state", "up");
2396 json_object_string_addf(js_if, "version", "%d",
2397 gm_ifp->cur_version);
2398 json_object_string_addf(js_if, "upTime", "%pTVMs",
2399 &gm_ifp->started);
2400 json_object_boolean_add(js_if, "querier", querier);
2401 json_object_string_addf(js_if, "querierIp", "%pPA",
2402 &gm_ifp->querier);
2403 if (querier)
2404 json_object_string_addf(js_if, "queryTimer", "%pTH",
2405 gm_ifp->t_query);
2406 else
2407 json_object_string_addf(js_if, "otherQuerierTimer",
2408 "%pTH",
2409 gm_ifp->t_other_querier);
2410 json_object_int_add(js_if, "timerRobustnessValue",
2411 gm_ifp->cur_qrv);
2412 json_object_int_add(js_if, "lastMemberQueryCount",
2413 gm_ifp->cur_lmqc);
2414 json_object_int_add(js_if, "timerQueryIntervalMsec",
2415 gm_ifp->cur_query_intv);
2416 json_object_int_add(js_if, "timerQueryResponseTimerMsec",
2417 gm_ifp->cur_max_resp);
2418 json_object_int_add(js_if, "timerLastMemberQueryIntervalMsec",
2419 gm_ifp->cur_query_intv_trig);
2420 } else {
2421 ttable_add_row(tt, "%s|%s|%pPAs|%d|%s|%pPAs|%pTH|%pTVMs",
2422 ifp->name, "up", &pim_ifp->primary_address,
2423 gm_ifp->cur_version, querier ? "local" : "other",
2424 &gm_ifp->querier, gm_ifp->t_query,
2425 &gm_ifp->started);
2426 }
2427 }
2428
2429 static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname,
2430 bool detail, json_object *js)
2431 {
2432 struct interface *ifp;
2433 json_object *js_vrf = NULL;
2434 struct pim_interface *pim_ifp;
2435 struct ttable *tt = NULL;
2436 char *table = NULL;
2437
2438 if (js) {
2439 js_vrf = json_object_new_object();
2440 json_object_object_add(js, vrf->name, js_vrf);
2441 }
2442
2443 if (!js && !detail) {
2444 /* Prepare table. */
2445 tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
2446 ttable_add_row(
2447 tt,
2448 "Interface|State|Address|V|Querier|QuerierIp|Query Timer|Uptime");
2449 tt->style.cell.rpad = 2;
2450 tt->style.corner = '+';
2451 ttable_restyle(tt);
2452 }
2453
2454 FOR_ALL_INTERFACES (vrf, ifp) {
2455 json_object *js_if = NULL;
2456
2457 if (ifname && strcmp(ifp->name, ifname))
2458 continue;
2459 if (detail && !js) {
2460 gm_show_if_one_detail(vty, ifp);
2461 continue;
2462 }
2463
2464 pim_ifp = ifp->info;
2465
2466 if (!pim_ifp || !pim_ifp->mld)
2467 continue;
2468
2469 if (js) {
2470 js_if = json_object_new_object();
2471 json_object_object_add(js_vrf, ifp->name, js_if);
2472 }
2473
2474 gm_show_if_one(vty, ifp, js_if, tt);
2475 }
2476
2477 /* Dump the generated table. */
2478 if (!js && !detail) {
2479 table = ttable_dump(tt, "\n");
2480 vty_out(vty, "%s\n", table);
2481 XFREE(MTYPE_TMP, table);
2482 ttable_del(tt);
2483 }
2484 }
2485
2486 static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname,
2487 bool detail, json_object *js)
2488 {
2489 if (vrf)
2490 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2491 else
2492 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2493 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2494 }
2495
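/* Illustrative invocations (interface and VRF names are examples only):
 *   show ipv6 mld interface
 *   show ipv6 mld vrf all interface detail
 *   show ipv6 mld interface eth0 json
 */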
2496 DEFPY(gm_show_interface,
2497 gm_show_interface_cmd,
2498 "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME | detail$detail] [json$json]",
2499 SHOW_STR
2500 IPV6_STR
2501 MLD_STR
2502 VRF_FULL_CMD_HELP_STR
2503 "MLD interface information\n"
2504 "Interface name\n"
2505 "Detailed output\n"
2506 JSON_STR)
2507 {
2508 int ret = CMD_SUCCESS;
2509 struct vrf *vrf;
2510 json_object *js = NULL;
2511
2512 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2513 if (ret != CMD_SUCCESS)
2514 return ret;
2515
2516 if (json)
2517 js = json_object_new_object();
2518 gm_show_if(vty, vrf, ifname, !!detail, js);
2519 return vty_json(vty, js);
2520 }
2521
2522 static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp,
2523 json_object *js_if)
2524 {
2525 struct gm_if_stats *stats = &gm_ifp->stats;
2526 /* clang-format off */
2527 struct {
2528 const char *text;
2529 const char *js_key;
2530 uint64_t *val;
2531 } *item, items[] = {
2532 { "v2 reports received", "rxV2Reports", &stats->rx_new_report },
2533 { "v1 reports received", "rxV1Reports", &stats->rx_old_report },
2534 { "v1 done received", "rxV1Done", &stats->rx_old_leave },
2535
2536 { "v2 *,* queries received", "rxV2QueryGeneral", &stats->rx_query_new_general },
2537 { "v2 *,G queries received", "rxV2QueryGroup", &stats->rx_query_new_group },
2538 { "v2 S,G queries received", "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc },
2539 { "v2 S-bit queries received", "rxV2QuerySBit", &stats->rx_query_new_sbit },
2540 { "v1 *,* queries received", "rxV1QueryGeneral", &stats->rx_query_old_general },
2541 { "v1 *,G queries received", "rxV1QueryGroup", &stats->rx_query_old_group },
2542
2543 { "v2 *,* queries sent", "txV2QueryGeneral", &stats->tx_query_new_general },
2544 { "v2 *,G queries sent", "txV2QueryGroup", &stats->tx_query_new_group },
2545 { "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc },
2546 { "v1 *,* queries sent", "txV1QueryGeneral", &stats->tx_query_old_general },
2547 { "v1 *,G queries sent", "txV1QueryGroup", &stats->tx_query_old_group },
2548 { "TX errors", "txErrors", &stats->tx_query_fail },
2549
2550 { "RX dropped (checksum error)", "rxDropChecksum", &stats->rx_drop_csum },
2551 { "RX dropped (invalid source)", "rxDropSrcAddr", &stats->rx_drop_srcaddr },
2552 { "RX dropped (invalid dest.)", "rxDropDstAddr", &stats->rx_drop_dstaddr },
2553 { "RX dropped (missing alert)", "rxDropRtrAlert", &stats->rx_drop_ra },
2554 { "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed },
2555 { "RX truncated reports", "rxTruncatedRep", &stats->rx_trunc_report },
2556 };
2557 /* clang-format on */
2558
2559 for (item = items; item < items + array_size(items); item++) {
2560 if (js_if)
2561 json_object_int_add(js_if, item->js_key, *item->val);
2562 else
2563 vty_out(vty, " %-30s %" PRIu64 "\n", item->text,
2564 *item->val);
2565 }
2566 }
2567
2568 static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf,
2569 const char *ifname, json_object *js)
2570 {
2571 struct interface *ifp;
2572 json_object *js_vrf;
2573
2574 if (js) {
2575 js_vrf = json_object_new_object();
2576 json_object_object_add(js, vrf->name, js_vrf);
2577 }
2578
2579 FOR_ALL_INTERFACES (vrf, ifp) {
2580 struct pim_interface *pim_ifp;
2581 struct gm_if *gm_ifp;
2582 json_object *js_if = NULL;
2583
2584 if (ifname && strcmp(ifp->name, ifname))
2585 continue;
2586
2587 if (!ifp->info)
2588 continue;
2589 pim_ifp = ifp->info;
2590 if (!pim_ifp->mld)
2591 continue;
2592 gm_ifp = pim_ifp->mld;
2593
2594 if (js) {
2595 js_if = json_object_new_object();
2596 json_object_object_add(js_vrf, ifp->name, js_if);
2597 } else {
2598 vty_out(vty, "Interface: %s\n", ifp->name);
2599 }
2600 gm_show_stats_one(vty, gm_ifp, js_if);
2601 if (!js)
2602 vty_out(vty, "\n");
2603 }
2604 }
2605
2606 DEFPY(gm_show_interface_stats,
2607 gm_show_interface_stats_cmd,
2608 "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
2609 SHOW_STR
2610 IPV6_STR
2611 MLD_STR
2612 VRF_FULL_CMD_HELP_STR
2613 "MLD statistics\n"
2614 INTERFACE_STR
2615 "Interface name\n"
2616 JSON_STR)
2617 {
2618 int ret = CMD_SUCCESS;
2619 struct vrf *vrf;
2620 json_object *js = NULL;
2621
2622 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2623 if (ret != CMD_SUCCESS)
2624 return ret;
2625
2626 if (json)
2627 js = json_object_new_object();
2628
2629 if (vrf)
2630 gm_show_stats_vrf(vty, vrf, ifname, js);
2631 else
2632 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2633 gm_show_stats_vrf(vty, vrf, ifname, js);
2634 return vty_json(vty, js);
2635 }
2636
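/* Print (or JSON-encode) the joined S,G state of one interface. The walk
 * starts at the first S,G entry greater than or equal to the "groups"
 * prefix and stops once entries no longer match it; "sources" filters
 * individual entries. The untracked ("NonTrkSeen"/untrackedLastSeen)
 * timestamp comes from the special gm_dummy_untracked subscriber, which
 * aggregates hosts that cannot be tracked individually.
 */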
2637 static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp,
2638 const struct prefix_ipv6 *groups,
2639 const struct prefix_ipv6 *sources, bool detail,
2640 json_object *js_if)
2641 {
2642 struct gm_sg *sg, *sg_start;
2643 json_object *js_group = NULL;
2644 pim_addr js_grpaddr = PIMADDR_ANY;
2645 struct gm_subscriber sub_ref = {}, *sub_untracked;
2646
2647 if (groups) {
2648 struct gm_sg sg_ref = {};
2649
2650 sg_ref.sgaddr.grp = pim_addr_from_prefix(groups);
2651 sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
2652 } else
2653 sg_start = gm_sgs_first(gm_ifp->sgs);
2654
2655 sub_ref.addr = gm_dummy_untracked;
2656 sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref);
2657 /* NB: sub_untracked may be NULL if no untracked joins exist */
2658
2659 frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
2660 struct timeval *recent = NULL, *untracked = NULL;
2661 json_object *js_src;
2662
2663 if (groups) {
2664 struct prefix grp_p;
2665
2666 pim_addr_to_prefix(&grp_p, sg->sgaddr.grp);
2667 if (!prefix_match(groups, &grp_p))
2668 break;
2669 }
2670
2671 if (sources) {
2672 struct prefix src_p;
2673
2674 pim_addr_to_prefix(&src_p, sg->sgaddr.src);
2675 if (!prefix_match(sources, &src_p))
2676 continue;
2677 }
2678
2679 if (sg->most_recent) {
2680 struct gm_packet_state *packet;
2681
2682 packet = gm_packet_sg2state(sg->most_recent);
2683 recent = &packet->received;
2684 }
2685
2686 if (sub_untracked) {
2687 struct gm_packet_state *packet;
2688 struct gm_packet_sg *item;
2689
2690 item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked);
2691 if (item) {
2692 packet = gm_packet_sg2state(item);
2693 untracked = &packet->received;
2694 }
2695 }
2696
2697 if (!js_if) {
2698 FMT_NSTD_BEGIN; /* %.0p */
2699 vty_out(vty,
2700 "%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
2701 &sg->sgaddr.grp, &sg->sgaddr.src,
2702 gm_states[sg->state], recent, untracked,
2703 &sg->created);
2704
2705 if (!detail)
2706 continue;
2707
2708 struct gm_packet_sg *item;
2709 struct gm_packet_state *packet;
2710
2711 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2712 packet = gm_packet_sg2state(item);
2713
2714 if (packet->subscriber == sub_untracked)
2715 continue;
2716 vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
2717 &packet->subscriber->addr, "(JOIN)",
2718 &packet->received);
2719 }
2720 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2721 packet = gm_packet_sg2state(item);
2722
2723 if (packet->subscriber == sub_untracked)
2724 continue;
2725 vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
2726 &packet->subscriber->addr, "(PRUNE)",
2727 &packet->received);
2728 }
2729 FMT_NSTD_END; /* %.0p */
2730 continue;
2731 }
2732 /* if (js_if) */
2733
2734 if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) {
2735 js_group = json_object_new_object();
2736 json_object_object_addf(js_if, js_group, "%pPA",
2737 &sg->sgaddr.grp);
2738 js_grpaddr = sg->sgaddr.grp;
2739 }
2740
2741 js_src = json_object_new_object();
2742 json_object_object_addf(js_group, js_src, "%pPAs",
2743 &sg->sgaddr.src);
2744
2745 json_object_string_add(js_src, "state", gm_states[sg->state]);
2746 json_object_string_addf(js_src, "created", "%pTVMs",
2747 &sg->created);
2748 json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent);
2749
2750 if (untracked)
2751 json_object_string_addf(js_src, "untrackedLastSeen",
2752 "%pTVMs", untracked);
2753 if (!detail)
2754 continue;
2755
2756 json_object *js_subs;
2757 struct gm_packet_sg *item;
2758 struct gm_packet_state *packet;
2759
2760 js_subs = json_object_new_object();
2761 json_object_object_add(js_src, "joinedBy", js_subs);
2762 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2763 packet = gm_packet_sg2state(item);
2764 if (packet->subscriber == sub_untracked)
2765 continue;
2766
2767 json_object *js_sub;
2768
2769 js_sub = json_object_new_object();
2770 json_object_object_addf(js_subs, js_sub, "%pPA",
2771 &packet->subscriber->addr);
2772 json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
2773 &packet->received);
2774 }
2775
2776 js_subs = json_object_new_object();
2777 json_object_object_add(js_src, "prunedBy", js_subs);
2778 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2779 packet = gm_packet_sg2state(item);
2780 if (packet->subscriber == sub_untracked)
2781 continue;
2782
2783 json_object *js_sub;
2784
2785 js_sub = json_object_new_object();
2786 json_object_object_addf(js_subs, js_sub, "%pPA",
2787 &packet->subscriber->addr);
2788 json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
2789 &packet->received);
2790 }
2791 }
2792 }
2793
2794 static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf,
2795 const char *ifname,
2796 const struct prefix_ipv6 *groups,
2797 const struct prefix_ipv6 *sources, bool detail,
2798 json_object *js)
2799 {
2800 struct interface *ifp;
2801 json_object *js_vrf;
2802
2803 if (js) {
2804 js_vrf = json_object_new_object();
2805 json_object_string_add(js_vrf, "vrf", vrf->name);
2806 json_object_object_add(js, vrf->name, js_vrf);
2807 }
2808
2809 FOR_ALL_INTERFACES (vrf, ifp) {
2810 struct pim_interface *pim_ifp;
2811 struct gm_if *gm_ifp;
2812 json_object *js_if = NULL;
2813
2814 if (ifname && strcmp(ifp->name, ifname))
2815 continue;
2816
2817 if (!ifp->info)
2818 continue;
2819 pim_ifp = ifp->info;
2820 if (!pim_ifp->mld)
2821 continue;
2822 gm_ifp = pim_ifp->mld;
2823
2824 if (js) {
2825 js_if = json_object_new_object();
2826 json_object_object_add(js_vrf, ifp->name, js_if);
2827 }
2828
2829 if (!js && !ifname)
2830 vty_out(vty, "\nOn interface %s:\n", ifp->name);
2831
2832 gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if);
2833 }
2834 }
2835
2836 DEFPY(gm_show_interface_joins,
2837 gm_show_interface_joins_cmd,
2838 "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
2839 SHOW_STR
2840 IPV6_STR
2841 MLD_STR
2842 VRF_FULL_CMD_HELP_STR
2843 "MLD joined groups & sources\n"
2844 INTERFACE_STR
2845 "Interface name\n"
2846 "Limit output to group range\n"
2847 "Show groups covered by this prefix\n"
2848 "Limit output to source range\n"
2849 "Show sources covered by this prefix\n"
2850 "Show details, including tracked receivers\n"
2851 JSON_STR)
2852 {
2853 int ret = CMD_SUCCESS;
2854 struct vrf *vrf;
2855 json_object *js = NULL;
2856
2857 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2858 if (ret != CMD_SUCCESS)
2859 return ret;
2860
2861 if (json)
2862 js = json_object_new_object();
2863 else
2864 vty_out(vty, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
2865 "Source", "State", "LastSeen", "NonTrkSeen", "Created");
2866
2867 if (vrf)
2868 gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail,
2869 js);
2870 else
2871 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2872 gm_show_joins_vrf(vty, vrf, ifname, groups, sources,
2873 !!detail, js);
2874 return vty_json(vty, js);
2875 }
2876
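/* "show ipv6 mld ... groups": walk every MLD-enabled interface in the VRF
 * and emit one table row (or JSON array entry) per S,G entry, plus the
 * total group count and the configured watermark warning limit.
 */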
2877 static void gm_show_groups(struct vty *vty, struct vrf *vrf, bool uj)
2878 {
2879 struct interface *ifp;
2880 struct ttable *tt = NULL;
2881 char *table;
2882 json_object *json = NULL;
2883 json_object *json_iface = NULL;
2884 json_object *json_group = NULL;
2885 json_object *json_groups = NULL;
2886 struct pim_instance *pim = vrf->info;
2887
2888 if (uj) {
2889 json = json_object_new_object();
2890 json_object_int_add(json, "totalGroups", pim->gm_group_count);
2891 json_object_int_add(json, "watermarkLimit",
2892 pim->gm_watermark_limit);
2893 } else {
2894 /* Prepare table. */
2895 tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
2896 ttable_add_row(tt, "Interface|Group|Version|Uptime");
2897 tt->style.cell.rpad = 2;
2898 tt->style.corner = '+';
2899 ttable_restyle(tt);
2900
2901 vty_out(vty, "Total MLD groups: %u\n", pim->gm_group_count);
2902 vty_out(vty, "Watermark warn limit(%s): %u\n",
2903 pim->gm_watermark_limit ? "Set" : "Not Set",
2904 pim->gm_watermark_limit);
2905 }
2906
2907 /* scan interfaces */
2908 FOR_ALL_INTERFACES (vrf, ifp) {
2909
2910 struct pim_interface *pim_ifp = ifp->info;
2911 struct gm_if *gm_ifp;
2912 struct gm_sg *sg;
2913
2914 if (!pim_ifp)
2915 continue;
2916
2917 gm_ifp = pim_ifp->mld;
2918 if (!gm_ifp)
2919 continue;
2920
2921 /* scan mld groups */
2922 frr_each (gm_sgs, gm_ifp->sgs, sg) {
2923
2924 if (uj) {
2925 json_object_object_get_ex(json, ifp->name,
2926 &json_iface);
2927
2928 if (!json_iface) {
2929 json_iface = json_object_new_object();
2930 json_object_pim_ifp_add(json_iface,
2931 ifp);
2932 json_object_object_add(json, ifp->name,
2933 json_iface);
2934 json_groups = json_object_new_array();
2935 json_object_object_add(json_iface,
2936 "groups",
2937 json_groups);
2938 }
2939
2940 json_group = json_object_new_object();
2941 json_object_string_addf(json_group, "group",
2942 "%pPAs",
2943 &sg->sgaddr.grp);
2944
2945 json_object_int_add(json_group, "version",
2946 pim_ifp->mld_version);
2947 json_object_string_addf(json_group, "uptime",
2948 "%pTVMs", &sg->created);
2949 json_object_array_add(json_groups, json_group);
2950 } else {
2951 ttable_add_row(tt, "%s|%pPAs|%d|%pTVMs",
2952 ifp->name, &sg->sgaddr.grp,
2953 pim_ifp->mld_version,
2954 &sg->created);
2955 }
2956 } /* scan gm groups */
2957 } /* scan interfaces */
2958
2959 if (uj)
2960 vty_json(vty, json);
2961 else {
2962 /* Dump the generated table. */
2963 table = ttable_dump(tt, "\n");
2964 vty_out(vty, "%s\n", table);
2965 XFREE(MTYPE_TMP, table);
2966 ttable_del(tt);
2967 }
2968 }
2969
2970 DEFPY(gm_show_mld_groups,
2971 gm_show_mld_groups_cmd,
2972 "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
2973 SHOW_STR
2974 IPV6_STR
2975 MLD_STR
2976 VRF_FULL_CMD_HELP_STR
2977 MLD_GROUP_STR
2978 JSON_STR)
2979 {
2980 int ret = CMD_SUCCESS;
2981 struct vrf *vrf;
2982
2983 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2984 if (ret != CMD_SUCCESS)
2985 return ret;
2986
2987 if (vrf)
2988 gm_show_groups(vty, vrf, !!json);
2989 else
2990 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2991 gm_show_groups(vty, vrf, !!json);
2992
2993 return CMD_SUCCESS;
2994 }
2995
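/* Developer debug dump: querier/link-local state, pending general query
 * expiries, every S,G entry with its positive/negative subscription lists,
 * and every subscriber with its still-active packet items.
 */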
2996 DEFPY(gm_debug_show,
2997 gm_debug_show_cmd,
2998 "debug show mld interface IFNAME",
2999 DEBUG_STR
3000 SHOW_STR
3001 MLD_STR
3002 INTERFACE_STR
3003 "interface name\n")
3004 {
3005 struct interface *ifp;
3006 struct pim_interface *pim_ifp;
3007 struct gm_if *gm_ifp;
3008
3009 ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
3010 if (!ifp) {
3011 vty_out(vty, "%% no such interface: %pSQq\n", ifname);
3012 return CMD_WARNING;
3013 }
3014
3015 pim_ifp = ifp->info;
3016 if (!pim_ifp) {
3017 vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname);
3018 return CMD_WARNING;
3019 }
3020
3021 gm_ifp = pim_ifp->mld;
3022 if (!gm_ifp) {
3023 vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname);
3024 return CMD_WARNING;
3025 }
3026
3027 vty_out(vty, "querier: %pPA\n", &gm_ifp->querier);
3028 vty_out(vty, "ll_lowest: %pPA\n\n", &pim_ifp->ll_lowest);
3029 vty_out(vty, "t_query: %pTHD\n", gm_ifp->t_query);
3030 vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier);
3031 vty_out(vty, "t_expire: %pTHD\n", gm_ifp->t_expire);
3032
3033 vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending);
3034 for (size_t i = 0; i < gm_ifp->n_pending; i++) {
3035 int64_t query, expiry;
3036
3037 query = monotime_since(&gm_ifp->pending[i].query, NULL);
3038 expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL);
3039
3040 vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n",
3041 i, query / 1000, expiry / 1000);
3042 }
3043
3044 struct gm_sg *sg;
3045 struct gm_packet_state *pkt;
3046 struct gm_packet_sg *item;
3047 struct gm_subscriber *subscriber;
3048
3049 vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs));
3050 frr_each (gm_sgs, gm_ifp->sgs, sg) {
3051 vty_out(vty, "\t%pSG t_expire=%pTHD\n", &sg->sgaddr,
3052 sg->t_sg_expire);
3053
3054 vty_out(vty, "\t @pos:%zu\n",
3055 gm_packet_sg_subs_count(sg->subs_positive));
3056 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
3057 pkt = gm_packet_sg2state(item);
3058
3059 vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n",
3060 item->is_src ? "S" : "",
3061 item->is_excl ? "E" : "",
3062 &pkt->subscriber->addr, pkt->subscriber, pkt,
3063 item->offset);
3064
3065 assert(item->sg == sg);
3066 }
3067 vty_out(vty, "\t @neg:%zu\n",
3068 gm_packet_sg_subs_count(sg->subs_negative));
3069 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
3070 pkt = gm_packet_sg2state(item);
3071
3072 vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n",
3073 item->is_src ? "S" : "",
3074 item->is_excl ? "E" : "",
3075 &pkt->subscriber->addr, pkt->subscriber, pkt,
3076 item->offset);
3077
3078 assert(item->sg == sg);
3079 }
3080 }
3081
3082 vty_out(vty, "\n%zu subscribers:\n",
3083 gm_subscribers_count(gm_ifp->subscribers));
3084 frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) {
3085 vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr,
3086 subscriber, gm_packets_count(subscriber->packets));
3087
3088 frr_each (gm_packets, subscriber->packets, pkt) {
3089 vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n",
3090 pkt,
3091 monotime_since(&pkt->received, NULL) *
3092 0.000001f,
3093 pkt->n_active, pkt->n_sg);
3094
3095 for (size_t i = 0; i < pkt->n_sg; i++) {
3096 item = pkt->items + i;
3097
3098 vty_out(vty, "\t\t[%zu]", i);
3099
3100 if (!item->sg) {
3101 vty_out(vty, " inactive\n");
3102 continue;
3103 }
3104
3105 vty_out(vty, " %s%s %pSG nE=%u\n",
3106 item->is_src ? "S" : "",
3107 item->is_excl ? "E" : "",
3108 &item->sg->sgaddr, item->n_exclude);
3109 }
3110 }
3111 }
3112
3113 return CMD_SUCCESS;
3114 }
3115
3116 DEFPY(gm_debug_iface_cfg,
3117 gm_debug_iface_cfg_cmd,
3118 "debug ipv6 mld {"
3119 "robustness (0-7)|"
3120 "query-max-response-time (1-8387584)"
3121 "}",
3122 DEBUG_STR
3123 IPV6_STR
3124 "Multicast Listener Discovery\n"
3125 "QRV\nQRV\n"
3126 "maxresp\nmaxresp\n")
3127 {
3128 VTY_DECLVAR_CONTEXT(interface, ifp);
3129 struct pim_interface *pim_ifp;
3130 struct gm_if *gm_ifp;
3131 bool changed = false;
3132
3133 pim_ifp = ifp->info;
3134 if (!pim_ifp) {
3135 vty_out(vty, "%% no PIM state for interface %pSQq\n",
3136 ifp->name);
3137 return CMD_WARNING;
3138 }
3139 gm_ifp = pim_ifp->mld;
3140 if (!gm_ifp) {
3141 vty_out(vty, "%% no MLD state for interface %pSQq\n",
3142 ifp->name);
3143 return CMD_WARNING;
3144 }
3145
3146 if (robustness_str && gm_ifp->cur_qrv != robustness) {
3147 gm_ifp->cur_qrv = robustness;
3148 changed = true;
3149 }
3150 if (query_max_response_time_str &&
3151 gm_ifp->cur_max_resp != (unsigned int)query_max_response_time) {
3152 gm_ifp->cur_max_resp = query_max_response_time;
3153 changed = true;
3154 }
3155
3156 if (changed) {
3157 vty_out(vty, "%% MLD querier config changed, bumping\n");
3158 gm_bump_querier(gm_ifp);
3159 }
3160 return CMD_SUCCESS;
3161 }
3162
3163 void gm_cli_init(void);
3164
3165 void gm_cli_init(void)
3166 {
3167 install_element(VIEW_NODE, &gm_show_interface_cmd);
3168 install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
3169 install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
3170 install_element(VIEW_NODE, &gm_show_mld_groups_cmd);
3171
3172 install_element(VIEW_NODE, &gm_debug_show_cmd);
3173 install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
3174 }