]> git.proxmox.com Git - mirror_frr.git/blob - pimd/pim6_mld.c
Merge pull request #13649 from donaldsharp/unlock_the_node_or_else
[mirror_frr.git] / pimd / pim6_mld.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * PIMv6 MLD querier
4 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
5 */
6
7 /*
8 * keep pim6_mld.h open when working on this code. Most data structures are
9 * commented in the header.
10 *
11 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
12 * that this code will replace the old IGMP querier at some point.
13 */
14
15 #include <zebra.h>
16 #include <netinet/ip6.h>
17
18 #include "lib/memory.h"
19 #include "lib/jhash.h"
20 #include "lib/prefix.h"
21 #include "lib/checksum.h"
22 #include "lib/frrevent.h"
23 #include "termtable.h"
24
25 #include "pimd/pim6_mld.h"
26 #include "pimd/pim6_mld_protocol.h"
27 #include "pimd/pim_memory.h"
28 #include "pimd/pim_instance.h"
29 #include "pimd/pim_iface.h"
30 #include "pimd/pim6_cmd.h"
31 #include "pimd/pim_cmd_common.h"
32 #include "pimd/pim_util.h"
33 #include "pimd/pim_tib.h"
34 #include "pimd/pimd.h"
35
36 #ifndef IPV6_MULTICAST_ALL
37 #define IPV6_MULTICAST_ALL 29
38 #endif
39
40 DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface");
41 DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet");
42 DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber");
43 DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state");
44 DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)");
45 DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
46 DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");
47
48 static void gm_t_query(struct event *t);
49 static void gm_trigger_specific(struct gm_sg *sg);
50 static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
51 struct timeval expire_wait);
52
53 /* shorthand for log messages */
54 #define log_ifp(msg) \
55 "[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
56 #define log_pkt_src(msg) \
57 "[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
58 &pkt_src->sin6_addr
59 #define log_sg(sg, msg) \
60 "[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
61 sg->iface->ifp->name, &sg->sgaddr
62
63 /* clang-format off */
64 #if PIM_IPV == 6
65 static const pim_addr gm_all_hosts = {
66 .s6_addr = {
67 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
69 },
70 };
71 static const pim_addr gm_all_routers = {
72 .s6_addr = {
73 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
75 },
76 };
77 /* MLDv1 does not allow subscriber tracking due to report suppression
78 * hence, the source address is replaced with ffff:...:ffff
79 */
80 static const pim_addr gm_dummy_untracked = {
81 .s6_addr = {
82 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
83 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
84 },
85 };
86 #else
87 /* 224.0.0.1 */
88 static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), };
89 /* 224.0.0.22 */
90 static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), };
91 static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, };
92 #endif
93 /* clang-format on */
94
95 #define IPV6_MULTICAST_SCOPE_LINK 2
96
97 static inline uint8_t in6_multicast_scope(const pim_addr *addr)
98 {
99 return addr->s6_addr[1] & 0xf;
100 }
101
102 bool in6_multicast_nofwd(const pim_addr *addr)
103 {
104 return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK;
105 }
106
107 /*
108 * (S,G) -> subscriber,(S,G)
109 */
110
111 static int gm_packet_sg_cmp(const struct gm_packet_sg *a,
112 const struct gm_packet_sg *b)
113 {
114 const struct gm_packet_state *s_a, *s_b;
115
116 s_a = gm_packet_sg2state(a);
117 s_b = gm_packet_sg2state(b);
118 return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr);
119 }
120
121 DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm,
122 gm_packet_sg_cmp);
123
/* Look up the subscription item for (sg, sub) on the given sense list
 * (GM_SUB_POS = include/join state, GM_SUB_NEG = exclude state).
 *
 * The tree compare (gm_packet_sg_cmp) derives the subscriber from the item
 * via gm_packet_sg2state(), i.e. container-of using item->offset.  So the
 * lookup key here is a minimal fake: a packet header with only .subscriber
 * set, immediately followed by one item with offset 0.  NOTE(review): this
 * relies on ref.item being laid out exactly like items[0] of a real
 * struct gm_packet_state — confirm against the struct definition in
 * pim6_mld.h if either struct changes.
 *
 * Returns NULL if the subscriber has no item on that list.
 */
static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg,
					      enum gm_sub_sense sense,
					      struct gm_subscriber *sub)
{
	struct {
		struct gm_packet_state hdr;
		struct gm_packet_sg item;
	} ref = {
		/* clang-format off */
		.hdr = {
			.subscriber = sub,
		},
		.item = {
			.offset = 0,
		},
		/* clang-format on */
	};

	return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item);
}
144
145 /*
146 * interface -> (*,G),pending
147 */
148
149 static int gm_grp_pending_cmp(const struct gm_grp_pending *a,
150 const struct gm_grp_pending *b)
151 {
152 return IPV6_ADDR_CMP(&a->grp, &b->grp);
153 }
154
155 DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm,
156 gm_grp_pending_cmp);
157
158 /*
159 * interface -> ([S1,S2,...],G),pending
160 */
161
162 static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a,
163 const struct gm_gsq_pending *b)
164 {
165 if (a->s_bit != b->s_bit)
166 return numcmp(a->s_bit, b->s_bit);
167
168 return IPV6_ADDR_CMP(&a->grp, &b->grp);
169 }
170
171 static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a)
172 {
173 uint32_t seed = a->s_bit ? 0x68f0eb5e : 0x156b7f19;
174
175 return jhash(&a->grp, sizeof(a->grp), seed);
176 }
177
178 DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp,
179 gm_gsq_pending_hash);
180
181 /*
182 * interface -> (S,G)
183 */
184
185 int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b)
186 {
187 return pim_sgaddr_cmp(a->sgaddr, b->sgaddr);
188 }
189
190 static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
191 pim_addr src)
192 {
193 struct gm_sg ref = {};
194
195 ref.sgaddr.grp = grp;
196 ref.sgaddr.src = src;
197 return gm_sgs_find(gm_ifp->sgs, &ref);
198 }
199
200 static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
201 pim_addr src)
202 {
203 struct gm_sg *ret, *prev;
204
205 ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
206 ret->sgaddr.grp = grp;
207 ret->sgaddr.src = src;
208 ret->iface = gm_ifp;
209 prev = gm_sgs_add(gm_ifp->sgs, ret);
210
211 if (prev) {
212 XFREE(MTYPE_GM_SG, ret);
213 ret = prev;
214 } else {
215 monotime(&ret->created);
216 gm_packet_sg_subs_init(ret->subs_positive);
217 gm_packet_sg_subs_init(ret->subs_negative);
218 }
219 return ret;
220 }
221
222 /*
223 * interface -> packets, sorted by expiry (because add_tail insert order)
224 */
225
226 DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm);
227
228 /*
229 * subscriber -> packets
230 */
231
232 DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm);
233
234 /*
235 * interface -> subscriber
236 */
237
238 static int gm_subscriber_cmp(const struct gm_subscriber *a,
239 const struct gm_subscriber *b)
240 {
241 return IPV6_ADDR_CMP(&a->addr, &b->addr);
242 }
243
244 static uint32_t gm_subscriber_hash(const struct gm_subscriber *a)
245 {
246 return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4);
247 }
248
249 DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp,
250 gm_subscriber_hash);
251
252 static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp,
253 pim_addr addr)
254 {
255 struct gm_subscriber ref = {}, *ret;
256
257 ref.addr = addr;
258 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
259 if (ret)
260 ret->refcount++;
261 return ret;
262 }
263
264 static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp,
265 pim_addr addr)
266 {
267 struct gm_subscriber ref = {}, *ret;
268
269 ref.addr = addr;
270 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
271
272 if (!ret) {
273 ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret));
274 ret->iface = gm_ifp;
275 ret->addr = addr;
276 ret->refcount = 1;
277 monotime(&ret->created);
278 gm_packets_init(ret->packets);
279
280 gm_subscribers_add(gm_ifp->subscribers, ret);
281 }
282 return ret;
283 }
284
285 static void gm_subscriber_drop(struct gm_subscriber **subp)
286 {
287 struct gm_subscriber *sub = *subp;
288 struct gm_if *gm_ifp;
289
290 if (!sub)
291 return;
292 gm_ifp = sub->iface;
293
294 *subp = NULL;
295 sub->refcount--;
296
297 if (sub->refcount)
298 return;
299
300 gm_subscribers_del(gm_ifp->subscribers, sub);
301 XFREE(MTYPE_GM_SUBSCRIBER, sub);
302 }
303
304 /****************************************************************************/
305
/* bundle query timer values for combined v1/v2 handling */
struct gm_query_timers {
	unsigned int qrv;	   /* robustness variable (retransmit count) */
	unsigned int max_resp_ms;  /* max response time, milliseconds */
	unsigned int qqic_ms;	   /* query interval, milliseconds */

	struct timeval fuzz;	   /* extra slack added on top of expire_wait */
	struct timeval expire_wait; /* output of gm_expiry_calc() */
};
315
316 static void gm_expiry_calc(struct gm_query_timers *timers)
317 {
318 unsigned int expire =
319 (timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms;
320 ldiv_t exp_div = ldiv(expire, 1000);
321
322 timers->expire_wait.tv_sec = exp_div.quot;
323 timers->expire_wait.tv_usec = exp_div.rem * 1000;
324 timeradd(&timers->expire_wait, &timers->fuzz, &timers->expire_wait);
325 }
326
/* final teardown of an (S,G) entry.  Callers remove the entry from
 * gm_ifp->sgs before calling this (see gm_sg_update).
 */
static void gm_sg_free(struct gm_sg *sg)
{
	/* t_sg_expiry is handled before this is reached */
	EVENT_OFF(sg->t_sg_query);
	gm_packet_sg_subs_fini(sg->subs_negative);
	gm_packet_sg_subs_fini(sg->subs_positive);
	XFREE(MTYPE_GM_SG, sg);
}
335
336 /* clang-format off */
337 static const char *const gm_states[] = {
338 [GM_SG_NOINFO] = "NOINFO",
339 [GM_SG_JOIN] = "JOIN",
340 [GM_SG_JOIN_EXPIRING] = "JOIN_EXPIRING",
341 [GM_SG_PRUNE] = "PRUNE",
342 [GM_SG_NOPRUNE] = "NOPRUNE",
343 [GM_SG_NOPRUNE_EXPIRING] = "NOPRUNE_EXPIRING",
344 };
345 /* clang-format on */
346
347 /* TODO: S,G entries in EXCLUDE (i.e. prune) unsupported" */
348
349 /* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
350 * joined. Whether we actually want/need to support this is a separate
351 * question - it is almost never used. In fact this is exactly what RFC5790
352 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
353 */
354
/* Central (S,G) state machine.  Re-derives the desired state from the
 * positive/negative subscription counts (and the *,G entry for S,G), kicks
 * off expiry + group/source-specific query timers on transitions into the
 * *_EXPIRING states, propagates join/prune into the TIB, and frees the
 * entry once it reaches NOINFO.
 *
 * has_expired: true when called from an expiry path — suppresses the
 * "grace period" transition into JOIN_EXPIRING/NOPRUNE_EXPIRING.
 *
 * NB: sg may be freed by the time this returns (NOINFO path at the end);
 * callers must not touch sg afterwards.
 */
static void gm_sg_update(struct gm_sg *sg, bool has_expired)
{
	struct gm_if *gm_ifp = sg->iface;
	enum gm_sg_state prev, desired;
	bool new_join;
	struct gm_sg *grp = NULL;

	/* for an S,G entry, behavior also depends on the *,G entry */
	if (!pim_addr_is_any(sg->sgaddr.src))
		grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY);
	else
		assert(sg->state != GM_SG_PRUNE);

	/* derive desired state from subscription counts */
	if (gm_packet_sg_subs_count(sg->subs_positive)) {
		desired = GM_SG_JOIN;
		assert(!sg->t_sg_expire);
	} else if ((sg->state == GM_SG_JOIN ||
		    sg->state == GM_SG_JOIN_EXPIRING) &&
		   !has_expired)
		desired = GM_SG_JOIN_EXPIRING;
	else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive))
		desired = GM_SG_NOINFO;
	else if (gm_packet_sg_subs_count(grp->subs_positive) ==
		 gm_packet_sg_subs_count(sg->subs_negative)) {
		/* every *,G subscriber excludes this source */
		if ((sg->state == GM_SG_NOPRUNE ||
		     sg->state == GM_SG_NOPRUNE_EXPIRING) &&
		    !has_expired)
			desired = GM_SG_NOPRUNE_EXPIRING;
		else
			desired = GM_SG_PRUNE;
	} else if (gm_packet_sg_subs_count(sg->subs_negative))
		desired = GM_SG_NOPRUNE;
	else
		desired = GM_SG_NOINFO;

	if (desired != sg->state && !gm_ifp->stopping) {
		if (PIM_DEBUG_GM_EVENTS)
			zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
				   gm_states[desired]);

		if (desired == GM_SG_JOIN_EXPIRING ||
		    desired == GM_SG_NOPRUNE_EXPIRING) {
			/* transition into a grace-period state: arm the
			 * expiry timer and start last-member-style
			 * specific queries
			 */
			struct gm_query_timers timers;

			timers.qrv = gm_ifp->cur_qrv;
			timers.max_resp_ms = gm_ifp->cur_max_resp;
			timers.qqic_ms = gm_ifp->cur_query_intv_trig;
			timers.fuzz = gm_ifp->cfg_timing_fuzz;

			gm_expiry_calc(&timers);
			gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);

			EVENT_OFF(sg->t_sg_query);
			sg->n_query = gm_ifp->cur_lmqc;
			sg->query_sbit = false;
			gm_trigger_specific(sg);
		}
	}
	prev = sg->state;
	sg->state = desired;

	/* link-scope groups and shutting-down interfaces never join */
	if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping)
		new_join = false;
	else
		new_join = gm_sg_state_want_join(desired);

	if (new_join && !sg->tib_joined) {
		/* this will retry if join previously failed */
		sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
						gm_ifp->ifp, &sg->oil);
		if (!sg->tib_joined)
			zlog_warn(
				"MLD join for %pSG%%%s not propagated into TIB",
				&sg->sgaddr, gm_ifp->ifp->name);
		else
			zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr,
				  gm_ifp->ifp->name);

	} else if (sg->tib_joined && !new_join) {
		tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil);

		sg->oil = NULL;
		sg->tib_joined = false;
	}

	if (desired == GM_SG_NOINFO) {
		/* multiple paths can lead to the last state going away;
		 * t_sg_expire can still be running if we're arriving from
		 * another path.
		 */
		if (has_expired)
			EVENT_OFF(sg->t_sg_expire);

		/* NOINFO must mean no timers and no subscriptions left */
		assertf((!sg->t_sg_expire &&
			 !gm_packet_sg_subs_count(sg->subs_positive) &&
			 !gm_packet_sg_subs_count(sg->subs_negative)),
			"%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
			&sg->sgaddr, gm_ifp->ifp->name, has_expired,
			sg->t_sg_expire, gm_states[prev], gm_states[desired],
			gm_packet_sg_subs_count(sg->subs_positive),
			gm_packet_sg_subs_count(sg->subs_negative), grp);

		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_sg(sg, "dropping"));

		gm_sgs_del(gm_ifp->sgs, sg);
		gm_sg_free(sg);
	}
}
463
464 /****************************************************************************/
465
466 /* the following bunch of functions deals with transferring state from
467 * received packets into gm_packet_state. As a reminder, the querier is
468 * structured to keep all items received in one packet together, since they
469 * will share expiry timers and thus allows efficient handling.
470 */
471
/* free a packet state: unlink from the interface expiry list and the
 * subscriber's packet list, then drop the subscriber reference the packet
 * held.  Items must already be fully dropped (n_active == 0) — see
 * gm_packet_sg_drop().
 */
static void gm_packet_free(struct gm_packet_state *pkt)
{
	gm_packet_expires_del(pkt->iface->expires, pkt);
	gm_packets_del(pkt->subscriber->packets, pkt);
	gm_subscriber_drop(&pkt->subscriber);
	XFREE(MTYPE_GM_STATE, pkt);
}
479
480 static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt,
481 struct gm_sg *sg, bool is_excl,
482 bool is_src)
483 {
484 struct gm_packet_sg *item;
485
486 assert(pkt->n_active < pkt->n_sg);
487
488 item = &pkt->items[pkt->n_active];
489 item->sg = sg;
490 item->is_excl = is_excl;
491 item->is_src = is_src;
492 item->offset = pkt->n_active;
493
494 pkt->n_active++;
495 return item;
496 }
497
/* remove one item from its (S,G)'s subscription tree.  For a *,G EXCLUDE
 * item, the trailing source items belonging to it are dropped as well.
 * When this was the packet's last active item, the whole packet state is
 * freed.
 *
 * Returns true if the packet was freed (callers must stop touching it).
 */
static bool gm_packet_sg_drop(struct gm_packet_sg *item)
{
	struct gm_packet_state *pkt;
	size_t i;

	assert(item->sg);

	pkt = gm_packet_sg2state(item);
	if (item->sg->most_recent == item)
		item->sg->most_recent = NULL;

	/* a *,G EXCLUDE item owns the n_exclude source items that follow
	 * it in the packet; drop those from their negative trees first
	 */
	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *excl_item;

		excl_item = item + 1 + i;
		if (!excl_item->sg)
			continue;

		gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item);
		excl_item->sg = NULL;
		pkt->n_active--;

		/* the *,G item itself is still active at this point */
		assert(pkt->n_active > 0);
	}

	/* an excluded source sits on the negative tree, everything else on
	 * the positive tree
	 */
	if (item->is_excl && item->is_src)
		gm_packet_sg_subs_del(item->sg->subs_negative, item);
	else
		gm_packet_sg_subs_del(item->sg->subs_positive, item);
	item->sg = NULL;
	pkt->n_active--;

	if (!pkt->n_active) {
		gm_packet_free(pkt);
		return true;
	}
	return false;
}
536
/* drop all remaining items of a packet and update each affected (S,G).
 * trace: emit per-(S,G) debug logs (suppressed for some callers).
 */
static void gm_packet_drop(struct gm_packet_state *pkt, bool trace)
{
	for (size_t i = 0; i < pkt->n_sg; i++) {
		struct gm_sg *sg = pkt->items[i].sg;
		bool deleted;

		if (!sg)
			continue;

		if (trace && PIM_DEBUG_GM_TRACE)
			zlog_debug(log_sg(sg, "general-dropping from %pPA"),
				   &pkt->subscriber->addr);
		deleted = gm_packet_sg_drop(&pkt->items[i]);

		gm_sg_update(sg, true);
		/* dropping the last item freed pkt — must not touch it
		 * (or its items) anymore
		 */
		if (deleted)
			break;
	}
}
556
557 static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp,
558 struct gm_subscriber *subscriber,
559 pim_addr grp, pim_addr *srcs,
560 size_t n_src, enum gm_sub_sense sense)
561 {
562 struct gm_sg *sg;
563 struct gm_packet_sg *old_src;
564 size_t i;
565
566 for (i = 0; i < n_src; i++) {
567 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
568 if (!sg)
569 continue;
570
571 old_src = gm_packet_sg_find(sg, sense, subscriber);
572 if (!old_src)
573 continue;
574
575 gm_packet_sg_drop(old_src);
576 gm_sg_update(sg, false);
577 }
578 }
579
580 static void gm_sg_expiry_cancel(struct gm_sg *sg)
581 {
582 if (sg->t_sg_expire && PIM_DEBUG_GM_TRACE)
583 zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
584 EVENT_OFF(sg->t_sg_expire);
585 sg->query_sbit = true;
586 }
587
588 /* first pass: process all changes resulting in removal of state:
589 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
590 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
591 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
592 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
593 * note *replacing* state is NOT considered *removing* state here
594 *
595 * everything else is thrown into pkt for creation of state in pass 2
596 */
/* pass 1 for one MLDv2 group record: remove state that this record
 * invalidates, and stage new state (as pkt items) for pass 2.
 *
 * - IS/TO_EXCLUDE: stage a *,G item plus its source items
 * - IS/TO_INCLUDE: drop previous EXCLUDE *,G state, stage source items
 * - ALLOW_NEW_SOURCES: un-exclude sources (if in EXCLUDE), else like
 *   IS_INCLUDE
 * - BLOCK_OLD_SOURCES: drop sources from INCLUDE state; no-op in EXCLUDE
 */
static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
			       struct mld_v2_rec_hdr *rechdr, size_t n_src)
{
	/* NB: pkt->subscriber can be NULL here if the subscriber was not
	 * previously seen!
	 */
	struct gm_subscriber *subscriber = pkt->subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp = NULL;
	struct gm_packet_sg *item;
	size_t j;
	bool is_excl = false;

	grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY);
	if (grp && subscriber)
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);

	/* a *,G entry on the positive tree only exists for EXCLUDE mode */
	assert(old_grp == NULL || old_grp->is_excl);

	switch (rechdr->type) {
	case MLD_RECTYPE_IS_EXCLUDE:
	case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
		/* this always replaces or creates state */
		is_excl = true;
		if (!grp)
			grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);

		item = gm_packet_sg_setup(pkt, grp, is_excl, false);
		item->n_exclude = n_src;

		/* [EXCL_INCL_SG_NOTE] referenced below
		 *
		 * in theory, we should drop any S,G that the host may have
		 * previously added in INCLUDE mode. In practice, this is both
		 * incredibly rare and entirely irrelevant. It only makes any
		 * difference if an S,G that the host previously had on the
		 * INCLUDE list is now on the blocked list for EXCLUDE, which
		 * we can cover in processing the S,G list in pass2_excl().
		 *
		 * Other S,G from the host are simply left to expire
		 * "naturally" through general expiry.
		 */
		break;

	case MLD_RECTYPE_IS_INCLUDE:
	case MLD_RECTYPE_CHANGE_TO_INCLUDE:
		if (old_grp) {
			/* INCLUDE has no *,G state, so old_grp here refers to
			 * previous EXCLUDE => delete it
			 */
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);
			/* TODO "need S,G PRUNE => NO_INFO transition here" */
		}
		break;

	case MLD_RECTYPE_ALLOW_NEW_SOURCES:
		if (old_grp) {
			/* remove S,Gs from EXCLUDE, and then we're done */
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_NEG);
			return;
		}
		/* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
		 * idential to IS_INCLUDE (because the list of sources in
		 * IS_INCLUDE is not exhaustive)
		 */
		break;

	case MLD_RECTYPE_BLOCK_OLD_SOURCES:
		if (old_grp) {
			/* this is intentionally not implemented because it
			 * would be complicated as hell. we only take the list
			 * of blocked sources from full group state records
			 */
			return;
		}

		if (subscriber)
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_POS);
		return;
	}

	/* stage one item per listed source for pass 2; is_excl carries over
	 * from the record type handled above
	 */
	for (j = 0; j < n_src; j++) {
		struct gm_sg *sg;

		sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
		if (!sg)
			sg = gm_sg_make(pkt->iface, rechdr->grp,
					rechdr->srcs[j]);

		gm_packet_sg_setup(pkt, sg, is_excl, true);
	}
}
694
695 /* second pass: creating/updating/refreshing state. All the items from the
696 * received packet have already been thrown into gm_packet_state.
697 */
698
/* pass 2 for a single INCLUDE-mode S,G item: replace any previous state
 * for this (subscriber, S,G) with the new item, refresh the entry and
 * re-evaluate its state.
 */
static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i)
{
	struct gm_packet_sg *item = &pkt->items[i];
	struct gm_packet_sg *old = NULL;
	struct gm_sg *sg = item->sg;

	/* EXCLUDE state was already dropped in pass1 */
	assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber));

	/* replace-in-place: drop the older item for this subscriber */
	old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber);
	if (old)
		gm_packet_sg_drop(old);

	pkt->n_active++;
	gm_packet_sg_subs_add(sg->subs_positive, item);

	sg->most_recent = item;
	gm_sg_expiry_cancel(sg);
	gm_sg_update(sg, false);
}
719
/* pass 2 for an EXCLUDE-mode *,G item at pkt->items[offs], followed by its
 * n_exclude source items.  Replaces the subscriber's previous EXCLUDE
 * state (if any), installs the new *,G on the positive tree and the
 * excluded sources on their negative trees, then re-evaluates the affected
 * (S,G) entries.
 */
static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs)
{
	struct gm_packet_sg *item = &pkt->items[offs];
	struct gm_packet_sg *old_grp, *item_dup;
	struct gm_sg *sg_grp = item->sg;
	size_t i;

	old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber);
	if (old_grp) {
		/* for each source in the new record, drop matching old
		 * state so the new item can take its place
		 */
		for (i = 0; i < item->n_exclude; i++) {
			struct gm_packet_sg *item_src, *old_src;

			item_src = &pkt->items[offs + 1 + i];
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);

			/* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
			 * items left over if the host previously had INCLUDE
			 * mode going. Remove them here if we find any.
			 */
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);
		}

		/* the previous loop has removed the S,G entries which are
		 * still excluded after this update. So anything left on the
		 * old item was previously excluded but is now included
		 * => need to trigger update on S,G
		 */
		for (i = 0; i < old_grp->n_exclude; i++) {
			struct gm_packet_sg *old_src;
			struct gm_sg *old_sg_src;

			old_src = old_grp + 1 + i;
			old_sg_src = old_src->sg;
			if (!old_sg_src)
				continue;

			gm_packet_sg_drop(old_src);
			gm_sg_update(old_sg_src, false);
		}

		gm_packet_sg_drop(old_grp);
	}

	/* install the new *,G; duplicate is impossible since the old item
	 * was just removed above
	 */
	item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item);
	assert(!item_dup);
	pkt->n_active++;

	sg_grp->most_recent = item;
	gm_sg_expiry_cancel(sg_grp);

	/* install the excluded sources on their negative trees; a duplicate
	 * here means the same source was listed twice in the record
	 */
	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *item_src;

		item_src = &pkt->items[offs + 1 + i];
		item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative,
						 item_src);

		if (item_dup)
			item_src->sg = NULL;
		else {
			pkt->n_active++;
			gm_sg_update(item_src->sg, false);
		}
	}

	/* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
	 * to get lower PIM churn/flapping
	 */
	gm_sg_update(sg_grp, false);
}
796
797 /* TODO: QRV/QQIC are not copied from queries to local state" */
798
799 /* on receiving a query, we need to update our robustness/query interval to
800 * match, so we correctly process group/source specific queries after last
801 * member leaves
802 */
803
/* parse and apply one received MLDv2 report.
 *
 * The packet's records are walked twice: pass 1 validates each record and
 * removes state it invalidates (staging new items into pkt), then the pkt
 * is shrunk to its actual item count and pass 2 installs the new state.
 * Malformed trailing records abort the walk but earlier records still take
 * effect.
 */
static void gm_handle_v2_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v2_report_hdr *hdr;
	size_t i, n_records, max_entries;
	struct gm_packet_state *pkt;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv2 report (truncated header)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	hdr = (struct mld_v2_report_hdr *)data;
	data += sizeof(*hdr);
	len -= sizeof(*hdr);

	n_records = ntohs(hdr->n_records);
	if (n_records > len / sizeof(struct mld_v2_rec_hdr)) {
		/* note this is only an upper bound, records with source lists
		 * are larger. This is mostly here to make coverity happy.
		 */
		zlog_warn(log_pkt_src(
			"malformed MLDv2 report (infeasible record count)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	/* errors after this may at least partially process the packet */
	gm_ifp->stats.rx_new_report++;

	/* can't have more *,G and S,G items than there is space for ipv6
	 * addresses, so just use this to allocate temporary buffer
	 */
	max_entries = len / sizeof(pim_addr);
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	/* may be NULL for a first-time subscriber; resolved after pass 1 */
	pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);

	/* validate & remove state in v2_pass1() */
	for (i = 0; i < n_records; i++) {
		struct mld_v2_rec_hdr *rechdr;
		size_t n_src, record_size;

		if (len < sizeof(*rechdr)) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated record header)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		rechdr = (struct mld_v2_rec_hdr *)data;
		data += sizeof(*rechdr);
		len -= sizeof(*rechdr);

		n_src = ntohs(rechdr->n_src);
		record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4;

		if (len < record_size) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated source list)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}
		if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) {
			zlog_warn(
				log_pkt_src(
					"malformed MLDv2 report (invalid group %pI6)"),
				&rechdr->grp);
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		data += record_size;
		len -= record_size;

		gm_handle_v2_pass1(pkt, rechdr, n_src);
	}

	/* nothing staged: the report only removed state (or was empty) */
	if (!pkt->n_active) {
		gm_subscriber_drop(&pkt->subscriber);
		XFREE(MTYPE_GM_STATE, pkt);
		return;
	}

	/* shrink the over-allocated buffer down to the staged item count */
	pkt = XREALLOC(MTYPE_GM_STATE, pkt,
		       offsetof(struct gm_packet_state, items[pkt->n_active]));
	pkt->n_sg = pkt->n_active;
	/* pass 2 counts n_active back up as items are installed */
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	/* an EXCLUDE *,G item is followed by its n_exclude source items,
	 * which pass2_excl consumes - skip over them here
	 */
	for (i = 0; i < pkt->n_sg; i++)
		if (!pkt->items[i].is_excl)
			gm_handle_v2_pass2_incl(pkt, i);
		else {
			gm_handle_v2_pass2_excl(pkt, i);
			i += pkt->items[i].n_exclude;
		}

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}
916
/* handle a received MLDv1 report.  Modelled as an MLDv2 IS_EXCLUDE record
 * with an empty source list, attributed to the shared "untracked" dummy
 * subscriber (v1 report suppression makes per-host tracking impossible).
 */
static void gm_handle_v1_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_packet_state *pkt;
	struct gm_sg *grp;
	struct gm_packet_sg *item;
	size_t max_entries;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 report (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_report++;

	hdr = (struct mld_v1_pkt *)data;

	/* exactly one group per v1 report => one item */
	max_entries = 1;
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);

	/* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */

	grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY);
	if (!grp)
		grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY);

	item = gm_packet_sg_setup(pkt, grp, true, false);
	item->n_exclude = 0;

	/* TODO "set v1-seen timer on grp here" */

	/* } */

	/* pass2 will count n_active back up to 1. Also since a v1 report
	 * has exactly 1 group, we can skip the realloc() that v2 needs here.
	 */
	assert(pkt->n_active == 1);
	pkt->n_sg = pkt->n_active;
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	/* pass2 covers installing state & removing old state; all the v1
	 * compat is handled at this point.
	 *
	 * Note that "old state" may be v2; subscribers will switch from v2
	 * reports to v1 reports when the querier changes from v2 to v1. So,
	 * limiting this to v1 would be wrong.
	 */
	gm_handle_v2_pass2_excl(pkt, 0);

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}
984
/* handle a received MLDv1 done/leave.  Since v1 state is held under the
 * shared untracked dummy subscriber, this just drops that subscriber's
 * *,G EXCLUDE state for the group (if any) and re-evaluates it.
 */
static void gm_handle_v1_leave(struct gm_if *gm_ifp,
			       const struct sockaddr_in6 *pkt_src, char *data,
			       size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_subscriber *subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 leave (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_leave++;

	hdr = (struct mld_v1_pkt *)data;

	/* no dummy subscriber => no v1 state to remove */
	subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
	if (!subscriber)
		return;

	/* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */

	grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY);
	if (grp) {
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
		if (old_grp) {
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);

			/* TODO "need S,G PRUNE => NO_INFO transition here" */

		}
	}

	/* } */

	/* nothing more to do here, pass2 is no-op for leaves */
	gm_subscriber_drop(&subscriber);
}
1029
1030 /* for each general query received (or sent), a timer is started to expire
1031 * _everything_ at the appropriate time (including robustness multiplier).
1032 *
1033 * So when this timer hits, all packets - with all of their items - that were
1034 * received *before* the query are aged out, and state updated accordingly.
1035 * Note that when we receive a refresh/update, the previous/old packet is
1036 * already dropped and replaced with a new one, so in normal steady-state
1037 * operation, this timer won't be doing anything.
1038 *
1039 * Additionally, if a subscriber actively leaves a group, that goes through
1040 * its own path too and won't hit this. This is really only triggered when a
1041 * host straight up disappears.
1042 */
/* general-expiry timer callback: age out all packets received before the
 * oldest pending general query, then re-arm for the next pending entry (or
 * wait for the next query if none are due yet).  See the block comment
 * above for the overall mechanism.
 */
static void gm_t_expire(struct event *t)
{
	struct gm_if *gm_ifp = EVENT_ARG(t);
	struct gm_packet_state *pkt;

	zlog_info(log_ifp("general expiry timer"));

	while (gm_ifp->n_pending) {
		struct gm_general_pending *pend = gm_ifp->pending;
		struct timeval remain;
		int64_t remain_ms;

		/* oldest pending entry not due yet => re-arm and stop */
		remain_ms = monotime_until(&pend->expiry, &remain);
		if (remain_ms > 0) {
			if (PIM_DEBUG_GM_EVENTS)
				zlog_debug(
					log_ifp("next general expiry in %" PRId64 "ms"),
					remain_ms / 1000);

			event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
					   &remain, &gm_ifp->t_expire);
			return;
		}

		/* expires list is in receive order, so everything received
		 * before this pending query's timestamp is at the front
		 */
		while ((pkt = gm_packet_expires_first(gm_ifp->expires))) {
			if (timercmp(&pkt->received, &pend->query, >=))
				break;

			if (PIM_DEBUG_GM_PACKETS)
				zlog_debug(log_ifp("expire packet %p"), pkt);
			gm_packet_drop(pkt, true);
		}

		/* pending[] is a small array; shift out the consumed entry */
		gm_ifp->n_pending--;
		memmove(gm_ifp->pending, gm_ifp->pending + 1,
			gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
	}

	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("next general expiry waiting for query"));
}
1084
1085 /* NB: the receive handlers will also run when sending packets, since we
1086 * receive our own packets back in.
1087 */
/* record a general query (sent or received - we see our own packets too)
 * into the pending-expiry array and (re)arm the general expiry timer.
 * Pending entries whose expiry would come after this query's are redundant
 * ("supersetted") and get zapped first, keeping pending[] sorted by expiry.
 */
static void gm_handle_q_general(struct gm_if *gm_ifp,
				struct gm_query_timers *timers)
{
	struct timeval now, expiry;
	struct gm_general_pending *pend;

	monotime(&now);
	timeradd(&now, &timers->expire_wait, &expiry);

	while (gm_ifp->n_pending) {
		pend = &gm_ifp->pending[gm_ifp->n_pending - 1];

		if (timercmp(&pend->expiry, &expiry, <))
			break;

		/* if we end up here, the last item in pending[] has an expiry
		 * later than the expiry for this query. But our query time
		 * (now) is later than that of the item (because, well, that's
		 * how time works.) This makes this query meaningless since
		 * it's "supersetted" within the preexisting query
		 */

		if (PIM_DEBUG_GM_TRACE_DETAIL)
			zlog_debug(
				log_ifp("zapping supersetted general timer %pTVMu"),
				&pend->expiry);

		gm_ifp->n_pending--;
		if (!gm_ifp->n_pending)
			EVENT_OFF(gm_ifp->t_expire);
	}

	/* people might be messing with their configs or something */
	if (gm_ifp->n_pending == array_size(gm_ifp->pending))
		return;

	pend = &gm_ifp->pending[gm_ifp->n_pending];
	pend->query = now;
	pend->expiry = expiry;

	/* only the first entry needs the timer armed; later entries are
	 * picked up when gm_t_expire() drains the array
	 */
	if (!gm_ifp->n_pending++) {
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(
				log_ifp("starting general timer @ 0: %pTVMu"),
				&pend->expiry);
		event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
				   &timers->expire_wait, &gm_ifp->t_expire);
	} else if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
			   gm_ifp->n_pending, &pend->expiry);
}
1139
/* Expiry timer for one S,G (or *,G) entry: no report refreshed it in time,
 * so all positive subscriptions are dropped and the entry is re-evaluated
 * (gm_sg_update with the "may expire" flag - see there for exact semantics).
 */
static void gm_t_sg_expire(struct event *t)
{
	struct gm_sg *sg = EVENT_ARG(t);
	struct gm_if *gm_ifp = sg->iface;
	struct gm_packet_sg *item;

	/* this timer is only ever armed in the two *_EXPIRING states
	 * (see gm_sg_timer_start callers)
	 */
	assertf(sg->state == GM_SG_JOIN_EXPIRING ||
			sg->state == GM_SG_NOPRUNE_EXPIRING,
		"%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t);

	frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
		/* this will also drop EXCLUDE mode S,G lists together with
		 * the *,G entry
		 */
		gm_packet_sg_drop(item);

	/* subs_negative items are only timed out together with the *,G entry
	 * since we won't get any reports for a group-and-source query
	 */
	gm_sg_update(sg, true);
}
1161
/* Return true if sg has subscription state received at/after `ref` (with a
 * configurable fuzz allowance), i.e. a report already answered the query
 * and no expiry action is needed.  Side effect: rebuilds the
 * sg->most_recent cache (newest positive subscription) if it was unset.
 */
static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg,
			       struct timeval ref)
{
	struct gm_packet_state *pkt;

	if (!sg->most_recent) {
		/* cache empty - rescan all positive subscriptions for the
		 * one with the latest RX timestamp
		 */
		struct gm_packet_state *best_pkt = NULL;
		struct gm_packet_sg *item;

		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
			pkt = gm_packet_sg2state(item);

			if (!best_pkt ||
			    timercmp(&pkt->received, &best_pkt->received, >)) {
				best_pkt = pkt;
				sg->most_recent = item;
			}
		}
	}
	if (sg->most_recent) {
		struct timeval fuzz;

		pkt = gm_packet_sg2state(sg->most_recent);

		/* this shouldn't happen on plain old real ethernet segment,
		 * but on something like a VXLAN or VPLS it is very possible
		 * that we get a report before the query that triggered it.
		 * (imagine a triangle scenario with 3 datacenters, it's very
		 * possible A->B + B->C is faster than A->C due to odd routing)
		 *
		 * This makes a little tolerance allowance to handle that case.
		 */
		timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz);

		if (timercmp(&fuzz, &ref, >))
			return true;
	}
	return false;
}
1201
/* Arm (or tighten) the expiry timer on sg in response to a query.
 * No-op when sg is NULL (unknown S,G referenced by a query), when the
 * entry is already pruned, or when a report newer than "now" exists.
 * An already-running timer is only replaced if the new expiry is earlier.
 */
static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
			      struct timeval expire_wait)
{
	struct timeval now;

	if (!sg)
		return;
	if (sg->state == GM_SG_PRUNE)
		return;

	monotime(&now);
	if (gm_sg_check_recent(gm_ifp, sg, now))
		return;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait);

	if (sg->t_sg_expire) {
		struct timeval remain;

		/* keep the existing timer if it fires sooner (or equal) */
		remain = event_timer_remain(sg->t_sg_expire);
		if (timercmp(&remain, &expire_wait, <=))
			return;

		EVENT_OFF(sg->t_sg_expire);
	}

	event_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
			   &sg->t_sg_expire);
}
1232
1233 static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
1234 struct gm_query_timers *timers, pim_addr grp,
1235 const pim_addr *srcs, size_t n_src)
1236 {
1237 struct gm_sg *sg;
1238 size_t i;
1239
1240 for (i = 0; i < n_src; i++) {
1241 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
1242 gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1243 }
1244 }
1245
/* Expiry for a group-specific query: drop all S,G state of the group that
 * was not refreshed by reports since the query was received.
 */
static void gm_t_grp_expire(struct event *t)
{
	/* if we're here, that means when we received the group-specific query
	 * there was one or more active S,G for this group. For *,G the timer
	 * in sg->t_sg_expire is running separately and gets cancelled when we
	 * receive a report, so that work is left to gm_t_sg_expire and we
	 * shouldn't worry about it here.
	 */
	struct gm_grp_pending *pend = EVENT_ARG(t);
	struct gm_if *gm_ifp = pend->iface;
	struct gm_sg *sg, *sg_start, sg_ref = {};

	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);

	/* gteq lookup - try to find *,G or S,G (S,G is > *,G)
	 * could technically be gt to skip a possible *,G
	 */
	sg_ref.sgaddr.grp = pend->grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
		struct gm_packet_sg *item;

		/* sgs is ordered; stop once we leave this group */
		if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
			break;
		if (pim_addr_is_any(sg->sgaddr.src))
			/* handled by gm_t_sg_expire / sg->t_sg_expire */
			continue;
		/* a report refreshed this S,G after the query - keep it */
		if (gm_sg_check_recent(gm_ifp, sg, pend->query))
			continue;

		/* we may also have a group-source-specific query going on in
		 * parallel. But if we received nothing for the *,G query,
		 * the S,G query is kinda irrelevant.
		 */
		EVENT_OFF(sg->t_sg_expire);

		frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
			/* this will also drop the EXCLUDE S,G lists */
			gm_packet_sg_drop(item);

		gm_sg_update(sg, true);
	}

	/* one-shot: the pending entry is consumed here */
	gm_grp_pends_del(gm_ifp->grp_pends, pend);
	XFREE(MTYPE_GM_GRP_PENDING, pend);
}
1295
/* Handle a group-specific query: start expiry on the *,G entry (if any)
 * and, if the group also has S,G entries, set up a gm_grp_pending so
 * gm_t_grp_expire can time out those that don't get refreshed.
 */
static void gm_handle_q_group(struct gm_if *gm_ifp,
			      struct gm_query_timers *timers, pim_addr grp)
{
	struct gm_sg *sg, sg_ref = {};
	struct gm_grp_pending *pend, pend_ref = {};

	sg_ref.sgaddr.grp = grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	/* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
	sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
		/* we have nothing at all for this group - don't waste RAM */
		return;

	if (pim_addr_is_any(sg->sgaddr.src)) {
		/* actually found *,G entry here */
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
				   &grp);
		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);

		sg = gm_sgs_next(gm_ifp->sgs, sg);
		if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
			/* no S,G for this group */
			return;
	}

	/* at least one S,G exists - (re)arm the group pending entry */
	pend_ref.grp = grp;
	pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);

	if (pend) {
		struct timeval remain;

		/* keep an existing pending entry if it expires sooner */
		remain = event_timer_remain(pend->t_expire);
		if (timercmp(&remain, &timers->expire_wait, <=))
			return;

		EVENT_OFF(pend->t_expire);
	} else {
		pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
		pend->grp = grp;
		pend->iface = gm_ifp;
		gm_grp_pends_add(gm_ifp->grp_pends, pend);
	}

	/* record query time so gm_t_grp_expire can tell refreshed state */
	monotime(&pend->query);
	event_add_timer_tv(router->master, gm_t_grp_expire, pend,
			   &timers->expire_wait, &pend->t_expire);

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
			   pend->t_expire);
}
1350
1351 static void gm_bump_querier(struct gm_if *gm_ifp)
1352 {
1353 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1354
1355 EVENT_OFF(gm_ifp->t_query);
1356
1357 if (pim_addr_is_any(pim_ifp->ll_lowest))
1358 return;
1359 if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
1360 return;
1361
1362 gm_ifp->n_startup = gm_ifp->cur_qrv;
1363
1364 event_execute(router->master, gm_t_query, gm_ifp, 0);
1365 }
1366
/* The other (lower-addressed) querier went silent long enough: take the
 * querier role back, with a fresh startup query burst.
 */
static void gm_t_other_querier(struct event *t)
{
	struct gm_if *gm_ifp = EVENT_ARG(t);
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;

	zlog_info(log_ifp("other querier timer expired"));

	/* our own lowest link-local address becomes querier again */
	gm_ifp->querier = pim_ifp->ll_lowest;
	gm_ifp->n_startup = gm_ifp->cur_qrv;

	/* send a query right away; this also re-arms the periodic timer */
	event_execute(router->master, gm_t_query, gm_ifp, 0);
}
1379
/* Process a received MLD query (MLDv1 or MLDv2, distinguished by packet
 * length): validate it, run querier election, compute the response timers
 * and dispatch to the general / group / group-and-source handlers.
 */
static void gm_handle_query(struct gm_if *gm_ifp,
			    const struct sockaddr_in6 *pkt_src,
			    pim_addr *pkt_dst, char *data, size_t len)
{
	struct mld_v2_query_hdr *hdr;
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct gm_query_timers timers;
	bool general_query;

	/* only two valid shapes: full MLDv2 query, or fixed-size MLDv1 */
	if (len < sizeof(struct mld_v2_query_hdr) &&
	    len != sizeof(struct mld_v1_pkt)) {
		zlog_warn(log_pkt_src("invalid query size"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	/* NB: for an MLDv1 packet, only the fields shared with the v1
	 * layout may be accessed through this cast
	 */
	hdr = (struct mld_v2_query_hdr *)data;
	general_query = pim_addr_is_any(hdr->grp);

	if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) {
		zlog_warn(log_pkt_src(
				  "malformed MLDv2 query (invalid group %pI6)"),
			  &hdr->grp);
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	/* v2-only validation: source list length & consistency */
	if (len >= sizeof(struct mld_v2_query_hdr)) {
		size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);

		if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 query (truncated source list)"));
			gm_ifp->stats.rx_drop_malformed++;
			return;
		}

		if (general_query && src_space) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 query (general query with non-empty source list)"));
			gm_ifp->stats.rx_drop_malformed++;
			return;
		}
	}

	/* accepting queries unicast to us (or addressed to a wrong group)
	 * can mess up querier election as well as cause us to terminate
	 * traffic (since after a unicast query no reports will be coming in)
	 */
	if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) {
		if (pim_addr_is_any(hdr->grp)) {
			zlog_warn(
				log_pkt_src(
					"wrong destination %pPA for general query"),
				pkt_dst);
			gm_ifp->stats.rx_drop_dstaddr++;
			return;
		}

		if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) {
			gm_ifp->stats.rx_drop_dstaddr++;
			zlog_warn(
				log_pkt_src(
					"wrong destination %pPA for group specific query"),
				pkt_dst);
			return;
		}
	}

	/* querier election: lowest link-local address wins */
	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
		if (PIM_DEBUG_GM_EVENTS)
			zlog_debug(
				log_pkt_src("replacing elected querier %pPA"),
				&gm_ifp->querier);

		gm_ifp->querier = pkt_src->sin6_addr;
	}

	if (len == sizeof(struct mld_v1_pkt)) {
		/* MLDv1 query carries no QRV/QQIC; use our local values.
		 * NOTE(review): max_resp_code is a 16-bit on-wire field -
		 * looks like it may need ntohs() here; confirm against the
		 * struct definition in pim6_mld_protocol.h
		 */
		timers.qrv = gm_ifp->cur_qrv;
		timers.max_resp_ms = hdr->max_resp_code;
		timers.qqic_ms = gm_ifp->cur_query_intv;
	} else {
		/* QRV == 0 means "more than 7", treat as 8 (RFC 3810) */
		timers.qrv = (hdr->flags & 0x7) ?: 8;
		timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code);
		timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000;
	}
	timers.fuzz = gm_ifp->cfg_timing_fuzz;

	gm_expiry_calc(&timers);

	if (PIM_DEBUG_GM_TRACE_DETAIL)
		zlog_debug(
			log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
			timers.qrv, timers.max_resp_ms, timers.qqic_ms,
			&timers.expire_wait);

	/* sender has a lower address than ours - it is (now) the querier;
	 * stop querying and start the other-querier-present timeout
	 */
	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
		unsigned int other_ms;

		EVENT_OFF(gm_ifp->t_query);
		EVENT_OFF(gm_ifp->t_other_querier);

		other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
		event_add_timer_msec(router->master, gm_t_other_querier, gm_ifp,
				     other_ms, &gm_ifp->t_other_querier);
	}

	if (len == sizeof(struct mld_v1_pkt)) {
		if (general_query) {
			gm_handle_q_general(gm_ifp, &timers);
			gm_ifp->stats.rx_query_old_general++;
		} else {
			gm_handle_q_group(gm_ifp, &timers, hdr->grp);
			gm_ifp->stats.rx_query_old_group++;
		}
		return;
	}

	/* v2 query - [S]uppress bit */
	if (hdr->flags & 0x8) {
		gm_ifp->stats.rx_query_new_sbit++;
		return;
	}

	if (general_query) {
		gm_handle_q_general(gm_ifp, &timers);
		gm_ifp->stats.rx_query_new_general++;
	} else if (!ntohs(hdr->n_src)) {
		gm_handle_q_group(gm_ifp, &timers, hdr->grp);
		gm_ifp->stats.rx_query_new_group++;
	} else {
		/* this is checked above:
		 * if (len >= sizeof(struct mld_v2_query_hdr)) {
		 *   size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);
		 *   if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
		 */
		assume(ntohs(hdr->n_src) <=
		       (len - sizeof(struct mld_v2_query_hdr)) /
			       sizeof(pim_addr));

		gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
				     ntohs(hdr->n_src));
		gm_ifp->stats.rx_query_new_groupsrc++;
	}
}
1526
/* Verify the ICMPv6 checksum (over the IPv6 pseudo-header + payload) and
 * dispatch to the per-message-type MLD handlers.  data/pktlen cover the
 * full ICMPv6 packet including the plain ICMPv6 header.
 */
static void gm_rx_process(struct gm_if *gm_ifp,
			  const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst,
			  void *data, size_t pktlen)
{
	struct icmp6_plain_hdr *icmp6 = data;
	uint16_t pkt_csum, ref_csum;
	struct ipv6_ph ph6 = {
		.src = pkt_src->sin6_addr,
		.dst = *pkt_dst,
		.ulpl = htons(pktlen),
		.next_hdr = IPPROTO_ICMPV6,
	};

	/* zero the checksum field in place to recompute it; the buffer is
	 * ours (local RX buffer), so clobbering it is fine
	 */
	pkt_csum = icmp6->icmp6_cksum;
	icmp6->icmp6_cksum = 0;
	ref_csum = in_cksum_with_ph6(&ph6, data, pktlen);

	if (pkt_csum != ref_csum) {
		zlog_warn(
			log_pkt_src(
				"(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
			pkt_dst, pkt_csum, ref_csum);
		gm_ifp->stats.rx_drop_csum++;
		return;
	}

	/* step past the ICMPv6 header to the MLD payload */
	data = (icmp6 + 1);
	pktlen -= sizeof(*icmp6);

	/* no default case needed: the ICMP6_FILTER installed in
	 * gm_vrf_socket_incref only passes these four types
	 */
	switch (icmp6->icmp6_type) {
	case ICMP6_MLD_QUERY:
		gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen);
		break;
	case ICMP6_MLD_V1_REPORT:
		gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V1_DONE:
		gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V2_REPORT:
		gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen);
		break;
	}
}
1571
/* Walk an IPv6 hop-by-hop options header (as delivered via IPV6_RECVHOPOPTS)
 * and return true iff it contains a Router Alert option with the given
 * alert value.  All bounds are validated; malformed headers yield false.
 */
static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
				 uint16_t alert_type)
{
	size_t hdrlen, pos;

	/* minimum hop-by-hop header size is one 8-byte unit */
	if (hopopt_len < 8)
		return false;
	/* hopopts[1] = header length in 8-byte units, excluding the first */
	hdrlen = (hopopts[1] + 1U) * 8U;
	if (hopopt_len < hdrlen)
		return false;

	/* skip next-header and length bytes */
	pos = 2;
	while (pos < hdrlen) {
		if (hopopts[pos] == IP6OPT_PAD1) {
			/* Pad1 has no length byte */
			pos++;
			continue;
		}

		/* need room for option type + option length ... */
		if (pos + 2 > hdrlen)
			break;
		/* ... and for the option body itself */
		if (pos + 2 + hopopts[pos + 1] > hdrlen)
			break;

		if (hopopts[pos] == IP6OPT_ROUTER_ALERT &&
		    hopopts[pos + 1] == 2) {
			uint16_t have_type =
				(uint16_t)((hopopts[pos + 2] << 8) |
					   hopopts[pos + 3]);

			if (have_type == alert_type)
				return true;
		}

		pos += 2 + hopopts[pos + 1];
	}
	return false;
}
1607
/* Read handler for the per-VRF MLD socket.  Reads one packet together with
 * its ancillary data (pktinfo, hop-by-hop options, hop limit), applies the
 * sanity checks required for MLD (hop limit 1, Router Alert option,
 * link-local source) and passes the payload to gm_rx_process().
 */
static void gm_t_recv(struct event *t)
{
	struct pim_instance *pim = EVENT_ARG(t);
	union {
		char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
			 CMSG_SPACE(256) /* hop options */ +
			 CMSG_SPACE(sizeof(int)) /* hopcount */];
		struct cmsghdr align;
	} cmsgbuf;
	struct cmsghdr *cmsg;
	struct in6_pktinfo *pktinfo = NULL;
	uint8_t *hopopts = NULL;
	size_t hopopt_len = 0;
	int *hoplimit = NULL;
	char rxbuf[2048];
	struct msghdr mh[1] = {};
	struct iovec iov[1];
	struct sockaddr_in6 pkt_src[1] = {};
	ssize_t nread;
	size_t pktlen;

	/* re-arm ourselves first; any early return below keeps us reading */
	event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
		       &pim->t_gm_recv);

	iov->iov_base = rxbuf;
	iov->iov_len = sizeof(rxbuf);

	mh->msg_name = pkt_src;
	mh->msg_namelen = sizeof(pkt_src);
	mh->msg_control = cmsgbuf.buf;
	mh->msg_controllen = sizeof(cmsgbuf.buf);
	mh->msg_iov = iov;
	mh->msg_iovlen = array_size(iov);
	mh->msg_flags = 0;

	/* peek first to learn the real packet size (MSG_TRUNC makes
	 * recvmsg return the full length even if it exceeds the buffer)
	 */
	nread = recvmsg(pim->gm_socket, mh, MSG_PEEK | MSG_TRUNC);
	if (nread <= 0) {
		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
		pim->gm_rx_drop_sys++;
		return;
	}

	/* packet larger than the stack buffer - heap-allocate for it */
	if ((size_t)nread > sizeof(rxbuf)) {
		iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread);
		iov->iov_len = nread;
	}
	nread = recvmsg(pim->gm_socket, mh, 0);
	if (nread <= 0) {
		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
		pim->gm_rx_drop_sys++;
		goto out_free;
	}

	struct interface *ifp;

	/* sin6_scope_id carries the ifindex for link-local sources */
	ifp = if_lookup_by_index(pkt_src->sin6_scope_id, pim->vrf->vrf_id);
	if (!ifp || !ifp->info)
		goto out_free;

	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;

	if (!gm_ifp)
		goto out_free;

	/* pick the ancillary data we asked for via setsockopt */
	for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) {
		if (cmsg->cmsg_level != SOL_IPV6)
			continue;

		switch (cmsg->cmsg_type) {
		case IPV6_PKTINFO:
			pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			break;
		case IPV6_HOPOPTS:
			hopopts = CMSG_DATA(cmsg);
			hopopt_len = cmsg->cmsg_len - sizeof(*cmsg);
			break;
		case IPV6_HOPLIMIT:
			hoplimit = (int *)CMSG_DATA(cmsg);
			break;
		}
	}

	if (!pktinfo || !hoplimit) {
		zlog_err(log_ifp(
			"BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
		pim->gm_rx_drop_sys++;
		goto out_free;
	}

	/* MLD packets must have hop limit 1 (RFC requirement) */
	if (*hoplimit != 1) {
		zlog_err(log_pkt_src("packet with hop limit != 1"));
		/* spoofing attempt => count on srcaddr counter */
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) {
		zlog_err(log_pkt_src(
			"packet without IPv6 Router Alert MLD option"));
		gm_ifp->stats.rx_drop_ra++;
		goto out_free;
	}

	if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr))
		/* reports from :: happen in normal operation for DAD, so
		 * don't spam log messages about this
		 */
		goto out_free;

	if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) {
		zlog_warn(log_pkt_src("packet from invalid source address"));
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	pktlen = nread;
	if (pktlen < sizeof(struct icmp6_plain_hdr)) {
		zlog_warn(log_pkt_src("truncated packet"));
		gm_ifp->stats.rx_drop_malformed++;
		goto out_free;
	}

	gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base,
		      pktlen);

out_free:
	/* free the heap buffer if one was allocated for an oversized pkt */
	if (iov->iov_base != rxbuf)
		XFREE(MTYPE_GM_PACKET, iov->iov_base);
}
1738
/* Build and transmit one MLD query on gm_ifp.  grp == :: yields a general
 * query; otherwise a group-specific (n_srcs == 0) or group-and-source
 * (n_srcs > 0) query.  s_bit sets the v2 [S]uppress flag.  On MLDv1
 * interfaces only the v1-sized packet portion is sent.
 */
static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
			  const pim_addr *srcs, size_t n_srcs, bool s_bit)
{
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct sockaddr_in6 dstaddr = {
		.sin6_family = AF_INET6,
		.sin6_scope_id = gm_ifp->ifp->ifindex,
	};
	struct {
		struct icmp6_plain_hdr hdr;
		struct mld_v2_query_hdr v2_query;
	} query = {
		/* clang-format off */
		.hdr = {
			.icmp6_type = ICMP6_MLD_QUERY,
			.icmp6_code = 0,
		},
		.v2_query = {
			.grp = grp,
		},
		/* clang-format on */
	};
	struct ipv6_ph ph6 = {
		.src = pim_ifp->ll_lowest,
		.ulpl = htons(sizeof(query)),
		.next_hdr = IPPROTO_ICMPV6,
	};
	union {
		char buf[CMSG_SPACE(8) /* hop options */ +
			 CMSG_SPACE(sizeof(struct in6_pktinfo))];
		struct cmsghdr align;
	} cmsg = {};
	struct cmsghdr *cmh;
	struct msghdr mh[1] = {};
	struct iovec iov[3];
	size_t iov_len;
	ssize_t ret, expect_ret;
	uint8_t *dp;
	struct in6_pktinfo *pktinfo;

	if (if_is_loopback(gm_ifp->ifp)) {
		/* Linux is a bit odd with multicast on loopback */
		ph6.src = in6addr_loopback;
		dstaddr.sin6_addr = in6addr_loopback;
	} else if (pim_addr_is_any(grp))
		dstaddr.sin6_addr = gm_all_hosts;
	else
		dstaddr.sin6_addr = grp;

	query.v2_query.max_resp_code =
		mld_max_resp_encode(gm_ifp->cur_max_resp);
	/* QRV field is 3 bits; 0 encodes "8 or more" */
	query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? gm_ifp->cur_qrv : 0;
	if (s_bit)
		query.v2_query.flags |= 0x08;
	query.v2_query.qqic =
		igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000);
	query.v2_query.n_src = htons(n_srcs);

	ph6.dst = dstaddr.sin6_addr;

	/* ph6 not included in sendmsg */
	iov[0].iov_base = &ph6;
	iov[0].iov_len = sizeof(ph6);
	iov[1].iov_base = &query;
	if (gm_ifp->cur_version == GM_MLDV1) {
		/* MLDv1 queries only use the v1-sized packet head */
		iov_len = 2;
		iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt);
	} else if (!n_srcs) {
		iov_len = 2;
		iov[1].iov_len = sizeof(query);
	} else {
		/* source list goes in a third iov so srcs needn't be copied */
		iov[1].iov_len = sizeof(query);
		iov[2].iov_base = (void *)srcs;
		iov[2].iov_len = n_srcs * sizeof(srcs[0]);
		iov_len = 3;
	}

	/* checksum over pseudo-header (iov[0]) + actual packet data */
	query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);

	if (PIM_DEBUG_GM_PACKETS)
		zlog_debug(
			log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
			&pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);

	mh->msg_name = &dstaddr;
	mh->msg_namelen = sizeof(dstaddr);
	mh->msg_iov = iov + 1;
	mh->msg_iovlen = iov_len - 1;
	mh->msg_control = &cmsg;
	mh->msg_controllen = sizeof(cmsg.buf);

	/* hop-by-hop options header carrying the Router Alert (MLD) option,
	 * required on MLD packets - hand-assembled 8 bytes
	 */
	cmh = CMSG_FIRSTHDR(mh);
	cmh->cmsg_level = IPPROTO_IPV6;
	cmh->cmsg_type = IPV6_HOPOPTS;
	cmh->cmsg_len = CMSG_LEN(8);
	dp = CMSG_DATA(cmh);
	*dp++ = 0;			/* next header */
	*dp++ = 0;			/* length (8-byte blocks, minus 1) */
	*dp++ = IP6OPT_ROUTER_ALERT;	/* router alert */
	*dp++ = 2;			/* length */
	*dp++ = 0;			/* value (2 bytes) */
	*dp++ = 0;			/* value (2 bytes) (0 = MLD) */
	*dp++ = 0;			/* pad0 */
	*dp++ = 0;			/* pad0 */

	/* pktinfo pins outgoing interface + source address */
	cmh = CMSG_NXTHDR(mh, cmh);
	cmh->cmsg_level = IPPROTO_IPV6;
	cmh->cmsg_type = IPV6_PKTINFO;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
	pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmh);
	pktinfo->ipi6_ifindex = gm_ifp->ifp->ifindex;
	pktinfo->ipi6_addr = gm_ifp->cur_ll_lowest;

	expect_ret = iov[1].iov_len;
	if (iov_len == 3)
		expect_ret += iov[2].iov_len;

	frr_with_privs (&pimd_privs) {
		ret = sendmsg(gm_ifp->pim->gm_socket, mh, 0);
	}

	if (ret != expect_ret) {
		zlog_warn(log_ifp("failed to send query: %m"));
		gm_ifp->stats.tx_query_fail++;
	} else {
		/* bump the appropriate TX statistics counter */
		if (gm_ifp->cur_version == GM_MLDV1) {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_old_general++;
			else
				gm_ifp->stats.tx_query_old_group++;
		} else {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_new_general++;
			else if (!n_srcs)
				gm_ifp->stats.tx_query_new_group++;
			else
				gm_ifp->stats.tx_query_new_groupsrc++;
		}
	}
}
1879
1880 static void gm_t_query(struct event *t)
1881 {
1882 struct gm_if *gm_ifp = EVENT_ARG(t);
1883 unsigned int timer_ms = gm_ifp->cur_query_intv;
1884
1885 if (gm_ifp->n_startup) {
1886 timer_ms /= 4;
1887 gm_ifp->n_startup--;
1888 }
1889
1890 event_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
1891 &gm_ifp->t_query);
1892
1893 gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
1894 }
1895
/* retransmission timer for S,G specific queries */
static void gm_t_sg_query(struct event *t)
{
	gm_trigger_specific((struct gm_sg *)EVENT_ARG(t));
}
1902
1903 /* S,G specific queries (triggered by a member leaving) get a little slack
1904 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
1905 */
1906 static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
1907 {
1908 struct gm_if *gm_ifp = pend_gsq->iface;
1909
1910 gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
1911 pend_gsq->s_bit);
1912
1913 gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
1914 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
1915 }
1916
/* bundling slack expired - send the aggregated group/source query now */
static void gm_t_gsq_pend(struct event *t)
{
	gm_send_specific((struct gm_gsq_pending *)EVENT_ARG(t));
}
1923
/* Send (or schedule) one round of a specific query for sg, decrementing
 * the remaining retransmit count (sg->n_query).  *,G queries go out
 * immediately; S,G queries are aggregated per-group into a gm_gsq_pending
 * for a short fuzz interval so several sources share one query packet.
 */
static void gm_trigger_specific(struct gm_sg *sg)
{
	struct gm_if *gm_ifp = sg->iface;
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct gm_gsq_pending *pend_gsq, ref = {};

	/* schedule the next retransmission round, if any remain */
	sg->n_query--;
	if (sg->n_query)
		event_add_timer_msec(router->master, gm_t_sg_query, sg,
				     gm_ifp->cur_query_intv_trig,
				     &sg->t_sg_query);

	/* only the elected querier sends specific queries */
	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
		return;
	if (gm_ifp->pim->gm_socket == -1)
		return;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "triggered query"));

	/* *,G: plain group-specific query, no aggregation needed */
	if (pim_addr_is_any(sg->sgaddr.src)) {
		gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
		return;
	}

	/* S,G: find or create the per-group aggregation entry */
	ref.grp = sg->sgaddr.grp;
	ref.s_bit = sg->query_sbit;

	pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
	if (!pend_gsq) {
		pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
		pend_gsq->grp = sg->sgaddr.grp;
		pend_gsq->s_bit = sg->query_sbit;
		pend_gsq->iface = gm_ifp;
		gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);

		/* short slack timer so more sources can pile in */
		event_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
				   &gm_ifp->cfg_timing_fuzz, &pend_gsq->t_send);
	}

	assert(pend_gsq->n_src < array_size(pend_gsq->srcs));

	pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
	pend_gsq->n_src++;

	/* source array full - flush immediately instead of waiting */
	if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
		EVENT_OFF(pend_gsq->t_send);
		gm_send_specific(pend_gsq);
		pend_gsq = NULL;
	}
}
1975
1976 static void gm_vrf_socket_incref(struct pim_instance *pim)
1977 {
1978 struct vrf *vrf = pim->vrf;
1979 int ret, intval;
1980 struct icmp6_filter filter[1];
1981
1982 if (pim->gm_socket_if_count++ && pim->gm_socket != -1)
1983 return;
1984
1985 ICMP6_FILTER_SETBLOCKALL(filter);
1986 ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter);
1987 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter);
1988 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter);
1989 ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter);
1990
1991 frr_with_privs (&pimd_privs) {
1992 pim->gm_socket = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
1993 vrf->vrf_id, vrf->name);
1994 if (pim->gm_socket < 0) {
1995 zlog_err("(VRF %s) could not create MLD socket: %m",
1996 vrf->name);
1997 return;
1998 }
1999
2000 ret = setsockopt(pim->gm_socket, SOL_ICMPV6, ICMP6_FILTER,
2001 filter, sizeof(filter));
2002 if (ret)
2003 zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
2004 vrf->name);
2005
2006 intval = 1;
2007 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVPKTINFO,
2008 &intval, sizeof(intval));
2009 if (ret)
2010 zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
2011 vrf->name);
2012
2013 intval = 1;
2014 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPOPTS,
2015 &intval, sizeof(intval));
2016 if (ret)
2017 zlog_err("(VRF %s) failed to set IPV6_HOPOPTS: %m",
2018 vrf->name);
2019
2020 intval = 1;
2021 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPLIMIT,
2022 &intval, sizeof(intval));
2023 if (ret)
2024 zlog_err("(VRF %s) failed to set IPV6_HOPLIMIT: %m",
2025 vrf->name);
2026
2027 intval = 1;
2028 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_LOOP,
2029 &intval, sizeof(intval));
2030 if (ret)
2031 zlog_err(
2032 "(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
2033 vrf->name);
2034
2035 intval = 1;
2036 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_HOPS,
2037 &intval, sizeof(intval));
2038 if (ret)
2039 zlog_err(
2040 "(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
2041 vrf->name);
2042
2043 /* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
2044 * RX filtering in Linux. It only means "receive all groups
2045 * that something on the system has joined". To actually
2046 * receive *all* MLD packets - which is what we need -
2047 * multicast routing must be enabled on the interface. And
2048 * this only works for MLD packets specifically.
2049 *
2050 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
2051 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
2052 *
2053 * Also note that the code there explicitly checks for the IPv6
2054 * router alert MLD option (which is required by the RFC to be
2055 * on MLD packets.) That implies trying to support hosts which
2056 * erroneously don't add that option is just not possible.
2057 */
2058 intval = 1;
2059 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_ALL,
2060 &intval, sizeof(intval));
2061 if (ret)
2062 zlog_info(
2063 "(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
2064 vrf->name);
2065 }
2066
2067 event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
2068 &pim->t_gm_recv);
2069 }
2070
2071 static void gm_vrf_socket_decref(struct pim_instance *pim)
2072 {
2073 if (--pim->gm_socket_if_count)
2074 return;
2075
2076 EVENT_OFF(pim->t_gm_recv);
2077 close(pim->gm_socket);
2078 pim->gm_socket = -1;
2079 }
2080
/* Bring MLD up on an interface: allocate per-interface state, seed runtime
 * values from configuration, take a reference on the per-VRF MLD socket,
 * and join the all-MLDv2-routers group (ff02::16).
 */
static void gm_start(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;

	assert(pim_ifp);
	assert(pim_ifp->pim);
	assert(pim_ifp->mroute_vif_index >= 0);
	assert(!pim_ifp->mld);

	gm_vrf_socket_incref(pim_ifp->pim);

	gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp));
	gm_ifp->ifp = ifp;
	pim_ifp->mld = gm_ifp;
	gm_ifp->pim = pim_ifp->pim;
	monotime(&gm_ifp->started);

	zlog_info(log_ifp("starting MLD"));

	if (pim_ifp->mld_version == 1)
		gm_ifp->cur_version = GM_MLDV1;
	else
		gm_ifp->cur_version = GM_MLDV2;

	/* seed operational values from per-interface config
	 * (seconds / deciseconds config -> milliseconds runtime)
	 */
	gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable;
	gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
	gm_ifp->cur_query_intv_trig =
		pim_ifp->gm_specific_query_max_response_time_dsec * 100;
	gm_ifp->cur_max_resp = pim_ifp->gm_query_max_response_time_dsec * 100;
	gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;

	/* 10ms fuzz: used for query bundling & report/query race tolerance */
	gm_ifp->cfg_timing_fuzz.tv_sec = 0;
	gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;

	gm_sgs_init(gm_ifp->sgs);
	gm_subscribers_init(gm_ifp->subscribers);
	gm_packet_expires_init(gm_ifp->expires);
	gm_grp_pends_init(gm_ifp->grp_pends);
	gm_gsq_pends_init(gm_ifp->gsq_pends);

	frr_with_privs (&pimd_privs) {
		struct ipv6_mreq mreq;
		int ret;

		/* all-MLDv2 group */
		mreq.ipv6mr_multiaddr = gm_all_routers;
		mreq.ipv6mr_interface = ifp->ifindex;
		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
				 IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
		if (ret)
			zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
				 ifp->name);
	}
}
2136
/* Flush all group membership state on the interface: packet/report state,
 * pending group and group-source queries, S,G entries and subscribers.
 * The gm_ifp container itself and its data structures stay initialized
 * (teardown of those is gm_ifp_teardown's job).
 */
void gm_group_delete(struct gm_if *gm_ifp)
{
	struct gm_sg *sg;
	struct gm_packet_state *pkt;
	struct gm_grp_pending *pend_grp;
	struct gm_gsq_pending *pend_gsq;
	struct gm_subscriber *subscriber;

	/* drop packet state first; this empties the per-sg subscription
	 * lists, which the asserts below rely on
	 */
	while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
		gm_packet_drop(pkt, false);

	while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
		EVENT_OFF(pend_grp->t_expire);
		XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
	}

	while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
		EVENT_OFF(pend_gsq->t_send);
		XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
	}

	while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
		EVENT_OFF(sg->t_sg_expire);
		/* all subscriptions must be gone after the packet drops */
		assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
			&sg->sgaddr);
		assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
			&sg->sgaddr);

		gm_sg_free(sg);
	}
	while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
		assertf(!gm_packets_count(subscriber->packets), "%pPA",
			&subscriber->addr);
		XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
	}
}
2173
/* Tear MLD down on an interface: stop all timers, leave ff02::16, drop the
 * VRF socket reference, flush all state and free the gm_if.  No-op if MLD
 * was never started on the interface.
 */
void gm_ifp_teardown(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;

	if (!pim_ifp || !pim_ifp->mld)
		return;

	gm_ifp = pim_ifp->mld;
	/* flag checked by other code paths to avoid rescheduling work */
	gm_ifp->stopping = true;
	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("MLD stop"));

	EVENT_OFF(gm_ifp->t_query);
	EVENT_OFF(gm_ifp->t_other_querier);
	EVENT_OFF(gm_ifp->t_expire);

	frr_with_privs (&pimd_privs) {
		struct ipv6_mreq mreq;
		int ret;

		/* all-MLDv2 group */
		mreq.ipv6mr_multiaddr = gm_all_routers;
		mreq.ipv6mr_interface = ifp->ifindex;
		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
				 IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
		if (ret)
			zlog_err(
				"(%s) failed to leave ff02::16 (all-MLDv2): %m",
				ifp->name);
	}

	gm_vrf_socket_decref(gm_ifp->pim);

	/* flush membership state, then finalize the (now empty) containers */
	gm_group_delete(gm_ifp);

	gm_grp_pends_fini(gm_ifp->grp_pends);
	gm_packet_expires_fini(gm_ifp->expires);
	gm_subscribers_fini(gm_ifp->subscribers);
	gm_sgs_fini(gm_ifp->sgs);

	XFREE(MTYPE_GM_IFACE, gm_ifp);
	pim_ifp->mld = NULL;
}
2218
2219 static void gm_update_ll(struct interface *ifp)
2220 {
2221 struct pim_interface *pim_ifp = ifp->info;
2222 struct gm_if *gm_ifp = pim_ifp->mld;
2223 bool was_querier;
2224
2225 was_querier =
2226 !IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
2227 !pim_addr_is_any(gm_ifp->querier);
2228
2229 gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
2230 if (was_querier)
2231 gm_ifp->querier = pim_ifp->ll_lowest;
2232 EVENT_OFF(gm_ifp->t_query);
2233
2234 if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
2235 if (was_querier)
2236 zlog_info(log_ifp(
2237 "lost link-local address, stopping querier"));
2238 return;
2239 }
2240
2241 if (was_querier)
2242 zlog_info(log_ifp("new link-local %pPA while querier"),
2243 &gm_ifp->cur_ll_lowest);
2244 else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
2245 pim_addr_is_any(gm_ifp->querier)) {
2246 zlog_info(log_ifp("new link-local %pPA, becoming querier"),
2247 &gm_ifp->cur_ll_lowest);
2248 gm_ifp->querier = gm_ifp->cur_ll_lowest;
2249 } else
2250 return;
2251
2252 gm_ifp->n_startup = gm_ifp->cur_qrv;
2253 event_execute(router->master, gm_t_query, gm_ifp, 0);
2254 }
2255
2256 void gm_ifp_update(struct interface *ifp)
2257 {
2258 struct pim_interface *pim_ifp = ifp->info;
2259 struct gm_if *gm_ifp;
2260 bool changed = false;
2261
2262 if (!pim_ifp)
2263 return;
2264 if (!if_is_operative(ifp) || !pim_ifp->pim ||
2265 pim_ifp->mroute_vif_index < 0) {
2266 gm_ifp_teardown(ifp);
2267 return;
2268 }
2269
2270 /*
2271 * If ipv6 mld is not enabled on interface, do not start mld activites.
2272 */
2273 if (!pim_ifp->gm_enable)
2274 return;
2275
2276 if (!pim_ifp->mld) {
2277 changed = true;
2278 gm_start(ifp);
2279 assume(pim_ifp->mld != NULL);
2280 }
2281
2282 gm_ifp = pim_ifp->mld;
2283 if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
2284 gm_update_ll(ifp);
2285
2286 unsigned int cfg_query_intv = pim_ifp->gm_default_query_interval * 1000;
2287
2288 if (gm_ifp->cur_query_intv != cfg_query_intv) {
2289 gm_ifp->cur_query_intv = cfg_query_intv;
2290 changed = true;
2291 }
2292
2293 unsigned int cfg_query_intv_trig =
2294 pim_ifp->gm_specific_query_max_response_time_dsec * 100;
2295
2296 if (gm_ifp->cur_query_intv_trig != cfg_query_intv_trig) {
2297 gm_ifp->cur_query_intv_trig = cfg_query_intv_trig;
2298 changed = true;
2299 }
2300
2301 unsigned int cfg_max_response =
2302 pim_ifp->gm_query_max_response_time_dsec * 100;
2303
2304 if (gm_ifp->cur_max_resp != cfg_max_response)
2305 gm_ifp->cur_max_resp = cfg_max_response;
2306
2307 if (gm_ifp->cur_lmqc != pim_ifp->gm_last_member_query_count)
2308 gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;
2309
2310 enum gm_version cfg_version;
2311
2312 if (pim_ifp->mld_version == 1)
2313 cfg_version = GM_MLDV1;
2314 else
2315 cfg_version = GM_MLDV2;
2316 if (gm_ifp->cur_version != cfg_version) {
2317 gm_ifp->cur_version = cfg_version;
2318 changed = true;
2319 }
2320
2321 if (changed) {
2322 if (PIM_DEBUG_GM_TRACE)
2323 zlog_debug(log_ifp(
2324 "MLD querier config changed, querying"));
2325 gm_bump_querier(gm_ifp);
2326 }
2327 }
2328
2329 /*
2330 * CLI (show commands only)
2331 */
2332
2333 #include "lib/command.h"
2334
2335 #include "pimd/pim6_mld_clippy.c"
2336
2337 static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
2338 int *err)
2339 {
2340 struct vrf *ret;
2341
2342 if (!vrf_str)
2343 return vrf_lookup_by_id(VRF_DEFAULT);
2344 if (!strcmp(vrf_str, "all"))
2345 return NULL;
2346 ret = vrf_lookup_by_name(vrf_str);
2347 if (ret)
2348 return ret;
2349
2350 vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str);
2351 *err = CMD_WARNING;
2352 return NULL;
2353 }
2354
/*
 * Print the detailed (non-JSON) per-interface MLD status block for
 * "show ipv6 mld interface detail".  Handles interfaces without PIM or
 * MLD state with a one-line notice.
 */
static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp)
{
	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
	struct gm_if *gm_ifp;
	bool querier;
	size_t i;

	if (!pim_ifp) {
		vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name);
		return;
	}

	gm_ifp = pim_ifp->mld;
	if (!gm_ifp) {
		vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name);
		return;
	}

	/* we are querier iff our lowest link-local == elected querier */
	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);

	vty_out(vty, "Interface %s: MLD running\n", ifp->name);
	vty_out(vty, " Uptime: %pTVMs\n", &gm_ifp->started);
	vty_out(vty, " MLD version: %d\n", gm_ifp->cur_version);
	vty_out(vty, " Querier: %pPA%s\n", &gm_ifp->querier,
		querier ? " (this system)" : "");
	vty_out(vty, " Query timer: %pTH\n", gm_ifp->t_query);
	vty_out(vty, " Other querier timer: %pTH\n",
		gm_ifp->t_other_querier);
	vty_out(vty, " Robustness value: %u\n", gm_ifp->cur_qrv);
	vty_out(vty, " Query interval: %ums\n",
		gm_ifp->cur_query_intv);
	vty_out(vty, " Query response timer: %ums\n", gm_ifp->cur_max_resp);
	vty_out(vty, " Last member query intv.: %ums\n",
		gm_ifp->cur_query_intv_trig);
	/* per general-query expiry slots (ring buffer of size n_pending) */
	vty_out(vty, " %u expiry timers from general queries:\n",
		gm_ifp->n_pending);
	for (i = 0; i < gm_ifp->n_pending; i++) {
		struct gm_general_pending *p = &gm_ifp->pending[i];

		vty_out(vty, " %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
			&p->query, &p->expiry);
	}
	vty_out(vty, " %zu expiry timers from *,G queries\n",
		gm_grp_pends_count(gm_ifp->grp_pends));
	vty_out(vty, " %zu expiry timers from S,G queries\n",
		gm_gsq_pends_count(gm_ifp->gsq_pends));
	vty_out(vty, " %zu total *,G/S,G from %zu hosts in %zu bundles\n",
		gm_sgs_count(gm_ifp->sgs),
		gm_subscribers_count(gm_ifp->subscribers),
		gm_packet_expires_count(gm_ifp->expires));
	vty_out(vty, "\n");
}
2407
/*
 * Emit one interface's summary either as a JSON object (js_if != NULL)
 * or as one row of the text table (tt != NULL).  Caller guarantees
 * pim_ifp->mld is non-NULL (filtered in gm_show_if_vrf()).
 */
static void gm_show_if_one(struct vty *vty, struct interface *ifp,
			   json_object *js_if, struct ttable *tt)
{
	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;
	bool querier;

	assume(js_if || tt);

	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);

	if (js_if) {
		json_object_string_add(js_if, "name", ifp->name);
		json_object_string_addf(js_if, "address", "%pPA",
					&pim_ifp->primary_address);
		json_object_string_add(js_if, "state", "up");
		json_object_string_addf(js_if, "version", "%d",
					gm_ifp->cur_version);
		json_object_string_addf(js_if, "upTime", "%pTVMs",
					&gm_ifp->started);
		json_object_boolean_add(js_if, "querier", querier);
		json_object_string_addf(js_if, "querierIp", "%pPA",
					&gm_ifp->querier);
		/* only one of the two timers is relevant, depending on role */
		if (querier)
			json_object_string_addf(js_if, "queryTimer", "%pTH",
						gm_ifp->t_query);
		else
			json_object_string_addf(js_if, "otherQuerierTimer",
						"%pTH",
						gm_ifp->t_other_querier);
		json_object_int_add(js_if, "timerRobustnessValue",
				    gm_ifp->cur_qrv);
		json_object_int_add(js_if, "lastMemberQueryCount",
				    gm_ifp->cur_lmqc);
		json_object_int_add(js_if, "timerQueryIntervalMsec",
				    gm_ifp->cur_query_intv);
		json_object_int_add(js_if, "timerQueryResponseTimerMsec",
				    gm_ifp->cur_max_resp);
		json_object_int_add(js_if, "timerLastMemberQueryIntervalMsec",
				    gm_ifp->cur_query_intv_trig);
	} else {
		ttable_add_row(tt, "%s|%s|%pPAs|%d|%s|%pPAs|%pTH|%pTVMs",
			       ifp->name, "up", &pim_ifp->primary_address,
			       gm_ifp->cur_version, querier ? "local" : "other",
			       &gm_ifp->querier, gm_ifp->t_query,
			       &gm_ifp->started);
	}
}
2456
/*
 * Walk one VRF's interfaces for "show ipv6 mld interface".  Output is
 * one of three modes: JSON (js != NULL), detailed text (detail && !js),
 * or a summary text table (neither).
 */
static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname,
			   bool detail, json_object *js)
{
	struct interface *ifp;
	json_object *js_vrf = NULL;
	struct pim_interface *pim_ifp;
	struct ttable *tt = NULL;
	char *table = NULL;

	if (js) {
		js_vrf = json_object_new_object();
		json_object_object_add(js, vrf->name, js_vrf);
	}

	if (!js && !detail) {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(
			tt,
			"Interface|State|Address|V|Querier|QuerierIp|Query Timer|Uptime");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
		ttable_restyle(tt);
	}

	FOR_ALL_INTERFACES (vrf, ifp) {
		json_object *js_if = NULL;

		/* optional single-interface filter */
		if (ifname && strcmp(ifp->name, ifname))
			continue;
		if (detail && !js) {
			gm_show_if_one_detail(vty, ifp);
			continue;
		}

		pim_ifp = ifp->info;

		/* skip interfaces without MLD state (detail mode prints its
		 * own notice for these, above)
		 */
		if (!pim_ifp || !pim_ifp->mld)
			continue;

		if (js) {
			js_if = json_object_new_object();
			/* json_object_new_object() does not fail in practice;
			 * the assert() also tells Coverity js_if is non-NULL
			 * here, so gm_show_if_one() won't deref tt (NULL in
			 * this mode) instead.
			 */
			assert(js_if);
			json_object_object_add(js_vrf, ifp->name, js_if);
		}

		gm_show_if_one(vty, ifp, js_if, tt);
	}

	/* Dump the generated table. */
	if (!js && !detail) {
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	}
}
2526
2527 static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname,
2528 bool detail, json_object *js)
2529 {
2530 if (vrf)
2531 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2532 else
2533 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2534 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2535 }
2536
2537 DEFPY(gm_show_interface,
2538 gm_show_interface_cmd,
2539 "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME | detail$detail] [json$json]",
2540 SHOW_STR
2541 IPV6_STR
2542 MLD_STR
2543 VRF_FULL_CMD_HELP_STR
2544 "MLD interface information\n"
2545 "Interface name\n"
2546 "Detailed output\n"
2547 JSON_STR)
2548 {
2549 int ret = CMD_SUCCESS;
2550 struct vrf *vrf;
2551 json_object *js = NULL;
2552
2553 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2554 if (ret != CMD_SUCCESS)
2555 return ret;
2556
2557 if (json)
2558 js = json_object_new_object();
2559 gm_show_if(vty, vrf, ifname, !!detail, js);
2560 return vty_json(vty, js);
2561 }
2562
/*
 * Print one interface's MLD RX/TX/drop counters, either as JSON fields
 * (js_if != NULL) or as aligned text lines.  The counters are emitted
 * table-driven so the text label and JSON key for each stat stay paired.
 */
static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp,
			      json_object *js_if)
{
	struct gm_if_stats *stats = &gm_ifp->stats;
	/* clang-format off */
	struct {
		const char *text;
		const char *js_key;
		uint64_t *val;
	} *item, items[] = {
		{ "v2 reports received", "rxV2Reports", &stats->rx_new_report },
		{ "v1 reports received", "rxV1Reports", &stats->rx_old_report },
		{ "v1 done received", "rxV1Done", &stats->rx_old_leave },

		{ "v2 *,* queries received", "rxV2QueryGeneral", &stats->rx_query_new_general },
		{ "v2 *,G queries received", "rxV2QueryGroup", &stats->rx_query_new_group },
		{ "v2 S,G queries received", "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc },
		{ "v2 S-bit queries received", "rxV2QuerySBit", &stats->rx_query_new_sbit },
		{ "v1 *,* queries received", "rxV1QueryGeneral", &stats->rx_query_old_general },
		{ "v1 *,G queries received", "rxV1QueryGroup", &stats->rx_query_old_group },

		{ "v2 *,* queries sent", "txV2QueryGeneral", &stats->tx_query_new_general },
		{ "v2 *,G queries sent", "txV2QueryGroup", &stats->tx_query_new_group },
		{ "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc },
		{ "v1 *,* queries sent", "txV1QueryGeneral", &stats->tx_query_old_general },
		{ "v1 *,G queries sent", "txV1QueryGroup", &stats->tx_query_old_group },
		{ "TX errors", "txErrors", &stats->tx_query_fail },

		{ "RX dropped (checksum error)", "rxDropChecksum", &stats->rx_drop_csum },
		{ "RX dropped (invalid source)", "rxDropSrcAddr", &stats->rx_drop_srcaddr },
		{ "RX dropped (invalid dest.)", "rxDropDstAddr", &stats->rx_drop_dstaddr },
		{ "RX dropped (missing alert)", "rxDropRtrAlert", &stats->rx_drop_ra },
		{ "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed },
		{ "RX truncated reports", "rxTruncatedRep", &stats->rx_trunc_report },
	};
	/* clang-format on */

	for (item = items; item < items + array_size(items); item++) {
		if (js_if)
			json_object_int_add(js_if, item->js_key, *item->val);
		else
			vty_out(vty, " %-30s %" PRIu64 "\n", item->text,
				*item->val);
	}
}
2608
2609 static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf,
2610 const char *ifname, json_object *js)
2611 {
2612 struct interface *ifp;
2613 json_object *js_vrf;
2614
2615 if (js) {
2616 js_vrf = json_object_new_object();
2617 json_object_object_add(js, vrf->name, js_vrf);
2618 }
2619
2620 FOR_ALL_INTERFACES (vrf, ifp) {
2621 struct pim_interface *pim_ifp;
2622 struct gm_if *gm_ifp;
2623 json_object *js_if = NULL;
2624
2625 if (ifname && strcmp(ifp->name, ifname))
2626 continue;
2627
2628 if (!ifp->info)
2629 continue;
2630 pim_ifp = ifp->info;
2631 if (!pim_ifp->mld)
2632 continue;
2633 gm_ifp = pim_ifp->mld;
2634
2635 if (js) {
2636 js_if = json_object_new_object();
2637 json_object_object_add(js_vrf, ifp->name, js_if);
2638 } else {
2639 vty_out(vty, "Interface: %s\n", ifp->name);
2640 }
2641 gm_show_stats_one(vty, gm_ifp, js_if);
2642 if (!js)
2643 vty_out(vty, "\n");
2644 }
2645 }
2646
2647 DEFPY(gm_show_interface_stats,
2648 gm_show_interface_stats_cmd,
2649 "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
2650 SHOW_STR
2651 IPV6_STR
2652 MLD_STR
2653 VRF_FULL_CMD_HELP_STR
2654 "MLD statistics\n"
2655 INTERFACE_STR
2656 "Interface name\n"
2657 JSON_STR)
2658 {
2659 int ret = CMD_SUCCESS;
2660 struct vrf *vrf;
2661 json_object *js = NULL;
2662
2663 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2664 if (ret != CMD_SUCCESS)
2665 return ret;
2666
2667 if (json)
2668 js = json_object_new_object();
2669
2670 if (vrf)
2671 gm_show_stats_vrf(vty, vrf, ifname, js);
2672 else
2673 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2674 gm_show_stats_vrf(vty, vrf, ifname, js);
2675 return vty_json(vty, js);
2676 }
2677
/*
 * Print the (S,G) join/prune state of one interface, optionally limited
 * to group/source prefix ranges.  Text mode prints one line per (S,G)
 * (plus per-subscriber lines with "detail"); JSON mode nests sources
 * under one object per group.  "Untracked" entries come from hosts
 * reported via the gm_dummy_untracked pseudo-subscriber.
 */
static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp,
			      const struct prefix_ipv6 *groups,
			      const struct prefix_ipv6 *sources, bool detail,
			      json_object *js_if)
{
	struct gm_sg *sg, *sg_start;
	json_object *js_group = NULL;
	pim_addr js_grpaddr = PIMADDR_ANY;
	struct gm_subscriber sub_ref = {}, *sub_untracked;

	if (groups) {
		struct gm_sg sg_ref = {};

		/* start iteration at the first (S,G) >= the group prefix
		 * base address; the prefix_match() below terminates it
		 */
		sg_ref.sgaddr.grp = pim_addr_from_prefix(groups);
		sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
	} else
		sg_start = gm_sgs_first(gm_ifp->sgs);

	sub_ref.addr = gm_dummy_untracked;
	sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref);
	/* NB: sub_untracked may be NULL if no untracked joins exist */

	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
		struct timeval *recent = NULL, *untracked = NULL;
		json_object *js_src;

		if (groups) {
			struct prefix grp_p;

			/* sorted container => first non-matching group ends
			 * the range
			 */
			pim_addr_to_prefix(&grp_p, sg->sgaddr.grp);
			if (!prefix_match(groups, &grp_p))
				break;
		}

		if (sources) {
			struct prefix src_p;

			/* source filter is not contiguous in sort order,
			 * so skip rather than stop
			 */
			pim_addr_to_prefix(&src_p, sg->sgaddr.src);
			if (!prefix_match(sources, &src_p))
				continue;
		}

		if (sg->most_recent) {
			struct gm_packet_state *packet;

			packet = gm_packet_sg2state(sg->most_recent);
			recent = &packet->received;
		}

		/* timestamp of the untracked pseudo-subscriber's join,
		 * if it has one for this (S,G)
		 */
		if (sub_untracked) {
			struct gm_packet_state *packet;
			struct gm_packet_sg *item;

			item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked);
			if (item) {
				packet = gm_packet_sg2state(item);
				untracked = &packet->received;
			}
		}

		/* ---- plain-text output ---- */
		if (!js_if) {
			FMT_NSTD_BEGIN; /* %.0p */
			vty_out(vty,
				"%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
				&sg->sgaddr.grp, &sg->sgaddr.src,
				gm_states[sg->state], recent, untracked,
				&sg->created);

			if (!detail)
				continue;

			/* detail: one line per tracked subscriber */
			struct gm_packet_sg *item;
			struct gm_packet_state *packet;

			frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
				packet = gm_packet_sg2state(item);

				if (packet->subscriber == sub_untracked)
					continue;
				vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
					&packet->subscriber->addr, "(JOIN)",
					&packet->received);
			}
			frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
				packet = gm_packet_sg2state(item);

				if (packet->subscriber == sub_untracked)
					continue;
				vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
					&packet->subscriber->addr, "(PRUNE)",
					&packet->received);
			}
			FMT_NSTD_END; /* %.0p */
			continue;
		}
		/* if (js_if) */

		/* open a new group object when the group address changes;
		 * relies on (S,G) iteration being grouped by group address
		 */
		if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) {
			js_group = json_object_new_object();
			json_object_object_addf(js_if, js_group, "%pPA",
						&sg->sgaddr.grp);
			js_grpaddr = sg->sgaddr.grp;
		}

		js_src = json_object_new_object();
		json_object_object_addf(js_group, js_src, "%pPAs",
					&sg->sgaddr.src);

		json_object_string_add(js_src, "state", gm_states[sg->state]);
		json_object_string_addf(js_src, "created", "%pTVMs",
					&sg->created);
		json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent);

		if (untracked)
			json_object_string_addf(js_src, "untrackedLastSeen",
						"%pTVMs", untracked);
		if (!detail)
			continue;

		/* detail: per-subscriber join/prune objects */
		json_object *js_subs;
		struct gm_packet_sg *item;
		struct gm_packet_state *packet;

		js_subs = json_object_new_object();
		json_object_object_add(js_src, "joinedBy", js_subs);
		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
			packet = gm_packet_sg2state(item);
			if (packet->subscriber == sub_untracked)
				continue;

			json_object *js_sub;

			js_sub = json_object_new_object();
			json_object_object_addf(js_subs, js_sub, "%pPA",
						&packet->subscriber->addr);
			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
						&packet->received);
		}

		js_subs = json_object_new_object();
		json_object_object_add(js_src, "prunedBy", js_subs);
		frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
			packet = gm_packet_sg2state(item);
			if (packet->subscriber == sub_untracked)
				continue;

			json_object *js_sub;

			js_sub = json_object_new_object();
			json_object_object_addf(js_subs, js_sub, "%pPA",
						&packet->subscriber->addr);
			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
						&packet->received);
		}
	}
}
2834
2835 static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf,
2836 const char *ifname,
2837 const struct prefix_ipv6 *groups,
2838 const struct prefix_ipv6 *sources, bool detail,
2839 json_object *js)
2840 {
2841 struct interface *ifp;
2842 json_object *js_vrf;
2843
2844 if (js) {
2845 js_vrf = json_object_new_object();
2846 json_object_string_add(js_vrf, "vrf", vrf->name);
2847 json_object_object_add(js, vrf->name, js_vrf);
2848 }
2849
2850 FOR_ALL_INTERFACES (vrf, ifp) {
2851 struct pim_interface *pim_ifp;
2852 struct gm_if *gm_ifp;
2853 json_object *js_if = NULL;
2854
2855 if (ifname && strcmp(ifp->name, ifname))
2856 continue;
2857
2858 if (!ifp->info)
2859 continue;
2860 pim_ifp = ifp->info;
2861 if (!pim_ifp->mld)
2862 continue;
2863 gm_ifp = pim_ifp->mld;
2864
2865 if (js) {
2866 js_if = json_object_new_object();
2867 json_object_object_add(js_vrf, ifp->name, js_if);
2868 }
2869
2870 if (!js && !ifname)
2871 vty_out(vty, "\nOn interface %s:\n", ifp->name);
2872
2873 gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if);
2874 }
2875 }
2876
2877 DEFPY(gm_show_interface_joins,
2878 gm_show_interface_joins_cmd,
2879 "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
2880 SHOW_STR
2881 IPV6_STR
2882 MLD_STR
2883 VRF_FULL_CMD_HELP_STR
2884 "MLD joined groups & sources\n"
2885 INTERFACE_STR
2886 "Interface name\n"
2887 "Limit output to group range\n"
2888 "Show groups covered by this prefix\n"
2889 "Limit output to source range\n"
2890 "Show sources covered by this prefix\n"
2891 "Show details, including tracked receivers\n"
2892 JSON_STR)
2893 {
2894 int ret = CMD_SUCCESS;
2895 struct vrf *vrf;
2896 json_object *js = NULL;
2897
2898 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2899 if (ret != CMD_SUCCESS)
2900 return ret;
2901
2902 if (json)
2903 js = json_object_new_object();
2904 else
2905 vty_out(vty, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
2906 "Source", "State", "LastSeen", "NonTrkSeen", "Created");
2907
2908 if (vrf)
2909 gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail,
2910 js);
2911 else
2912 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2913 gm_show_joins_vrf(vty, vrf, ifname, groups, sources,
2914 !!detail, js);
2915 return vty_json(vty, js);
2916 }
2917
/*
 * "show ipv6 mld groups" for one VRF: per-interface list of joined
 * groups with MLD version and uptime, plus the VRF-wide group count and
 * watermark limit.  JSON when uj is true, a text table otherwise.
 */
static void gm_show_groups(struct vty *vty, struct vrf *vrf, bool uj)
{
	struct interface *ifp;
	struct ttable *tt = NULL;
	char *table;
	json_object *json = NULL;
	json_object *json_iface = NULL;
	json_object *json_group = NULL;
	json_object *json_groups = NULL;
	struct pim_instance *pim = vrf->info;

	if (uj) {
		json = json_object_new_object();
		json_object_int_add(json, "totalGroups", pim->gm_group_count);
		json_object_int_add(json, "watermarkLimit",
				    pim->gm_watermark_limit);
	} else {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(tt, "Interface|Group|Version|Uptime");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
		ttable_restyle(tt);

		vty_out(vty, "Total MLD groups: %u\n", pim->gm_group_count);
		vty_out(vty, "Watermark warn limit(%s): %u\n",
			pim->gm_watermark_limit ? "Set" : "Not Set",
			pim->gm_watermark_limit);
	}

	/* scan interfaces */
	FOR_ALL_INTERFACES (vrf, ifp) {

		struct pim_interface *pim_ifp = ifp->info;
		struct gm_if *gm_ifp;
		struct gm_sg *sg;

		if (!pim_ifp)
			continue;

		gm_ifp = pim_ifp->mld;
		if (!gm_ifp)
			continue;

		/* scan mld groups */
		frr_each (gm_sgs, gm_ifp->sgs, sg) {

			if (uj) {
				/* reuse the interface object if an earlier
				 * (S,G) of the same interface created it;
				 * json_groups then still points at its
				 * "groups" array (interfaces are walked one
				 * at a time, so it cannot be stale)
				 */
				json_object_object_get_ex(json, ifp->name,
							  &json_iface);

				if (!json_iface) {
					json_iface = json_object_new_object();
					json_object_pim_ifp_add(json_iface,
								ifp);
					json_object_object_add(json, ifp->name,
							       json_iface);
					json_groups = json_object_new_array();
					json_object_object_add(json_iface,
							       "groups",
							       json_groups);
				}

				json_group = json_object_new_object();
				json_object_string_addf(json_group, "group",
							"%pPAs",
							&sg->sgaddr.grp);

				json_object_int_add(json_group, "version",
						    pim_ifp->mld_version);
				json_object_string_addf(json_group, "uptime",
							"%pTVMs", &sg->created);
				json_object_array_add(json_groups, json_group);
			} else {
				ttable_add_row(tt, "%s|%pPAs|%d|%pTVMs",
					       ifp->name, &sg->sgaddr.grp,
					       pim_ifp->mld_version,
					       &sg->created);
			}
		} /* scan gm groups */
	} /* scan interfaces */

	if (uj)
		vty_json(vty, json);
	else {
		/* Dump the generated table. */
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	}
}
3010
3011 DEFPY(gm_show_mld_groups,
3012 gm_show_mld_groups_cmd,
3013 "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
3014 SHOW_STR
3015 IPV6_STR
3016 MLD_STR
3017 VRF_FULL_CMD_HELP_STR
3018 MLD_GROUP_STR
3019 JSON_STR)
3020 {
3021 int ret = CMD_SUCCESS;
3022 struct vrf *vrf;
3023
3024 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
3025 if (ret != CMD_SUCCESS)
3026 return ret;
3027
3028 if (vrf)
3029 gm_show_groups(vty, vrf, !!json);
3030 else
3031 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
3032 gm_show_groups(vty, vrf, !!json);
3033
3034 return CMD_SUCCESS;
3035 }
3036
/*
 * Developer debug command: dump the raw internal MLD state of one
 * interface — timers, pending general-query slots, every (S,G) with its
 * positive/negative subscription lists (including raw pointers), and
 * every subscriber with its packet bundles.  Default VRF only.
 */
DEFPY(gm_debug_show,
      gm_debug_show_cmd,
      "debug show mld interface IFNAME",
      DEBUG_STR
      SHOW_STR
      MLD_STR
      INTERFACE_STR
      "interface name\n")
{
	struct interface *ifp;
	struct pim_interface *pim_ifp;
	struct gm_if *gm_ifp;

	ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
	if (!ifp) {
		vty_out(vty, "%% no such interface: %pSQq\n", ifname);
		return CMD_WARNING;
	}

	pim_ifp = ifp->info;
	if (!pim_ifp) {
		vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname);
		return CMD_WARNING;
	}

	gm_ifp = pim_ifp->mld;
	if (!gm_ifp) {
		vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname);
		return CMD_WARNING;
	}

	vty_out(vty, "querier: %pPA\n", &gm_ifp->querier);
	vty_out(vty, "ll_lowest: %pPA\n\n", &pim_ifp->ll_lowest);
	vty_out(vty, "t_query: %pTHD\n", gm_ifp->t_query);
	vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier);
	vty_out(vty, "t_expire: %pTHD\n", gm_ifp->t_expire);

	/* pending general-query expiry slots */
	vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending);
	for (size_t i = 0; i < gm_ifp->n_pending; i++) {
		int64_t query, expiry;

		query = monotime_since(&gm_ifp->pending[i].query, NULL);
		expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL);

		vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n",
			i, query / 1000, expiry / 1000);
	}

	struct gm_sg *sg;
	struct gm_packet_state *pkt;
	struct gm_packet_sg *item;
	struct gm_subscriber *subscriber;

	/* per-(S,G) dump with raw subscription pointers for debugging */
	vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs));
	frr_each (gm_sgs, gm_ifp->sgs, sg) {
		vty_out(vty, "\t%pSG t_expire=%pTHD\n", &sg->sgaddr,
			sg->t_sg_expire);

		vty_out(vty, "\t @pos:%zu\n",
			gm_packet_sg_subs_count(sg->subs_positive));
		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
			pkt = gm_packet_sg2state(item);

			/* S = source record, E = exclude mode */
			vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n",
				item->is_src ? "S" : "",
				item->is_excl ? "E" : "",
				&pkt->subscriber->addr, pkt->subscriber, pkt,
				item->offset);

			assert(item->sg == sg);
		}
		vty_out(vty, "\t @neg:%zu\n",
			gm_packet_sg_subs_count(sg->subs_negative));
		frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
			pkt = gm_packet_sg2state(item);

			vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n",
				item->is_src ? "S" : "",
				item->is_excl ? "E" : "",
				&pkt->subscriber->addr, pkt->subscriber, pkt,
				item->offset);

			assert(item->sg == sg);
		}
	}

	/* per-subscriber dump: packet bundles and their (S,G) items */
	vty_out(vty, "\n%zu subscribers:\n",
		gm_subscribers_count(gm_ifp->subscribers));
	frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) {
		vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr,
			subscriber, gm_packets_count(subscriber->packets));

		frr_each (gm_packets, subscriber->packets, pkt) {
			vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n",
				pkt,
				monotime_since(&pkt->received, NULL) *
					0.000001f,
				pkt->n_active, pkt->n_sg);

			for (size_t i = 0; i < pkt->n_sg; i++) {
				item = pkt->items + i;

				vty_out(vty, "\t\t[%zu]", i);

				/* NULL sg marks a superseded/inactive item */
				if (!item->sg) {
					vty_out(vty, " inactive\n");
					continue;
				}

				vty_out(vty, " %s%s %pSG nE=%u\n",
					item->is_src ? "S" : "",
					item->is_excl ? "E" : "",
					&item->sg->sgaddr, item->n_exclude);
			}
		}
	}

	return CMD_SUCCESS;
}
3156
3157 DEFPY(gm_debug_iface_cfg,
3158 gm_debug_iface_cfg_cmd,
3159 "debug ipv6 mld {"
3160 "robustness (0-7)|"
3161 "query-max-response-time (1-8387584)"
3162 "}",
3163 DEBUG_STR
3164 IPV6_STR
3165 "Multicast Listener Discovery\n"
3166 "QRV\nQRV\n"
3167 "maxresp\nmaxresp\n")
3168 {
3169 VTY_DECLVAR_CONTEXT(interface, ifp);
3170 struct pim_interface *pim_ifp;
3171 struct gm_if *gm_ifp;
3172 bool changed = false;
3173
3174 pim_ifp = ifp->info;
3175 if (!pim_ifp) {
3176 vty_out(vty, "%% no PIM state for interface %pSQq\n",
3177 ifp->name);
3178 return CMD_WARNING;
3179 }
3180 gm_ifp = pim_ifp->mld;
3181 if (!gm_ifp) {
3182 vty_out(vty, "%% no MLD state for interface %pSQq\n",
3183 ifp->name);
3184 return CMD_WARNING;
3185 }
3186
3187 if (robustness_str && gm_ifp->cur_qrv != robustness) {
3188 gm_ifp->cur_qrv = robustness;
3189 changed = true;
3190 }
3191 if (query_max_response_time_str &&
3192 gm_ifp->cur_max_resp != (unsigned int)query_max_response_time) {
3193 gm_ifp->cur_max_resp = query_max_response_time;
3194 changed = true;
3195 }
3196
3197 if (changed) {
3198 vty_out(vty, "%% MLD querier config changed, bumping\n");
3199 gm_bump_querier(gm_ifp);
3200 }
3201 return CMD_SUCCESS;
3202 }
3203
/* prototype kept here since no shared header declares this function */
void gm_cli_init(void);

/* Register all MLD show/debug CLI commands with the command tree. */
void gm_cli_init(void)
{
	install_element(VIEW_NODE, &gm_show_interface_cmd);
	install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
	install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
	install_element(VIEW_NODE, &gm_show_mld_groups_cmd);

	/* developer/debug commands */
	install_element(VIEW_NODE, &gm_debug_show_cmd);
	install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
}