1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * PIMv6 MLD querier
4 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
5 */
6
7 /*
8 * keep pim6_mld.h open when working on this code. Most data structures are
9 * commented in the header.
10 *
11 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
12 * that this code will replace the old IGMP querier at some point.
13 */
14
15 #include <zebra.h>
16 #include <netinet/ip6.h>
17
18 #include "lib/memory.h"
19 #include "lib/jhash.h"
20 #include "lib/prefix.h"
21 #include "lib/checksum.h"
22 #include "lib/frrevent.h"
23 #include "termtable.h"
24
25 #include "pimd/pim6_mld.h"
26 #include "pimd/pim6_mld_protocol.h"
27 #include "pimd/pim_memory.h"
28 #include "pimd/pim_instance.h"
29 #include "pimd/pim_iface.h"
30 #include "pimd/pim6_cmd.h"
31 #include "pimd/pim_cmd_common.h"
32 #include "pimd/pim_util.h"
33 #include "pimd/pim_tib.h"
34 #include "pimd/pimd.h"
35
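/* IPV6_MULTICAST_ALL is missing from some older libc/kernel headers; its
 * value is fixed by the Linux UAPI, so provide a fallback define.
 */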
36 #ifndef IPV6_MULTICAST_ALL
37 #define IPV6_MULTICAST_ALL 29
38 #endif
39
40 DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface");
41 DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet");
42 DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber");
43 DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state");
44 DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)");
45 DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
46 DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");
47
48 static void gm_t_query(struct event *t);
49 static void gm_trigger_specific(struct gm_sg *sg);
50 static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
51 struct timeval expire_wait);
52
53 /* shorthand for log messages */
54 #define log_ifp(msg) \
55 "[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
56 #define log_pkt_src(msg) \
57 "[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
58 &pkt_src->sin6_addr
59 #define log_sg(sg, msg) \
60 "[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
61 sg->iface->ifp->name, &sg->sgaddr
62
63 /* clang-format off */
64 #if PIM_IPV == 6
65 static const pim_addr gm_all_hosts = {
66 .s6_addr = {
67 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
69 },
70 };
71 static const pim_addr gm_all_routers = {
72 .s6_addr = {
73 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
75 },
76 };
77 /* MLDv1 does not allow subscriber tracking due to report suppression;
78 * hence, the source address is replaced with ffff:...:ffff
79 */
80 static const pim_addr gm_dummy_untracked = {
81 .s6_addr = {
82 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
83 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
84 },
85 };
86 #else
87 /* 224.0.0.1 */
88 static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), };
89 /* 224.0.0.22 */
90 static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), };
91 static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, };
92 #endif
93 /* clang-format on */
94
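/* IPv6 multicast scope values as defined in RFC 4291; groups at link-local
 * scope or narrower are never routed, so they are not propagated into the TIB.
 */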
95 #define IPV6_MULTICAST_SCOPE_LINK 2
96
97 static inline uint8_t in6_multicast_scope(const pim_addr *addr)
98 {
99 return addr->s6_addr[1] & 0xf;
100 }
101
102 static inline bool in6_multicast_nofwd(const pim_addr *addr)
103 {
104 return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK;
105 }
106
107 /*
108 * (S,G) -> subscriber,(S,G)
109 */
110
111 static int gm_packet_sg_cmp(const struct gm_packet_sg *a,
112 const struct gm_packet_sg *b)
113 {
114 const struct gm_packet_state *s_a, *s_b;
115
116 s_a = gm_packet_sg2state(a);
117 s_b = gm_packet_sg2state(b);
118 return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr);
119 }
120
121 DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm,
122 gm_packet_sg_cmp);
123
124 static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg,
125 enum gm_sub_sense sense,
126 struct gm_subscriber *sub)
127 {
128 struct {
129 struct gm_packet_state hdr;
130 struct gm_packet_sg item;
131 } ref = {
132 /* clang-format off */
133 .hdr = {
134 .subscriber = sub,
135 },
136 .item = {
137 .offset = 0,
138 },
139 /* clang-format on */
140 };
141
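	/* the tree compare function reaches the subscriber address through the
	 * containing gm_packet_state (gm_packet_sg2state()), so the search key
	 * is a minimal fake container rather than a bare gm_packet_sg.
	 */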
142 return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item);
143 }
144
145 /*
146 * interface -> (*,G),pending
147 */
148
149 static int gm_grp_pending_cmp(const struct gm_grp_pending *a,
150 const struct gm_grp_pending *b)
151 {
152 return IPV6_ADDR_CMP(&a->grp, &b->grp);
153 }
154
155 DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm,
156 gm_grp_pending_cmp);
157
158 /*
159 * interface -> ([S1,S2,...],G),pending
160 */
161
162 static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a,
163 const struct gm_gsq_pending *b)
164 {
165 if (a->s_bit != b->s_bit)
166 return numcmp(a->s_bit, b->s_bit);
167
168 return IPV6_ADDR_CMP(&a->grp, &b->grp);
169 }
170
171 static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a)
172 {
173 uint32_t seed = a->s_bit ? 0x68f0eb5e : 0x156b7f19;
174
175 return jhash(&a->grp, sizeof(a->grp), seed);
176 }
177
178 DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp,
179 gm_gsq_pending_hash);
180
181 /*
182 * interface -> (S,G)
183 */
184
185 static int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b)
186 {
187 return pim_sgaddr_cmp(a->sgaddr, b->sgaddr);
188 }
189
190 DECLARE_RBTREE_UNIQ(gm_sgs, struct gm_sg, itm, gm_sg_cmp);
191
192 static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
193 pim_addr src)
194 {
195 struct gm_sg ref = {};
196
197 ref.sgaddr.grp = grp;
198 ref.sgaddr.src = src;
199 return gm_sgs_find(gm_ifp->sgs, &ref);
200 }
201
202 static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
203 pim_addr src)
204 {
205 struct gm_sg *ret, *prev;
206
207 ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
208 ret->sgaddr.grp = grp;
209 ret->sgaddr.src = src;
210 ret->iface = gm_ifp;
211 prev = gm_sgs_add(gm_ifp->sgs, ret);
212
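	/* gm_sgs_add() returns the pre-existing entry on a duplicate insert;
	 * in that case drop the fresh allocation and reuse the existing (S,G).
	 */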
213 if (prev) {
214 XFREE(MTYPE_GM_SG, ret);
215 ret = prev;
216 } else {
217 monotime(&ret->created);
218 gm_packet_sg_subs_init(ret->subs_positive);
219 gm_packet_sg_subs_init(ret->subs_negative);
220 }
221 return ret;
222 }
223
224 /*
225  * interface -> packets, sorted by expiry (because of add_tail insert order)
226 */
227
228 DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm);
229
230 /*
231 * subscriber -> packets
232 */
233
234 DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm);
235
236 /*
237 * interface -> subscriber
238 */
239
240 static int gm_subscriber_cmp(const struct gm_subscriber *a,
241 const struct gm_subscriber *b)
242 {
243 return IPV6_ADDR_CMP(&a->addr, &b->addr);
244 }
245
246 static uint32_t gm_subscriber_hash(const struct gm_subscriber *a)
247 {
248 return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4);
249 }
250
251 DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp,
252 gm_subscriber_hash);
253
254 static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp,
255 pim_addr addr)
256 {
257 struct gm_subscriber ref = {}, *ret;
258
259 ref.addr = addr;
260 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
261 if (ret)
262 ret->refcount++;
263 return ret;
264 }
265
266 static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp,
267 pim_addr addr)
268 {
269 struct gm_subscriber ref = {}, *ret;
270
271 ref.addr = addr;
272 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
273
274 if (!ret) {
275 ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret));
276 ret->iface = gm_ifp;
277 ret->addr = addr;
278 ret->refcount = 1;
279 monotime(&ret->created);
280 gm_packets_init(ret->packets);
281
282 gm_subscribers_add(gm_ifp->subscribers, ret);
283 }
284 return ret;
285 }
286
287 static void gm_subscriber_drop(struct gm_subscriber **subp)
288 {
289 struct gm_subscriber *sub = *subp;
290 struct gm_if *gm_ifp;
291
292 if (!sub)
293 return;
294 gm_ifp = sub->iface;
295
296 *subp = NULL;
297 sub->refcount--;
298
299 if (sub->refcount)
300 return;
301
302 gm_subscribers_del(gm_ifp->subscribers, sub);
303 XFREE(MTYPE_GM_SUBSCRIBER, sub);
304 }
305
306 /****************************************************************************/
307
308 /* bundle query timer values for combined v1/v2 handling */
309 struct gm_query_timers {
310 unsigned int qrv;
311 unsigned int max_resp_ms;
312 unsigned int qqic_ms;
313
314 struct timeval fuzz;
315 struct timeval expire_wait;
316 };
317
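/* expiry = the remaining (QRV - 1) retransmissions of the query, each one
 * query interval apart, plus the maximum response delay, plus configured fuzz.
 */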
318 static void gm_expiry_calc(struct gm_query_timers *timers)
319 {
320 unsigned int expire =
321 (timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms;
322 ldiv_t exp_div = ldiv(expire, 1000);
323
324 timers->expire_wait.tv_sec = exp_div.quot;
325 timers->expire_wait.tv_usec = exp_div.rem * 1000;
326 timeradd(&timers->expire_wait, &timers->fuzz, &timers->expire_wait);
327 }
328
329 static void gm_sg_free(struct gm_sg *sg)
330 {
331 	/* t_sg_expire is handled before this is reached */
332 EVENT_OFF(sg->t_sg_query);
333 gm_packet_sg_subs_fini(sg->subs_negative);
334 gm_packet_sg_subs_fini(sg->subs_positive);
335 XFREE(MTYPE_GM_SG, sg);
336 }
337
338 /* clang-format off */
339 static const char *const gm_states[] = {
340 [GM_SG_NOINFO] = "NOINFO",
341 [GM_SG_JOIN] = "JOIN",
342 [GM_SG_JOIN_EXPIRING] = "JOIN_EXPIRING",
343 [GM_SG_PRUNE] = "PRUNE",
344 [GM_SG_NOPRUNE] = "NOPRUNE",
345 [GM_SG_NOPRUNE_EXPIRING] = "NOPRUNE_EXPIRING",
346 };
347 /* clang-format on */
348
349 CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported");
350 /* tib_sg_gm_prune() below is an "un-join"; it doesn't prune S,G when *,G is
351 * joined. Whether we actually want/need to support this is a separate
352 * question - it is almost never used. In fact this is exactly what RFC5790
353 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
354 */
355
356 static void gm_sg_update(struct gm_sg *sg, bool has_expired)
357 {
358 struct gm_if *gm_ifp = sg->iface;
359 enum gm_sg_state prev, desired;
360 bool new_join;
361 struct gm_sg *grp = NULL;
362
363 if (!pim_addr_is_any(sg->sgaddr.src))
364 grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY);
365 else
366 assert(sg->state != GM_SG_PRUNE);
367
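	/* derive the desired state from subscription counts: any positive
	 * subscriber means JOIN; otherwise consult the *,G entry and only
	 * PRUNE if every *,G subscriber also excludes this particular source.
	 */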
368 if (gm_packet_sg_subs_count(sg->subs_positive)) {
369 desired = GM_SG_JOIN;
370 assert(!sg->t_sg_expire);
371 } else if ((sg->state == GM_SG_JOIN ||
372 sg->state == GM_SG_JOIN_EXPIRING) &&
373 !has_expired)
374 desired = GM_SG_JOIN_EXPIRING;
375 else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive))
376 desired = GM_SG_NOINFO;
377 else if (gm_packet_sg_subs_count(grp->subs_positive) ==
378 gm_packet_sg_subs_count(sg->subs_negative)) {
379 if ((sg->state == GM_SG_NOPRUNE ||
380 sg->state == GM_SG_NOPRUNE_EXPIRING) &&
381 !has_expired)
382 desired = GM_SG_NOPRUNE_EXPIRING;
383 else
384 desired = GM_SG_PRUNE;
385 } else if (gm_packet_sg_subs_count(sg->subs_negative))
386 desired = GM_SG_NOPRUNE;
387 else
388 desired = GM_SG_NOINFO;
389
390 if (desired != sg->state && !gm_ifp->stopping) {
391 if (PIM_DEBUG_GM_EVENTS)
392 zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
393 gm_states[desired]);
394
395 if (desired == GM_SG_JOIN_EXPIRING ||
396 desired == GM_SG_NOPRUNE_EXPIRING) {
397 struct gm_query_timers timers;
398
399 timers.qrv = gm_ifp->cur_qrv;
400 timers.max_resp_ms = gm_ifp->cur_max_resp;
401 timers.qqic_ms = gm_ifp->cur_query_intv_trig;
402 timers.fuzz = gm_ifp->cfg_timing_fuzz;
403
404 gm_expiry_calc(&timers);
405 gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);
406
407 EVENT_OFF(sg->t_sg_query);
408 sg->n_query = gm_ifp->cur_lmqc;
409 sg->query_sbit = false;
410 gm_trigger_specific(sg);
411 }
412 }
413 prev = sg->state;
414 sg->state = desired;
415
416 if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping)
417 new_join = false;
418 else
419 new_join = gm_sg_state_want_join(desired);
420
421 if (new_join && !sg->tib_joined) {
422 /* this will retry if join previously failed */
423 sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
424 gm_ifp->ifp, &sg->oil);
425 if (!sg->tib_joined)
426 zlog_warn(
427 "MLD join for %pSG%%%s not propagated into TIB",
428 &sg->sgaddr, gm_ifp->ifp->name);
429 else
430 zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr,
431 gm_ifp->ifp->name);
432
433 } else if (sg->tib_joined && !new_join) {
434 tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil);
435
436 sg->oil = NULL;
437 sg->tib_joined = false;
438 }
439
440 if (desired == GM_SG_NOINFO) {
441 /* multiple paths can lead to the last state going away;
442 * t_sg_expire can still be running if we're arriving from
443 * another path.
444 */
445 if (has_expired)
446 EVENT_OFF(sg->t_sg_expire);
447
448 assertf((!sg->t_sg_expire &&
449 !gm_packet_sg_subs_count(sg->subs_positive) &&
450 !gm_packet_sg_subs_count(sg->subs_negative)),
451 "%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
452 &sg->sgaddr, gm_ifp->ifp->name, has_expired,
453 sg->t_sg_expire, gm_states[prev], gm_states[desired],
454 gm_packet_sg_subs_count(sg->subs_positive),
455 gm_packet_sg_subs_count(sg->subs_negative), grp);
456
457 if (PIM_DEBUG_GM_TRACE)
458 zlog_debug(log_sg(sg, "dropping"));
459
460 gm_sgs_del(gm_ifp->sgs, sg);
461 gm_sg_free(sg);
462 }
463 }
464
465 /****************************************************************************/
466
467 /* the following bunch of functions deals with transferring state from
468 * received packets into gm_packet_state. As a reminder, the querier is
469 * structured to keep all items received in one packet together, since they
470  * will share expiry timers, which allows efficient handling.
471 */
472
473 static void gm_packet_free(struct gm_packet_state *pkt)
474 {
475 gm_packet_expires_del(pkt->iface->expires, pkt);
476 gm_packets_del(pkt->subscriber->packets, pkt);
477 gm_subscriber_drop(&pkt->subscriber);
478 XFREE(MTYPE_GM_STATE, pkt);
479 }
480
481 static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt,
482 struct gm_sg *sg, bool is_excl,
483 bool is_src)
484 {
485 struct gm_packet_sg *item;
486
487 assert(pkt->n_active < pkt->n_sg);
488
489 item = &pkt->items[pkt->n_active];
490 item->sg = sg;
491 item->is_excl = is_excl;
492 item->is_src = is_src;
493 item->offset = pkt->n_active;
494
495 pkt->n_active++;
496 return item;
497 }
498
499 static bool gm_packet_sg_drop(struct gm_packet_sg *item)
500 {
501 struct gm_packet_state *pkt;
502 size_t i;
503
504 assert(item->sg);
505
506 pkt = gm_packet_sg2state(item);
507 if (item->sg->most_recent == item)
508 item->sg->most_recent = NULL;
509
510 for (i = 0; i < item->n_exclude; i++) {
511 struct gm_packet_sg *excl_item;
512
513 excl_item = item + 1 + i;
514 if (!excl_item->sg)
515 continue;
516
517 gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item);
518 excl_item->sg = NULL;
519 pkt->n_active--;
520
521 assert(pkt->n_active > 0);
522 }
523
524 if (item->is_excl && item->is_src)
525 gm_packet_sg_subs_del(item->sg->subs_negative, item);
526 else
527 gm_packet_sg_subs_del(item->sg->subs_positive, item);
528 item->sg = NULL;
529 pkt->n_active--;
530
531 if (!pkt->n_active) {
532 gm_packet_free(pkt);
533 return true;
534 }
535 return false;
536 }
537
538 static void gm_packet_drop(struct gm_packet_state *pkt, bool trace)
539 {
540 for (size_t i = 0; i < pkt->n_sg; i++) {
541 struct gm_sg *sg = pkt->items[i].sg;
542 bool deleted;
543
544 if (!sg)
545 continue;
546
547 if (trace && PIM_DEBUG_GM_TRACE)
548 zlog_debug(log_sg(sg, "general-dropping from %pPA"),
549 &pkt->subscriber->addr);
550 deleted = gm_packet_sg_drop(&pkt->items[i]);
551
552 gm_sg_update(sg, true);
553 if (deleted)
554 break;
555 }
556 }
557
558 static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp,
559 struct gm_subscriber *subscriber,
560 pim_addr grp, pim_addr *srcs,
561 size_t n_src, enum gm_sub_sense sense)
562 {
563 struct gm_sg *sg;
564 struct gm_packet_sg *old_src;
565 size_t i;
566
567 for (i = 0; i < n_src; i++) {
568 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
569 if (!sg)
570 continue;
571
572 old_src = gm_packet_sg_find(sg, sense, subscriber);
573 if (!old_src)
574 continue;
575
576 gm_packet_sg_drop(old_src);
577 gm_sg_update(sg, false);
578 }
579 }
580
581 static void gm_sg_expiry_cancel(struct gm_sg *sg)
582 {
583 if (sg->t_sg_expire && PIM_DEBUG_GM_TRACE)
584 zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
585 EVENT_OFF(sg->t_sg_expire);
586 sg->query_sbit = true;
587 }
588
589 /* first pass: process all changes resulting in removal of state:
590 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
591 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
592 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
593 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
594 * note *replacing* state is NOT considered *removing* state here
595 *
596 * everything else is thrown into pkt for creation of state in pass 2
597 */
598 static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
599 struct mld_v2_rec_hdr *rechdr)
600 {
601 /* NB: pkt->subscriber can be NULL here if the subscriber was not
602 * previously seen!
603 */
604 struct gm_subscriber *subscriber = pkt->subscriber;
605 struct gm_sg *grp;
606 struct gm_packet_sg *old_grp = NULL;
607 struct gm_packet_sg *item;
608 size_t n_src = ntohs(rechdr->n_src);
609 size_t j;
610 bool is_excl = false;
611
612 grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY);
613 if (grp && subscriber)
614 old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
615
616 assert(old_grp == NULL || old_grp->is_excl);
617
618 switch (rechdr->type) {
619 case MLD_RECTYPE_IS_EXCLUDE:
620 case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
621 /* this always replaces or creates state */
622 is_excl = true;
623 if (!grp)
624 grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);
625
626 item = gm_packet_sg_setup(pkt, grp, is_excl, false);
627 item->n_exclude = n_src;
628
629 /* [EXCL_INCL_SG_NOTE] referenced below
630 *
631 * in theory, we should drop any S,G that the host may have
632 * previously added in INCLUDE mode. In practice, this is both
633 * incredibly rare and entirely irrelevant. It only makes any
634 * difference if an S,G that the host previously had on the
635 * INCLUDE list is now on the blocked list for EXCLUDE, which
636 * we can cover in processing the S,G list in pass2_excl().
637 *
638 * Other S,G from the host are simply left to expire
639 * "naturally" through general expiry.
640 */
641 break;
642
643 case MLD_RECTYPE_IS_INCLUDE:
644 case MLD_RECTYPE_CHANGE_TO_INCLUDE:
645 if (old_grp) {
646 /* INCLUDE has no *,G state, so old_grp here refers to
647 * previous EXCLUDE => delete it
648 */
649 gm_packet_sg_drop(old_grp);
650 gm_sg_update(grp, false);
651 CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
652 }
653 break;
654
655 case MLD_RECTYPE_ALLOW_NEW_SOURCES:
656 if (old_grp) {
657 /* remove S,Gs from EXCLUDE, and then we're done */
658 gm_packet_sg_remove_sources(pkt->iface, subscriber,
659 rechdr->grp, rechdr->srcs,
660 n_src, GM_SUB_NEG);
661 return;
662 }
663 /* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
664 		 * identical to IS_INCLUDE (because the list of sources in
665 * IS_INCLUDE is not exhaustive)
666 */
667 break;
668
669 case MLD_RECTYPE_BLOCK_OLD_SOURCES:
670 if (old_grp) {
671 /* this is intentionally not implemented because it
672 * would be complicated as hell. we only take the list
673 * of blocked sources from full group state records
674 */
675 return;
676 }
677
678 if (subscriber)
679 gm_packet_sg_remove_sources(pkt->iface, subscriber,
680 rechdr->grp, rechdr->srcs,
681 n_src, GM_SUB_POS);
682 return;
683 }
684
685 for (j = 0; j < n_src; j++) {
686 struct gm_sg *sg;
687
688 sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
689 if (!sg)
690 sg = gm_sg_make(pkt->iface, rechdr->grp,
691 rechdr->srcs[j]);
692
693 gm_packet_sg_setup(pkt, sg, is_excl, true);
694 }
695 }
696
697 /* second pass: creating/updating/refreshing state. All the items from the
698 * received packet have already been thrown into gm_packet_state.
699 */
700
701 static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i)
702 {
703 struct gm_packet_sg *item = &pkt->items[i];
704 struct gm_packet_sg *old = NULL;
705 struct gm_sg *sg = item->sg;
706
707 /* EXCLUDE state was already dropped in pass1 */
708 assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber));
709
710 old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber);
711 if (old)
712 gm_packet_sg_drop(old);
713
714 pkt->n_active++;
715 gm_packet_sg_subs_add(sg->subs_positive, item);
716
717 sg->most_recent = item;
718 gm_sg_expiry_cancel(sg);
719 gm_sg_update(sg, false);
720 }
721
722 static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs)
723 {
724 struct gm_packet_sg *item = &pkt->items[offs];
725 struct gm_packet_sg *old_grp, *item_dup;
726 struct gm_sg *sg_grp = item->sg;
727 size_t i;
728
729 old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber);
730 if (old_grp) {
731 for (i = 0; i < item->n_exclude; i++) {
732 struct gm_packet_sg *item_src, *old_src;
733
734 item_src = &pkt->items[offs + 1 + i];
735 old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG,
736 pkt->subscriber);
737 if (old_src)
738 gm_packet_sg_drop(old_src);
739
740 /* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
741 * items left over if the host previously had INCLUDE
742 * mode going. Remove them here if we find any.
743 */
744 old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS,
745 pkt->subscriber);
746 if (old_src)
747 gm_packet_sg_drop(old_src);
748 }
749
750 /* the previous loop has removed the S,G entries which are
751 * still excluded after this update. So anything left on the
752 * old item was previously excluded but is now included
753 * => need to trigger update on S,G
754 */
755 for (i = 0; i < old_grp->n_exclude; i++) {
756 struct gm_packet_sg *old_src;
757 struct gm_sg *old_sg_src;
758
759 old_src = old_grp + 1 + i;
760 old_sg_src = old_src->sg;
761 if (!old_sg_src)
762 continue;
763
764 gm_packet_sg_drop(old_src);
765 gm_sg_update(old_sg_src, false);
766 }
767
768 gm_packet_sg_drop(old_grp);
769 }
770
771 item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item);
772 assert(!item_dup);
773 pkt->n_active++;
774
775 sg_grp->most_recent = item;
776 gm_sg_expiry_cancel(sg_grp);
777
778 for (i = 0; i < item->n_exclude; i++) {
779 struct gm_packet_sg *item_src;
780
781 item_src = &pkt->items[offs + 1 + i];
782 item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative,
783 item_src);
784
785 if (item_dup)
786 item_src->sg = NULL;
787 else {
788 pkt->n_active++;
789 gm_sg_update(item_src->sg, false);
790 }
791 }
792
793 /* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
794 * to get lower PIM churn/flapping
795 */
796 gm_sg_update(sg_grp, false);
797 }
798
799 CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state");
800 /* on receiving a query, we need to update our robustness/query interval to
801 * match, so we correctly process group/source specific queries after last
802 * member leaves
803 */
804
805 static void gm_handle_v2_report(struct gm_if *gm_ifp,
806 const struct sockaddr_in6 *pkt_src, char *data,
807 size_t len)
808 {
809 struct mld_v2_report_hdr *hdr;
810 size_t i, n_records, max_entries;
811 struct gm_packet_state *pkt;
812
813 if (len < sizeof(*hdr)) {
814 if (PIM_DEBUG_GM_PACKETS)
815 zlog_debug(log_pkt_src(
816 "malformed MLDv2 report (truncated header)"));
817 gm_ifp->stats.rx_drop_malformed++;
818 return;
819 }
820
821 /* errors after this may at least partially process the packet */
822 gm_ifp->stats.rx_new_report++;
823
824 hdr = (struct mld_v2_report_hdr *)data;
825 data += sizeof(*hdr);
826 len -= sizeof(*hdr);
827
828 /* can't have more *,G and S,G items than there is space for ipv6
829 	 * addresses, so just use this to allocate a temporary buffer
830 */
831 max_entries = len / sizeof(pim_addr);
832 pkt = XCALLOC(MTYPE_GM_STATE,
833 offsetof(struct gm_packet_state, items[max_entries]));
834 pkt->n_sg = max_entries;
835 pkt->iface = gm_ifp;
836 pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);
837
838 n_records = ntohs(hdr->n_records);
839
840 /* validate & remove state in v2_pass1() */
841 for (i = 0; i < n_records; i++) {
842 struct mld_v2_rec_hdr *rechdr;
843 size_t n_src, record_size;
844
845 if (len < sizeof(*rechdr)) {
846 zlog_warn(log_pkt_src(
847 "malformed MLDv2 report (truncated record header)"));
848 gm_ifp->stats.rx_trunc_report++;
849 break;
850 }
851
852 rechdr = (struct mld_v2_rec_hdr *)data;
853 data += sizeof(*rechdr);
854 len -= sizeof(*rechdr);
855
856 n_src = ntohs(rechdr->n_src);
857 record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4;
858
859 if (len < record_size) {
860 zlog_warn(log_pkt_src(
861 "malformed MLDv2 report (truncated source list)"));
862 gm_ifp->stats.rx_trunc_report++;
863 break;
864 }
865 if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) {
866 zlog_warn(
867 log_pkt_src(
868 "malformed MLDv2 report (invalid group %pI6)"),
869 &rechdr->grp);
870 gm_ifp->stats.rx_trunc_report++;
871 break;
872 }
873
874 data += record_size;
875 len -= record_size;
876
877 gm_handle_v2_pass1(pkt, rechdr);
878 }
879
880 if (!pkt->n_active) {
881 gm_subscriber_drop(&pkt->subscriber);
882 XFREE(MTYPE_GM_STATE, pkt);
883 return;
884 }
885
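	/* shrink the allocation down to the items actually used; max_entries
	 * above was only an upper bound derived from the packet length.
	 */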
886 pkt = XREALLOC(MTYPE_GM_STATE, pkt,
887 offsetof(struct gm_packet_state, items[pkt->n_active]));
888 pkt->n_sg = pkt->n_active;
889 pkt->n_active = 0;
890
891 monotime(&pkt->received);
892 if (!pkt->subscriber)
893 pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr);
894 gm_packets_add_tail(pkt->subscriber->packets, pkt);
895 gm_packet_expires_add_tail(gm_ifp->expires, pkt);
896
897 for (i = 0; i < pkt->n_sg; i++)
898 if (!pkt->items[i].is_excl)
899 gm_handle_v2_pass2_incl(pkt, i);
900 else {
901 gm_handle_v2_pass2_excl(pkt, i);
902 i += pkt->items[i].n_exclude;
903 }
904
905 if (pkt->n_active == 0)
906 gm_packet_free(pkt);
907 }
908
909 static void gm_handle_v1_report(struct gm_if *gm_ifp,
910 const struct sockaddr_in6 *pkt_src, char *data,
911 size_t len)
912 {
913 struct mld_v1_pkt *hdr;
914 struct gm_packet_state *pkt;
915 struct gm_sg *grp;
916 struct gm_packet_sg *item;
917 size_t max_entries;
918
919 if (len < sizeof(*hdr)) {
920 if (PIM_DEBUG_GM_PACKETS)
921 zlog_debug(log_pkt_src(
922 "malformed MLDv1 report (truncated)"));
923 gm_ifp->stats.rx_drop_malformed++;
924 return;
925 }
926
927 gm_ifp->stats.rx_old_report++;
928
929 hdr = (struct mld_v1_pkt *)data;
930
931 max_entries = 1;
932 pkt = XCALLOC(MTYPE_GM_STATE,
933 offsetof(struct gm_packet_state, items[max_entries]));
934 pkt->n_sg = max_entries;
935 pkt->iface = gm_ifp;
936 pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
937
938 /* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */
939
940 grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY);
941 if (!grp)
942 grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY);
943
944 item = gm_packet_sg_setup(pkt, grp, true, false);
945 item->n_exclude = 0;
946 CPP_NOTICE("set v1-seen timer on grp here");
947
948 /* } */
949
950 /* pass2 will count n_active back up to 1. Also since a v1 report
951 * has exactly 1 group, we can skip the realloc() that v2 needs here.
952 */
953 assert(pkt->n_active == 1);
954 pkt->n_sg = pkt->n_active;
955 pkt->n_active = 0;
956
957 monotime(&pkt->received);
958 if (!pkt->subscriber)
959 pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked);
960 gm_packets_add_tail(pkt->subscriber->packets, pkt);
961 gm_packet_expires_add_tail(gm_ifp->expires, pkt);
962
963 /* pass2 covers installing state & removing old state; all the v1
964 * compat is handled at this point.
965 *
966 * Note that "old state" may be v2; subscribers will switch from v2
967 * reports to v1 reports when the querier changes from v2 to v1. So,
968 * limiting this to v1 would be wrong.
969 */
970 gm_handle_v2_pass2_excl(pkt, 0);
971
972 if (pkt->n_active == 0)
973 gm_packet_free(pkt);
974 }
975
976 static void gm_handle_v1_leave(struct gm_if *gm_ifp,
977 const struct sockaddr_in6 *pkt_src, char *data,
978 size_t len)
979 {
980 struct mld_v1_pkt *hdr;
981 struct gm_subscriber *subscriber;
982 struct gm_sg *grp;
983 struct gm_packet_sg *old_grp;
984
985 if (len < sizeof(*hdr)) {
986 if (PIM_DEBUG_GM_PACKETS)
987 zlog_debug(log_pkt_src(
988 "malformed MLDv1 leave (truncated)"));
989 gm_ifp->stats.rx_drop_malformed++;
990 return;
991 }
992
993 gm_ifp->stats.rx_old_leave++;
994
995 hdr = (struct mld_v1_pkt *)data;
996
997 subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
998 if (!subscriber)
999 return;
1000
1001 /* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */
1002
1003 grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY);
1004 if (grp) {
1005 old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
1006 if (old_grp) {
1007 gm_packet_sg_drop(old_grp);
1008 gm_sg_update(grp, false);
1009 CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
1010 }
1011 }
1012
1013 /* } */
1014
1015 /* nothing more to do here, pass2 is no-op for leaves */
1016 gm_subscriber_drop(&subscriber);
1017 }
1018
1019 /* for each general query received (or sent), a timer is started to expire
1020 * _everything_ at the appropriate time (including robustness multiplier).
1021 *
1022 * So when this timer hits, all packets - with all of their items - that were
1023 * received *before* the query are aged out, and state updated accordingly.
1024 * Note that when we receive a refresh/update, the previous/old packet is
1025 * already dropped and replaced with a new one, so in normal steady-state
1026 * operation, this timer won't be doing anything.
1027 *
1028 * Additionally, if a subscriber actively leaves a group, that goes through
1029 * its own path too and won't hit this. This is really only triggered when a
1030 * host straight up disappears.
1031 */
1032 static void gm_t_expire(struct event *t)
1033 {
1034 struct gm_if *gm_ifp = EVENT_ARG(t);
1035 struct gm_packet_state *pkt;
1036
1037 zlog_info(log_ifp("general expiry timer"));
1038
1039 while (gm_ifp->n_pending) {
1040 struct gm_general_pending *pend = gm_ifp->pending;
1041 struct timeval remain;
1042 int64_t remain_ms;
1043
1044 remain_ms = monotime_until(&pend->expiry, &remain);
1045 if (remain_ms > 0) {
1046 if (PIM_DEBUG_GM_EVENTS)
1047 zlog_debug(
1048 log_ifp("next general expiry in %" PRId64 "ms"),
1049 remain_ms / 1000);
1050
1051 event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
1052 &remain, &gm_ifp->t_expire);
1053 return;
1054 }
1055
1056 while ((pkt = gm_packet_expires_first(gm_ifp->expires))) {
1057 if (timercmp(&pkt->received, &pend->query, >=))
1058 break;
1059
1060 if (PIM_DEBUG_GM_PACKETS)
1061 zlog_debug(log_ifp("expire packet %p"), pkt);
1062 gm_packet_drop(pkt, true);
1063 }
1064
1065 gm_ifp->n_pending--;
1066 memmove(gm_ifp->pending, gm_ifp->pending + 1,
1067 gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
1068 }
1069
1070 if (PIM_DEBUG_GM_EVENTS)
1071 zlog_debug(log_ifp("next general expiry waiting for query"));
1072 }
1073
1074 /* NB: the receive handlers will also run when sending packets, since we
1075 * receive our own packets back in.
1076 */
1077 static void gm_handle_q_general(struct gm_if *gm_ifp,
1078 struct gm_query_timers *timers)
1079 {
1080 struct timeval now, expiry;
1081 struct gm_general_pending *pend;
1082
1083 monotime(&now);
1084 timeradd(&now, &timers->expire_wait, &expiry);
1085
1086 while (gm_ifp->n_pending) {
1087 pend = &gm_ifp->pending[gm_ifp->n_pending - 1];
1088
1089 if (timercmp(&pend->expiry, &expiry, <))
1090 break;
1091
1092 /* if we end up here, the last item in pending[] has an expiry
1093 * later than the expiry for this query. But our query time
1094 * (now) is later than that of the item (because, well, that's
1095 * how time works.) This makes this query meaningless since
1096 * it's "supersetted" within the preexisting query
1097 */
1098
1099 if (PIM_DEBUG_GM_TRACE_DETAIL)
1100 zlog_debug(
1101 log_ifp("zapping supersetted general timer %pTVMu"),
1102 &pend->expiry);
1103
1104 gm_ifp->n_pending--;
1105 if (!gm_ifp->n_pending)
1106 EVENT_OFF(gm_ifp->t_expire);
1107 }
1108
1109 /* people might be messing with their configs or something */
1110 if (gm_ifp->n_pending == array_size(gm_ifp->pending))
1111 return;
1112
1113 pend = &gm_ifp->pending[gm_ifp->n_pending];
1114 pend->query = now;
1115 pend->expiry = expiry;
1116
1117 if (!gm_ifp->n_pending++) {
1118 if (PIM_DEBUG_GM_TRACE)
1119 zlog_debug(
1120 log_ifp("starting general timer @ 0: %pTVMu"),
1121 &pend->expiry);
1122 event_add_timer_tv(router->master, gm_t_expire, gm_ifp,
1123 &timers->expire_wait, &gm_ifp->t_expire);
1124 } else if (PIM_DEBUG_GM_TRACE)
1125 zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
1126 gm_ifp->n_pending, &pend->expiry);
1127 }
1128
1129 static void gm_t_sg_expire(struct event *t)
1130 {
1131 struct gm_sg *sg = EVENT_ARG(t);
1132 struct gm_if *gm_ifp = sg->iface;
1133 struct gm_packet_sg *item;
1134
1135 assertf(sg->state == GM_SG_JOIN_EXPIRING ||
1136 sg->state == GM_SG_NOPRUNE_EXPIRING,
1137 "%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t);
1138
1139 frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
1140 /* this will also drop EXCLUDE mode S,G lists together with
1141 * the *,G entry
1142 */
1143 gm_packet_sg_drop(item);
1144
1145 /* subs_negative items are only timed out together with the *,G entry
1146 * since we won't get any reports for a group-and-source query
1147 */
1148 gm_sg_update(sg, true);
1149 }
1150
1151 static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg,
1152 struct timeval ref)
1153 {
1154 struct gm_packet_state *pkt;
1155
1156 if (!sg->most_recent) {
1157 struct gm_packet_state *best_pkt = NULL;
1158 struct gm_packet_sg *item;
1159
1160 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
1161 pkt = gm_packet_sg2state(item);
1162
1163 if (!best_pkt ||
1164 timercmp(&pkt->received, &best_pkt->received, >)) {
1165 best_pkt = pkt;
1166 sg->most_recent = item;
1167 }
1168 }
1169 }
1170 if (sg->most_recent) {
1171 struct timeval fuzz;
1172
1173 pkt = gm_packet_sg2state(sg->most_recent);
1174
1175 		/* this shouldn't happen on a plain old real ethernet segment,
1176 * but on something like a VXLAN or VPLS it is very possible
1177 * that we get a report before the query that triggered it.
1178 		 * (imagine a triangle scenario with 3 datacenters; it's very
1179 * possible A->B + B->C is faster than A->C due to odd routing)
1180 *
1181 		 * This adds a little tolerance to handle that case.
1182 */
1183 timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz);
1184
1185 if (timercmp(&fuzz, &ref, >))
1186 return true;
1187 }
1188 return false;
1189 }
1190
1191 static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
1192 struct timeval expire_wait)
1193 {
1194 struct timeval now;
1195
1196 if (!sg)
1197 return;
1198 if (sg->state == GM_SG_PRUNE)
1199 return;
1200
1201 monotime(&now);
1202 if (gm_sg_check_recent(gm_ifp, sg, now))
1203 return;
1204
1205 if (PIM_DEBUG_GM_TRACE)
1206 zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait);
1207
1208 if (sg->t_sg_expire) {
1209 struct timeval remain;
1210
1211 remain = event_timer_remain(sg->t_sg_expire);
1212 if (timercmp(&remain, &expire_wait, <=))
1213 return;
1214
1215 EVENT_OFF(sg->t_sg_expire);
1216 }
1217
1218 event_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
1219 &sg->t_sg_expire);
1220 }
1221
1222 static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
1223 struct gm_query_timers *timers, pim_addr grp,
1224 const pim_addr *srcs, size_t n_src)
1225 {
1226 struct gm_sg *sg;
1227 size_t i;
1228
1229 for (i = 0; i < n_src; i++) {
1230 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
1231 gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1232 }
1233 }
1234
1235 static void gm_t_grp_expire(struct event *t)
1236 {
1237 /* if we're here, that means when we received the group-specific query
1238 * there was one or more active S,G for this group. For *,G the timer
1239 * in sg->t_sg_expire is running separately and gets cancelled when we
1240 * receive a report, so that work is left to gm_t_sg_expire and we
1241 * shouldn't worry about it here.
1242 */
1243 struct gm_grp_pending *pend = EVENT_ARG(t);
1244 struct gm_if *gm_ifp = pend->iface;
1245 struct gm_sg *sg, *sg_start, sg_ref = {};
1246
1247 if (PIM_DEBUG_GM_EVENTS)
1248 zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);
1249
1250 /* gteq lookup - try to find *,G or S,G (S,G is > *,G)
1251 * could technically be gt to skip a possible *,G
1252 */
1253 sg_ref.sgaddr.grp = pend->grp;
1254 sg_ref.sgaddr.src = PIMADDR_ANY;
1255 sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
1256
1257 frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
1258 struct gm_packet_sg *item;
1259
1260 if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
1261 break;
1262 if (pim_addr_is_any(sg->sgaddr.src))
1263 /* handled by gm_t_sg_expire / sg->t_sg_expire */
1264 continue;
1265 if (gm_sg_check_recent(gm_ifp, sg, pend->query))
1266 continue;
1267
1268 /* we may also have a group-source-specific query going on in
1269 * parallel. But if we received nothing for the *,G query,
1270 * the S,G query is kinda irrelevant.
1271 */
1272 EVENT_OFF(sg->t_sg_expire);
1273
1274 frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
1275 /* this will also drop the EXCLUDE S,G lists */
1276 gm_packet_sg_drop(item);
1277
1278 gm_sg_update(sg, true);
1279 }
1280
1281 gm_grp_pends_del(gm_ifp->grp_pends, pend);
1282 XFREE(MTYPE_GM_GRP_PENDING, pend);
1283 }
1284
1285 static void gm_handle_q_group(struct gm_if *gm_ifp,
1286 struct gm_query_timers *timers, pim_addr grp)
1287 {
1288 struct gm_sg *sg, sg_ref = {};
1289 struct gm_grp_pending *pend, pend_ref = {};
1290
1291 sg_ref.sgaddr.grp = grp;
1292 sg_ref.sgaddr.src = PIMADDR_ANY;
1293 /* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
1294 sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
1295
1296 if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
1297 /* we have nothing at all for this group - don't waste RAM */
1298 return;
1299
1300 if (pim_addr_is_any(sg->sgaddr.src)) {
1301 /* actually found *,G entry here */
1302 if (PIM_DEBUG_GM_TRACE)
1303 zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
1304 &grp);
1305 gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1306
1307 sg = gm_sgs_next(gm_ifp->sgs, sg);
1308 if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
1309 /* no S,G for this group */
1310 return;
1311 }
1312
1313 pend_ref.grp = grp;
1314 pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);
1315
1316 if (pend) {
1317 struct timeval remain;
1318
1319 remain = event_timer_remain(pend->t_expire);
1320 if (timercmp(&remain, &timers->expire_wait, <=))
1321 return;
1322
1323 EVENT_OFF(pend->t_expire);
1324 } else {
1325 pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
1326 pend->grp = grp;
1327 pend->iface = gm_ifp;
1328 gm_grp_pends_add(gm_ifp->grp_pends, pend);
1329 }
1330
1331 monotime(&pend->query);
1332 event_add_timer_tv(router->master, gm_t_grp_expire, pend,
1333 &timers->expire_wait, &pend->t_expire);
1334
1335 if (PIM_DEBUG_GM_TRACE)
1336 zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
1337 pend->t_expire);
1338 }
1339
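/* restart the query cycle (with startup timing), but only if this router is
 * the elected querier on the interface.
 */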
1340 static void gm_bump_querier(struct gm_if *gm_ifp)
1341 {
1342 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1343
1344 EVENT_OFF(gm_ifp->t_query);
1345
1346 if (pim_addr_is_any(pim_ifp->ll_lowest))
1347 return;
1348 if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
1349 return;
1350
1351 gm_ifp->n_startup = gm_ifp->cur_qrv;
1352
1353 event_execute(router->master, gm_t_query, gm_ifp, 0);
1354 }
1355
1356 static void gm_t_other_querier(struct event *t)
1357 {
1358 struct gm_if *gm_ifp = EVENT_ARG(t);
1359 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1360
1361 zlog_info(log_ifp("other querier timer expired"));
1362
1363 gm_ifp->querier = pim_ifp->ll_lowest;
1364 gm_ifp->n_startup = gm_ifp->cur_qrv;
1365
1366 event_execute(router->master, gm_t_query, gm_ifp, 0);
1367 }
1368
1369 static void gm_handle_query(struct gm_if *gm_ifp,
1370 const struct sockaddr_in6 *pkt_src,
1371 pim_addr *pkt_dst, char *data, size_t len)
1372 {
1373 struct mld_v2_query_hdr *hdr;
1374 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1375 struct gm_query_timers timers;
1376 bool general_query;
1377
1378 if (len < sizeof(struct mld_v2_query_hdr) &&
1379 len != sizeof(struct mld_v1_pkt)) {
1380 zlog_warn(log_pkt_src("invalid query size"));
1381 gm_ifp->stats.rx_drop_malformed++;
1382 return;
1383 }
1384
1385 hdr = (struct mld_v2_query_hdr *)data;
1386 general_query = pim_addr_is_any(hdr->grp);
1387
1388 if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) {
1389 zlog_warn(log_pkt_src(
1390 "malformed MLDv2 query (invalid group %pI6)"),
1391 &hdr->grp);
1392 gm_ifp->stats.rx_drop_malformed++;
1393 return;
1394 }
1395
1396 if (len >= sizeof(struct mld_v2_query_hdr)) {
1397 size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);
1398
1399 if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
1400 zlog_warn(log_pkt_src(
1401 "malformed MLDv2 query (truncated source list)"));
1402 gm_ifp->stats.rx_drop_malformed++;
1403 return;
1404 }
1405
1406 if (general_query && src_space) {
1407 zlog_warn(log_pkt_src(
1408 "malformed MLDv2 query (general query with non-empty source list)"));
1409 gm_ifp->stats.rx_drop_malformed++;
1410 return;
1411 }
1412 }
1413
1414 /* accepting queries unicast to us (or addressed to a wrong group)
1415 * can mess up querier election as well as cause us to terminate
1416 * traffic (since after a unicast query no reports will be coming in)
1417 */
1418 if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) {
1419 if (pim_addr_is_any(hdr->grp)) {
1420 zlog_warn(
1421 log_pkt_src(
1422 "wrong destination %pPA for general query"),
1423 pkt_dst);
1424 gm_ifp->stats.rx_drop_dstaddr++;
1425 return;
1426 }
1427
1428 if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) {
1429 gm_ifp->stats.rx_drop_dstaddr++;
1430 zlog_warn(
1431 log_pkt_src(
1432 "wrong destination %pPA for group specific query"),
1433 pkt_dst);
1434 return;
1435 }
1436 }
1437
1438 if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
1439 if (PIM_DEBUG_GM_EVENTS)
1440 zlog_debug(
1441 log_pkt_src("replacing elected querier %pPA"),
1442 &gm_ifp->querier);
1443
1444 gm_ifp->querier = pkt_src->sin6_addr;
1445 }
1446
1447 if (len == sizeof(struct mld_v1_pkt)) {
1448 timers.qrv = gm_ifp->cur_qrv;
1449 timers.max_resp_ms = hdr->max_resp_code;
1450 timers.qqic_ms = gm_ifp->cur_query_intv;
1451 } else {
1452 timers.qrv = (hdr->flags & 0x7) ?: 8;
1453 timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code);
1454 timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000;
1455 }
1456 timers.fuzz = gm_ifp->cfg_timing_fuzz;
1457
1458 gm_expiry_calc(&timers);
1459
1460 if (PIM_DEBUG_GM_TRACE_DETAIL)
1461 zlog_debug(
1462 log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
1463 timers.qrv, timers.max_resp_ms, timers.qqic_ms,
1464 &timers.expire_wait);
1465
1466 if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
1467 unsigned int other_ms;
1468
1469 EVENT_OFF(gm_ifp->t_query);
1470 EVENT_OFF(gm_ifp->t_other_querier);
1471
1472 other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
1473 event_add_timer_msec(router->master, gm_t_other_querier, gm_ifp,
1474 other_ms, &gm_ifp->t_other_querier);
1475 }
1476
1477 if (len == sizeof(struct mld_v1_pkt)) {
1478 if (general_query) {
1479 gm_handle_q_general(gm_ifp, &timers);
1480 gm_ifp->stats.rx_query_old_general++;
1481 } else {
1482 gm_handle_q_group(gm_ifp, &timers, hdr->grp);
1483 gm_ifp->stats.rx_query_old_group++;
1484 }
1485 return;
1486 }
1487
1488 /* v2 query - [S]uppress bit */
1489 if (hdr->flags & 0x8) {
1490 gm_ifp->stats.rx_query_new_sbit++;
1491 return;
1492 }
1493
1494 if (general_query) {
1495 gm_handle_q_general(gm_ifp, &timers);
1496 gm_ifp->stats.rx_query_new_general++;
1497 } else if (!ntohs(hdr->n_src)) {
1498 gm_handle_q_group(gm_ifp, &timers, hdr->grp);
1499 gm_ifp->stats.rx_query_new_group++;
1500 } else {
1501 gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
1502 ntohs(hdr->n_src));
1503 gm_ifp->stats.rx_query_new_groupsrc++;
1504 }
1505 }
1506
1507 static void gm_rx_process(struct gm_if *gm_ifp,
1508 const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst,
1509 void *data, size_t pktlen)
1510 {
1511 struct icmp6_plain_hdr *icmp6 = data;
1512 uint16_t pkt_csum, ref_csum;
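	/* ICMPv6 checksums cover an IPv6 pseudo-header (source, destination,
	 * upper-layer length, next header) in addition to the ICMPv6 payload.
	 */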
1513 struct ipv6_ph ph6 = {
1514 .src = pkt_src->sin6_addr,
1515 .dst = *pkt_dst,
1516 .ulpl = htons(pktlen),
1517 .next_hdr = IPPROTO_ICMPV6,
1518 };
1519
1520 pkt_csum = icmp6->icmp6_cksum;
1521 icmp6->icmp6_cksum = 0;
1522 ref_csum = in_cksum_with_ph6(&ph6, data, pktlen);
1523
1524 if (pkt_csum != ref_csum) {
1525 zlog_warn(
1526 log_pkt_src(
1527 "(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
1528 pkt_dst, pkt_csum, ref_csum);
1529 gm_ifp->stats.rx_drop_csum++;
1530 return;
1531 }
1532
1533 data = (icmp6 + 1);
1534 pktlen -= sizeof(*icmp6);
1535
1536 switch (icmp6->icmp6_type) {
1537 case ICMP6_MLD_QUERY:
1538 gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen);
1539 break;
1540 case ICMP6_MLD_V1_REPORT:
1541 gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen);
1542 break;
1543 case ICMP6_MLD_V1_DONE:
1544 gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen);
1545 break;
1546 case ICMP6_MLD_V2_REPORT:
1547 gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen);
1548 break;
1549 }
1550 }
1551
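/* walk the Hop-by-Hop options header (as delivered via IPV6_HOPOPTS) looking
 * for a Router Alert option with the given value; RFC 2711 value 0 = MLD.
 */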
1552 static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
1553 uint16_t alert_type)
1554 {
1555 uint8_t *hopopt_end;
1556
1557 if (hopopt_len < 8)
1558 return false;
1559 if (hopopt_len < (hopopts[1] + 1U) * 8U)
1560 return false;
1561
1562 hopopt_end = hopopts + (hopopts[1] + 1) * 8;
1563 hopopts += 2;
1564
1565 while (hopopts < hopopt_end) {
1566 if (hopopts[0] == IP6OPT_PAD1) {
1567 hopopts++;
1568 continue;
1569 }
1570
1571 if (hopopts > hopopt_end - 2)
1572 break;
1573 if (hopopts > hopopt_end - 2 - hopopts[1])
1574 break;
1575
1576 if (hopopts[0] == IP6OPT_ROUTER_ALERT && hopopts[1] == 2) {
1577 uint16_t have_type = (hopopts[2] << 8) | hopopts[3];
1578
1579 if (have_type == alert_type)
1580 return true;
1581 }
1582
1583 hopopts += 2 + hopopts[1];
1584 }
1585 return false;
1586 }
1587
1588 static void gm_t_recv(struct event *t)
1589 {
1590 struct pim_instance *pim = EVENT_ARG(t);
1591 union {
1592 char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
1593 CMSG_SPACE(256) /* hop options */ +
1594 CMSG_SPACE(sizeof(int)) /* hopcount */];
1595 struct cmsghdr align;
1596 } cmsgbuf;
1597 struct cmsghdr *cmsg;
1598 struct in6_pktinfo *pktinfo = NULL;
1599 uint8_t *hopopts = NULL;
1600 size_t hopopt_len = 0;
1601 int *hoplimit = NULL;
1602 char rxbuf[2048];
1603 struct msghdr mh[1] = {};
1604 struct iovec iov[1];
1605 struct sockaddr_in6 pkt_src[1] = {};
1606 ssize_t nread;
1607 size_t pktlen;
1608
1609 event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
1610 &pim->t_gm_recv);
1611
1612 iov->iov_base = rxbuf;
1613 iov->iov_len = sizeof(rxbuf);
1614
1615 mh->msg_name = pkt_src;
1616 mh->msg_namelen = sizeof(pkt_src);
1617 mh->msg_control = cmsgbuf.buf;
1618 mh->msg_controllen = sizeof(cmsgbuf.buf);
1619 mh->msg_iov = iov;
1620 mh->msg_iovlen = array_size(iov);
1621 mh->msg_flags = 0;
1622
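	/* peek with MSG_TRUNC first to learn the real datagram size, so
	 * packets larger than rxbuf can be pulled into a temporary heap
	 * buffer instead.
	 */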
1623 nread = recvmsg(pim->gm_socket, mh, MSG_PEEK | MSG_TRUNC);
1624 if (nread <= 0) {
1625 zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
1626 pim->gm_rx_drop_sys++;
1627 return;
1628 }
1629
1630 if ((size_t)nread > sizeof(rxbuf)) {
1631 iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread);
1632 iov->iov_len = nread;
1633 }
1634 nread = recvmsg(pim->gm_socket, mh, 0);
1635 if (nread <= 0) {
1636 zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
1637 pim->gm_rx_drop_sys++;
1638 goto out_free;
1639 }
1640
1641 struct interface *ifp;
1642
1643 ifp = if_lookup_by_index(pkt_src->sin6_scope_id, pim->vrf->vrf_id);
1644 if (!ifp || !ifp->info)
1645 goto out_free;
1646
1647 struct pim_interface *pim_ifp = ifp->info;
1648 struct gm_if *gm_ifp = pim_ifp->mld;
1649
1650 if (!gm_ifp)
1651 goto out_free;
1652
1653 for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) {
1654 if (cmsg->cmsg_level != SOL_IPV6)
1655 continue;
1656
1657 switch (cmsg->cmsg_type) {
1658 case IPV6_PKTINFO:
1659 pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
1660 break;
1661 case IPV6_HOPOPTS:
1662 hopopts = CMSG_DATA(cmsg);
1663 hopopt_len = cmsg->cmsg_len - sizeof(*cmsg);
1664 break;
1665 case IPV6_HOPLIMIT:
1666 hoplimit = (int *)CMSG_DATA(cmsg);
1667 break;
1668 }
1669 }
1670
1671 if (!pktinfo || !hoplimit) {
1672 zlog_err(log_ifp(
1673 "BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
1674 pim->gm_rx_drop_sys++;
1675 goto out_free;
1676 }
1677
1678 if (*hoplimit != 1) {
1679 zlog_err(log_pkt_src("packet with hop limit != 1"));
1680 /* spoofing attempt => count on srcaddr counter */
1681 gm_ifp->stats.rx_drop_srcaddr++;
1682 goto out_free;
1683 }
1684
1685 if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) {
1686 zlog_err(log_pkt_src(
1687 "packet without IPv6 Router Alert MLD option"));
1688 gm_ifp->stats.rx_drop_ra++;
1689 goto out_free;
1690 }
1691
1692 if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr))
1693 /* reports from :: happen in normal operation for DAD, so
1694 * don't spam log messages about this
1695 */
1696 goto out_free;
1697
1698 if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) {
1699 zlog_warn(log_pkt_src("packet from invalid source address"));
1700 gm_ifp->stats.rx_drop_srcaddr++;
1701 goto out_free;
1702 }
1703
1704 pktlen = nread;
1705 if (pktlen < sizeof(struct icmp6_plain_hdr)) {
1706 zlog_warn(log_pkt_src("truncated packet"));
1707 gm_ifp->stats.rx_drop_malformed++;
1708 goto out_free;
1709 }
1710
1711 gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base,
1712 pktlen);
1713
1714 out_free:
1715 if (iov->iov_base != rxbuf)
1716 XFREE(MTYPE_GM_PACKET, iov->iov_base);
1717 }
1718
1719 static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
1720 const pim_addr *srcs, size_t n_srcs, bool s_bit)
1721 {
1722 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1723 struct sockaddr_in6 dstaddr = {
1724 .sin6_family = AF_INET6,
1725 .sin6_scope_id = gm_ifp->ifp->ifindex,
1726 };
1727 struct {
1728 struct icmp6_plain_hdr hdr;
1729 struct mld_v2_query_hdr v2_query;
1730 } query = {
1731 /* clang-format off */
1732 .hdr = {
1733 .icmp6_type = ICMP6_MLD_QUERY,
1734 .icmp6_code = 0,
1735 },
1736 .v2_query = {
1737 .grp = grp,
1738 },
1739 /* clang-format on */
1740 };
1741 struct ipv6_ph ph6 = {
1742 .src = pim_ifp->ll_lowest,
1743 .ulpl = htons(sizeof(query)),
1744 .next_hdr = IPPROTO_ICMPV6,
1745 };
1746 union {
1747 char buf[CMSG_SPACE(8) /* hop options */ +
1748 CMSG_SPACE(sizeof(struct in6_pktinfo))];
1749 struct cmsghdr align;
1750 } cmsg = {};
1751 struct cmsghdr *cmh;
1752 struct msghdr mh[1] = {};
1753 struct iovec iov[3];
1754 size_t iov_len;
1755 ssize_t ret, expect_ret;
1756 uint8_t *dp;
1757 struct in6_pktinfo *pktinfo;
1758
1759 if (if_is_loopback(gm_ifp->ifp)) {
1760 /* Linux is a bit odd with multicast on loopback */
1761 ph6.src = in6addr_loopback;
1762 dstaddr.sin6_addr = in6addr_loopback;
1763 } else if (pim_addr_is_any(grp))
1764 dstaddr.sin6_addr = gm_all_hosts;
1765 else
1766 dstaddr.sin6_addr = grp;
1767
1768 query.v2_query.max_resp_code =
1769 mld_max_resp_encode(gm_ifp->cur_max_resp);
1770 query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? gm_ifp->cur_qrv : 0;
1771 if (s_bit)
1772 query.v2_query.flags |= 0x08;
1773 query.v2_query.qqic =
1774 igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000);
1775 query.v2_query.n_src = htons(n_srcs);
1776
1777 ph6.dst = dstaddr.sin6_addr;
1778
1779 /* ph6 not included in sendmsg */
1780 iov[0].iov_base = &ph6;
1781 iov[0].iov_len = sizeof(ph6);
1782 iov[1].iov_base = &query;
1783 if (gm_ifp->cur_version == GM_MLDV1) {
1784 iov_len = 2;
1785 iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt);
1786 } else if (!n_srcs) {
1787 iov_len = 2;
1788 iov[1].iov_len = sizeof(query);
1789 } else {
1790 iov[1].iov_len = sizeof(query);
1791 iov[2].iov_base = (void *)srcs;
1792 iov[2].iov_len = n_srcs * sizeof(srcs[0]);
1793 iov_len = 3;
1794 }
1795
1796 query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);
1797
1798 if (PIM_DEBUG_GM_PACKETS)
1799 zlog_debug(
1800 log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
1801 &pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);
1802
1803 mh->msg_name = &dstaddr;
1804 mh->msg_namelen = sizeof(dstaddr);
1805 mh->msg_iov = iov + 1;
1806 mh->msg_iovlen = iov_len - 1;
1807 mh->msg_control = &cmsg;
1808 mh->msg_controllen = sizeof(cmsg.buf);
1809
1810 cmh = CMSG_FIRSTHDR(mh);
1811 cmh->cmsg_level = IPPROTO_IPV6;
1812 cmh->cmsg_type = IPV6_HOPOPTS;
1813 cmh->cmsg_len = CMSG_LEN(8);
1814 dp = CMSG_DATA(cmh);
1815 *dp++ = 0; /* next header */
1816 *dp++ = 0; /* length (8-byte blocks, minus 1) */
1817 *dp++ = IP6OPT_ROUTER_ALERT; /* router alert */
1818 *dp++ = 2; /* length */
1819 *dp++ = 0; /* value (2 bytes) */
1820 *dp++ = 0; /* value (2 bytes) (0 = MLD) */
1821 *dp++ = 0; /* pad0 */
1822 *dp++ = 0; /* pad0 */
1823
1824 cmh = CMSG_NXTHDR(mh, cmh);
1825 cmh->cmsg_level = IPPROTO_IPV6;
1826 cmh->cmsg_type = IPV6_PKTINFO;
1827 cmh->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
1828 pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmh);
1829 pktinfo->ipi6_ifindex = gm_ifp->ifp->ifindex;
1830 pktinfo->ipi6_addr = gm_ifp->cur_ll_lowest;
1831
1832 expect_ret = iov[1].iov_len;
1833 if (iov_len == 3)
1834 expect_ret += iov[2].iov_len;
1835
1836 frr_with_privs (&pimd_privs) {
1837 ret = sendmsg(gm_ifp->pim->gm_socket, mh, 0);
1838 }
1839
1840 if (ret != expect_ret) {
1841 zlog_warn(log_ifp("failed to send query: %m"));
1842 gm_ifp->stats.tx_query_fail++;
1843 } else {
1844 if (gm_ifp->cur_version == GM_MLDV1) {
1845 if (pim_addr_is_any(grp))
1846 gm_ifp->stats.tx_query_old_general++;
1847 else
1848 gm_ifp->stats.tx_query_old_group++;
1849 } else {
1850 if (pim_addr_is_any(grp))
1851 gm_ifp->stats.tx_query_new_general++;
1852 else if (!n_srcs)
1853 gm_ifp->stats.tx_query_new_group++;
1854 else
1855 gm_ifp->stats.tx_query_new_groupsrc++;
1856 }
1857 }
1858 }
1859
1860 static void gm_t_query(struct event *t)
1861 {
1862 struct gm_if *gm_ifp = EVENT_ARG(t);
1863 unsigned int timer_ms = gm_ifp->cur_query_intv;
1864
1865 if (gm_ifp->n_startup) {
1866 timer_ms /= 4;
1867 gm_ifp->n_startup--;
1868 }
1869
1870 event_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
1871 &gm_ifp->t_query);
1872
1873 gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
1874 }
1875
1876 static void gm_t_sg_query(struct event *t)
1877 {
1878 struct gm_sg *sg = EVENT_ARG(t);
1879
1880 gm_trigger_specific(sg);
1881 }
1882
1883 /* S,G specific queries (triggered by a member leaving) get a little slack
1884 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
1885 */
1886 static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
1887 {
1888 struct gm_if *gm_ifp = pend_gsq->iface;
1889
1890 gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
1891 pend_gsq->s_bit);
1892
1893 gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
1894 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
1895 }
1896
1897 static void gm_t_gsq_pend(struct event *t)
1898 {
1899 struct gm_gsq_pending *pend_gsq = EVENT_ARG(t);
1900
1901 gm_send_specific(pend_gsq);
1902 }
1903
1904 static void gm_trigger_specific(struct gm_sg *sg)
1905 {
1906 struct gm_if *gm_ifp = sg->iface;
1907 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1908 struct gm_gsq_pending *pend_gsq, ref = {};
1909
1910 sg->n_query--;
1911 if (sg->n_query)
1912 event_add_timer_msec(router->master, gm_t_sg_query, sg,
1913 gm_ifp->cur_query_intv_trig,
1914 &sg->t_sg_query);
1915
1916 if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
1917 return;
1918 if (gm_ifp->pim->gm_socket == -1)
1919 return;
1920
1921 if (PIM_DEBUG_GM_TRACE)
1922 zlog_debug(log_sg(sg, "triggered query"));
1923
1924 if (pim_addr_is_any(sg->sgaddr.src)) {
1925 gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
1926 return;
1927 }
1928
1929 ref.grp = sg->sgaddr.grp;
1930 ref.s_bit = sg->query_sbit;
1931
1932 pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
1933 if (!pend_gsq) {
1934 pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
1935 pend_gsq->grp = sg->sgaddr.grp;
1936 pend_gsq->s_bit = sg->query_sbit;
1937 pend_gsq->iface = gm_ifp;
1938 gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);
1939
1940 event_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
1941 &gm_ifp->cfg_timing_fuzz, &pend_gsq->t_send);
1942 }
1943
1944 assert(pend_gsq->n_src < array_size(pend_gsq->srcs));
1945
1946 pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
1947 pend_gsq->n_src++;
1948
1949 if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
1950 EVENT_OFF(pend_gsq->t_send);
1951 gm_send_specific(pend_gsq);
1952 pend_gsq = NULL;
1953 }
1954 }
1955
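/* The MLD socket is shared by all interfaces in a VRF; gm_socket_if_count
 * reference-counts it, so the socket is created when the first interface
 * starts MLD and closed again when the last one stops.
 */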
1956 static void gm_vrf_socket_incref(struct pim_instance *pim)
1957 {
1958 struct vrf *vrf = pim->vrf;
1959 int ret, intval;
1960 struct icmp6_filter filter[1];
1961
1962 if (pim->gm_socket_if_count++ && pim->gm_socket != -1)
1963 return;
1964
1965 ICMP6_FILTER_SETBLOCKALL(filter);
1966 ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter);
1967 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter);
1968 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter);
1969 ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter);
1970
1971 frr_with_privs (&pimd_privs) {
1972 pim->gm_socket = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
1973 vrf->vrf_id, vrf->name);
1974 if (pim->gm_socket < 0) {
1975 zlog_err("(VRF %s) could not create MLD socket: %m",
1976 vrf->name);
1977 return;
1978 }
1979
1980 ret = setsockopt(pim->gm_socket, SOL_ICMPV6, ICMP6_FILTER,
1981 filter, sizeof(filter));
1982 if (ret)
1983 zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
1984 vrf->name);
1985
1986 intval = 1;
1987 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVPKTINFO,
1988 &intval, sizeof(intval));
1989 if (ret)
1990 zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
1991 vrf->name);
1992
1993 intval = 1;
1994 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPOPTS,
1995 &intval, sizeof(intval));
1996 if (ret)
1997 zlog_err("(VRF %s) failed to set IPV6_RECVHOPOPTS: %m",
1998 vrf->name);
1999
2000 intval = 1;
2001 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPLIMIT,
2002 &intval, sizeof(intval));
2003 if (ret)
2004 zlog_err("(VRF %s) failed to set IPV6_RECVHOPLIMIT: %m",
2005 vrf->name);
2006
2007 intval = 0;
2008 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_LOOP,
2009 &intval, sizeof(intval));
2010 if (ret)
2011 zlog_err(
2012 "(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
2013 vrf->name);
2014
2015 intval = 1;
2016 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_HOPS,
2017 &intval, sizeof(intval));
2018 if (ret)
2019 zlog_err(
2020 "(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
2021 vrf->name);
2022
2023 /* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
2024 * RX filtering in Linux. It only means "receive all groups
2025 * that something on the system has joined". To actually
2026 * receive *all* MLD packets - which is what we need -
2027 * multicast routing must be enabled on the interface. And
2028 * this only works for MLD packets specifically.
2029 *
2030 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
2031 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
2032 *
2033 * Also note that the code there explicitly checks for the IPv6
2034 * Router Alert option (which the RFC requires on MLD packets).
2035 * That implies that supporting hosts which erroneously omit
2036 * that option is simply not possible.
2037 */
2038 intval = 1;
2039 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_ALL,
2040 &intval, sizeof(intval));
2041 if (ret)
2042 zlog_info(
2043 "(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
2044 vrf->name);
2045 }
2046
2047 event_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
2048 &pim->t_gm_recv);
2049 }
2050
2051 static void gm_vrf_socket_decref(struct pim_instance *pim)
2052 {
2053 if (--pim->gm_socket_if_count)
2054 return;
2055
2056 EVENT_OFF(pim->t_gm_recv);
2057 close(pim->gm_socket);
2058 pim->gm_socket = -1;
2059 }
2060
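/* Per-interface MLD startup: allocate the gm_if state, derive running timer
 * values from the configured ones (seconds/deciseconds scaled to ms), and
 * join ff02::16 so this router receives MLDv2 reports on the interface.
 */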
2061 static void gm_start(struct interface *ifp)
2062 {
2063 struct pim_interface *pim_ifp = ifp->info;
2064 struct gm_if *gm_ifp;
2065
2066 assert(pim_ifp);
2067 assert(pim_ifp->pim);
2068 assert(pim_ifp->mroute_vif_index >= 0);
2069 assert(!pim_ifp->mld);
2070
2071 gm_vrf_socket_incref(pim_ifp->pim);
2072
2073 gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp));
2074 gm_ifp->ifp = ifp;
2075 pim_ifp->mld = gm_ifp;
2076 gm_ifp->pim = pim_ifp->pim;
2077 monotime(&gm_ifp->started);
2078
2079 zlog_info(log_ifp("starting MLD"));
2080
2081 if (pim_ifp->mld_version == 1)
2082 gm_ifp->cur_version = GM_MLDV1;
2083 else
2084 gm_ifp->cur_version = GM_MLDV2;
2085
2086 gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable;
2087 gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
2088 gm_ifp->cur_query_intv_trig =
2089 pim_ifp->gm_specific_query_max_response_time_dsec * 100;
2090 gm_ifp->cur_max_resp = pim_ifp->gm_query_max_response_time_dsec * 100;
2091 gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;
2092
2093 gm_ifp->cfg_timing_fuzz.tv_sec = 0;
2094 gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;
2095
2096 gm_sgs_init(gm_ifp->sgs);
2097 gm_subscribers_init(gm_ifp->subscribers);
2098 gm_packet_expires_init(gm_ifp->expires);
2099 gm_grp_pends_init(gm_ifp->grp_pends);
2100 gm_gsq_pends_init(gm_ifp->gsq_pends);
2101
2102 frr_with_privs (&pimd_privs) {
2103 struct ipv6_mreq mreq;
2104 int ret;
2105
2106 /* all-MLDv2 group */
2107 mreq.ipv6mr_multiaddr = gm_all_routers;
2108 mreq.ipv6mr_interface = ifp->ifindex;
2109 ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
2110 IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
2111 if (ret)
2112 zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
2113 ifp->name);
2114 }
2115 }
2116
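/* Flush all dynamic state on an interface: queued report packets, pending
 * *,G and S,G queries, S,G entries and subscribers. Used by
 * gm_ifp_teardown() below; the asserts check that the containers end up
 * empty.
 */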
2117 void gm_group_delete(struct gm_if *gm_ifp)
2118 {
2119 struct gm_sg *sg;
2120 struct gm_packet_state *pkt;
2121 struct gm_grp_pending *pend_grp;
2122 struct gm_gsq_pending *pend_gsq;
2123 struct gm_subscriber *subscriber;
2124
2125 while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
2126 gm_packet_drop(pkt, false);
2127
2128 while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
2129 EVENT_OFF(pend_grp->t_expire);
2130 XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
2131 }
2132
2133 while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
2134 EVENT_OFF(pend_gsq->t_send);
2135 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
2136 }
2137
2138 while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
2139 EVENT_OFF(sg->t_sg_expire);
2140 assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
2141 &sg->sgaddr);
2142 assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
2143 &sg->sgaddr);
2144
2145 gm_sg_free(sg);
2146 }
2147 while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
2148 assertf(!gm_packets_count(subscriber->packets), "%pPA",
2149 &subscriber->addr);
2150 XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
2151 }
2152 }
2153
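/* Stop MLD on an interface: cancel timers, leave ff02::16, drop the VRF
 * socket reference and free all per-interface state.
 */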
2154 void gm_ifp_teardown(struct interface *ifp)
2155 {
2156 struct pim_interface *pim_ifp = ifp->info;
2157 struct gm_if *gm_ifp;
2158
2159 if (!pim_ifp || !pim_ifp->mld)
2160 return;
2161
2162 gm_ifp = pim_ifp->mld;
2163 gm_ifp->stopping = true;
2164 if (PIM_DEBUG_GM_EVENTS)
2165 zlog_debug(log_ifp("MLD stop"));
2166
2167 EVENT_OFF(gm_ifp->t_query);
2168 EVENT_OFF(gm_ifp->t_other_querier);
2169 EVENT_OFF(gm_ifp->t_expire);
2170
2171 frr_with_privs (&pimd_privs) {
2172 struct ipv6_mreq mreq;
2173 int ret;
2174
2175 /* all-MLDv2 group */
2176 mreq.ipv6mr_multiaddr = gm_all_routers;
2177 mreq.ipv6mr_interface = ifp->ifindex;
2178 ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
2179 IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
2180 if (ret)
2181 zlog_err(
2182 "(%s) failed to leave ff02::16 (all-MLDv2): %m",
2183 ifp->name);
2184 }
2185
2186 gm_vrf_socket_decref(gm_ifp->pim);
2187
2188 gm_group_delete(gm_ifp);
2189
2190 gm_grp_pends_fini(gm_ifp->grp_pends);
2191 gm_packet_expires_fini(gm_ifp->expires);
2192 gm_subscribers_fini(gm_ifp->subscribers);
2193 gm_sgs_fini(gm_ifp->sgs);
2194
2195 XFREE(MTYPE_GM_IFACE, gm_ifp);
2196 pim_ifp->mld = NULL;
2197 }
2198
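/* React to a change of the interface's lowest link-local address. MLD
 * querier election is won by the lowest address, so this may start or stop
 * the local querier; when we (re)become querier, startup queries are sent
 * with n_startup set to the robustness value.
 */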
2199 static void gm_update_ll(struct interface *ifp)
2200 {
2201 struct pim_interface *pim_ifp = ifp->info;
2202 struct gm_if *gm_ifp = pim_ifp->mld;
2203 bool was_querier;
2204
2205 was_querier =
2206 !IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
2207 !pim_addr_is_any(gm_ifp->querier);
2208
2209 gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
2210 if (was_querier)
2211 gm_ifp->querier = pim_ifp->ll_lowest;
2212 EVENT_OFF(gm_ifp->t_query);
2213
2214 if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
2215 if (was_querier)
2216 zlog_info(log_ifp(
2217 "lost link-local address, stopping querier"));
2218 return;
2219 }
2220
2221 if (was_querier)
2222 zlog_info(log_ifp("new link-local %pPA while querier"),
2223 &gm_ifp->cur_ll_lowest);
2224 else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
2225 pim_addr_is_any(gm_ifp->querier)) {
2226 zlog_info(log_ifp("new link-local %pPA, becoming querier"),
2227 &gm_ifp->cur_ll_lowest);
2228 gm_ifp->querier = gm_ifp->cur_ll_lowest;
2229 } else
2230 return;
2231
2232 gm_ifp->n_startup = gm_ifp->cur_qrv;
2233 event_execute(router->master, gm_t_query, gm_ifp, 0);
2234 }
2235
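/* Reconcile runtime MLD state with interface/PIM configuration: start or
 * tear down MLD as needed and copy changed timer/version settings; any
 * change that affects querying triggers gm_bump_querier().
 */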
2236 void gm_ifp_update(struct interface *ifp)
2237 {
2238 struct pim_interface *pim_ifp = ifp->info;
2239 struct gm_if *gm_ifp;
2240 bool changed = false;
2241
2242 if (!pim_ifp)
2243 return;
2244 if (!if_is_operative(ifp) || !pim_ifp->pim ||
2245 pim_ifp->mroute_vif_index < 0) {
2246 gm_ifp_teardown(ifp);
2247 return;
2248 }
2249
2250 /*
2251 * If IPv6 MLD is not enabled on the interface, do not start MLD activities.
2252 */
2253 if (!pim_ifp->gm_enable)
2254 return;
2255
2256 if (!pim_ifp->mld) {
2257 changed = true;
2258 gm_start(ifp);
2259 }
2260
2261 gm_ifp = pim_ifp->mld;
2262 if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
2263 gm_update_ll(ifp);
2264
2265 unsigned int cfg_query_intv = pim_ifp->gm_default_query_interval * 1000;
2266
2267 if (gm_ifp->cur_query_intv != cfg_query_intv) {
2268 gm_ifp->cur_query_intv = cfg_query_intv;
2269 changed = true;
2270 }
2271
2272 unsigned int cfg_query_intv_trig =
2273 pim_ifp->gm_specific_query_max_response_time_dsec * 100;
2274
2275 if (gm_ifp->cur_query_intv_trig != cfg_query_intv_trig) {
2276 gm_ifp->cur_query_intv_trig = cfg_query_intv_trig;
2277 changed = true;
2278 }
2279
2280 unsigned int cfg_max_response =
2281 pim_ifp->gm_query_max_response_time_dsec * 100;
2282
2283 if (gm_ifp->cur_max_resp != cfg_max_response)
2284 gm_ifp->cur_max_resp = cfg_max_response;
2285
2286 if (gm_ifp->cur_lmqc != pim_ifp->gm_last_member_query_count)
2287 gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;
2288
2289 enum gm_version cfg_version;
2290
2291 if (pim_ifp->mld_version == 1)
2292 cfg_version = GM_MLDV1;
2293 else
2294 cfg_version = GM_MLDV2;
2295 if (gm_ifp->cur_version != cfg_version) {
2296 gm_ifp->cur_version = cfg_version;
2297 changed = true;
2298 }
2299
2300 if (changed) {
2301 if (PIM_DEBUG_GM_TRACE)
2302 zlog_debug(log_ifp(
2303 "MLD querier config changed, querying"));
2304 gm_bump_querier(gm_ifp);
2305 }
2306 }
2307
2308 /*
2309 * CLI (show commands only)
2310 */
2311
2312 #include "lib/command.h"
2313
2314 #include "pimd/pim6_mld_clippy.c"
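/* pim6_mld_clippy.c is generated at build time by FRR's clippy preprocessor
 * from the DEFPY() blocks below; it provides the argument-parsing glue
 * (e.g. the vrf_str/ifname/detail/json variables).
 */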
2315
2316 static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
2317 int *err)
2318 {
2319 struct vrf *ret;
2320
2321 if (!vrf_str)
2322 return vrf_lookup_by_id(VRF_DEFAULT);
2323 if (!strcmp(vrf_str, "all"))
2324 return NULL;
2325 ret = vrf_lookup_by_name(vrf_str);
2326 if (ret)
2327 return ret;
2328
2329 vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str);
2330 *err = CMD_WARNING;
2331 return NULL;
2332 }
2333
2334 static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp)
2335 {
2336 struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
2337 struct gm_if *gm_ifp;
2338 bool querier;
2339 size_t i;
2340
2341 if (!pim_ifp) {
2342 vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name);
2343 return;
2344 }
2345
2346 gm_ifp = pim_ifp->mld;
2347 if (!gm_ifp) {
2348 vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name);
2349 return;
2350 }
2351
2352 querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
2353
2354 vty_out(vty, "Interface %s: MLD running\n", ifp->name);
2355 vty_out(vty, " Uptime: %pTVMs\n", &gm_ifp->started);
2356 vty_out(vty, " MLD version: %d\n", gm_ifp->cur_version);
2357 vty_out(vty, " Querier: %pPA%s\n", &gm_ifp->querier,
2358 querier ? " (this system)" : "");
2359 vty_out(vty, " Query timer: %pTH\n", gm_ifp->t_query);
2360 vty_out(vty, " Other querier timer: %pTH\n",
2361 gm_ifp->t_other_querier);
2362 vty_out(vty, " Robustness value: %u\n", gm_ifp->cur_qrv);
2363 vty_out(vty, " Query interval: %ums\n",
2364 gm_ifp->cur_query_intv);
2365 vty_out(vty, " Query response timer: %ums\n", gm_ifp->cur_max_resp);
2366 vty_out(vty, " Last member query intv.: %ums\n",
2367 gm_ifp->cur_query_intv_trig);
2368 vty_out(vty, " %u expiry timers from general queries:\n",
2369 gm_ifp->n_pending);
2370 for (i = 0; i < gm_ifp->n_pending; i++) {
2371 struct gm_general_pending *p = &gm_ifp->pending[i];
2372
2373 vty_out(vty, " %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
2374 &p->query, &p->expiry);
2375 }
2376 vty_out(vty, " %zu expiry timers from *,G queries\n",
2377 gm_grp_pends_count(gm_ifp->grp_pends));
2378 vty_out(vty, " %zu expiry timers from S,G queries\n",
2379 gm_gsq_pends_count(gm_ifp->gsq_pends));
2380 vty_out(vty, " %zu total *,G/S,G from %zu hosts in %zu bundles\n",
2381 gm_sgs_count(gm_ifp->sgs),
2382 gm_subscribers_count(gm_ifp->subscribers),
2383 gm_packet_expires_count(gm_ifp->expires));
2384 vty_out(vty, "\n");
2385 }
2386
2387 static void gm_show_if_one(struct vty *vty, struct interface *ifp,
2388 json_object *js_if, struct ttable *tt)
2389 {
2390 struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
2391 struct gm_if *gm_ifp = pim_ifp->mld;
2392 bool querier;
2393
2394 querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
2395
2396 if (js_if) {
2397 json_object_string_add(js_if, "name", ifp->name);
2398 json_object_string_addf(js_if, "address", "%pPA",
2399 &pim_ifp->primary_address);
2400 json_object_string_add(js_if, "state", "up");
2401 json_object_string_addf(js_if, "version", "%d",
2402 gm_ifp->cur_version);
2403 json_object_string_addf(js_if, "upTime", "%pTVMs",
2404 &gm_ifp->started);
2405 json_object_boolean_add(js_if, "querier", querier);
2406 json_object_string_addf(js_if, "querierIp", "%pPA",
2407 &gm_ifp->querier);
2408 if (querier)
2409 json_object_string_addf(js_if, "queryTimer", "%pTH",
2410 gm_ifp->t_query);
2411 else
2412 json_object_string_addf(js_if, "otherQuerierTimer",
2413 "%pTH",
2414 gm_ifp->t_other_querier);
2415 json_object_int_add(js_if, "timerRobustnessValue",
2416 gm_ifp->cur_qrv);
2417 json_object_int_add(js_if, "lastMemberQueryCount",
2418 gm_ifp->cur_lmqc);
2419 json_object_int_add(js_if, "timerQueryIntervalMsec",
2420 gm_ifp->cur_query_intv);
2421 json_object_int_add(js_if, "timerQueryResponseTimerMsec",
2422 gm_ifp->cur_max_resp);
2423 json_object_int_add(js_if, "timerLastMemberQueryIntervalMsec",
2424 gm_ifp->cur_query_intv_trig);
2425 } else {
2426 ttable_add_row(tt, "%s|%s|%pPAs|%d|%s|%pPAs|%pTH|%pTVMs",
2427 ifp->name, "up", &pim_ifp->primary_address,
2428 gm_ifp->cur_version, querier ? "local" : "other",
2429 &gm_ifp->querier, gm_ifp->t_query,
2430 &gm_ifp->started);
2431 }
2432 }
2433
2434 static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname,
2435 bool detail, json_object *js)
2436 {
2437 struct interface *ifp;
2438 json_object *js_vrf = NULL;
2439 struct pim_interface *pim_ifp;
2440 struct ttable *tt = NULL;
2441 char *table = NULL;
2442
2443 if (js) {
2444 js_vrf = json_object_new_object();
2445 json_object_object_add(js, vrf->name, js_vrf);
2446 }
2447
2448 if (!js && !detail) {
2449 /* Prepare table. */
2450 tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
2451 ttable_add_row(
2452 tt,
2453 "Interface|State|Address|V|Querier|QuerierIp|Query Timer|Uptime");
2454 tt->style.cell.rpad = 2;
2455 tt->style.corner = '+';
2456 ttable_restyle(tt);
2457 }
2458
2459 FOR_ALL_INTERFACES (vrf, ifp) {
2460 json_object *js_if = NULL;
2461
2462 if (ifname && strcmp(ifp->name, ifname))
2463 continue;
2464 if (detail && !js) {
2465 gm_show_if_one_detail(vty, ifp);
2466 continue;
2467 }
2468
2469 pim_ifp = ifp->info;
2470
2471 if (!pim_ifp || !pim_ifp->mld)
2472 continue;
2473
2474 if (js) {
2475 js_if = json_object_new_object();
2476 json_object_object_add(js_vrf, ifp->name, js_if);
2477 }
2478
2479 gm_show_if_one(vty, ifp, js_if, tt);
2480 }
2481
2482 /* Dump the generated table. */
2483 if (!js && !detail) {
2484 table = ttable_dump(tt, "\n");
2485 vty_out(vty, "%s\n", table);
2486 XFREE(MTYPE_TMP, table);
2487 ttable_del(tt);
2488 }
2489 }
2490
2491 static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname,
2492 bool detail, json_object *js)
2493 {
2494 if (vrf)
2495 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2496 else
2497 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2498 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2499 }
2500
2501 DEFPY(gm_show_interface,
2502 gm_show_interface_cmd,
2503 "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME | detail$detail] [json$json]",
2504 SHOW_STR
2505 IPV6_STR
2506 MLD_STR
2507 VRF_FULL_CMD_HELP_STR
2508 "MLD interface information\n"
2509 "Interface name\n"
2510 "Detailed output\n"
2511 JSON_STR)
2512 {
2513 int ret = CMD_SUCCESS;
2514 struct vrf *vrf;
2515 json_object *js = NULL;
2516
2517 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2518 if (ret != CMD_SUCCESS)
2519 return ret;
2520
2521 if (json)
2522 js = json_object_new_object();
2523 gm_show_if(vty, vrf, ifname, !!detail, js);
2524 return vty_json(vty, js);
2525 }
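/* Example invocations (from the command template above):
 *   show ipv6 mld interface
 *   show ipv6 mld vrf all interface detail
 *   show ipv6 mld interface IFNAME json
 */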
2526
2527 static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp,
2528 json_object *js_if)
2529 {
2530 struct gm_if_stats *stats = &gm_ifp->stats;
2531 /* clang-format off */
2532 struct {
2533 const char *text;
2534 const char *js_key;
2535 uint64_t *val;
2536 } *item, items[] = {
2537 { "v2 reports received", "rxV2Reports", &stats->rx_new_report },
2538 { "v1 reports received", "rxV1Reports", &stats->rx_old_report },
2539 { "v1 done received", "rxV1Done", &stats->rx_old_leave },
2540
2541 { "v2 *,* queries received", "rxV2QueryGeneral", &stats->rx_query_new_general },
2542 { "v2 *,G queries received", "rxV2QueryGroup", &stats->rx_query_new_group },
2543 { "v2 S,G queries received", "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc },
2544 { "v2 S-bit queries received", "rxV2QuerySBit", &stats->rx_query_new_sbit },
2545 { "v1 *,* queries received", "rxV1QueryGeneral", &stats->rx_query_old_general },
2546 { "v1 *,G queries received", "rxV1QueryGroup", &stats->rx_query_old_group },
2547
2548 { "v2 *,* queries sent", "txV2QueryGeneral", &stats->tx_query_new_general },
2549 { "v2 *,G queries sent", "txV2QueryGroup", &stats->tx_query_new_group },
2550 { "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc },
2551 { "v1 *,* queries sent", "txV1QueryGeneral", &stats->tx_query_old_general },
2552 { "v1 *,G queries sent", "txV1QueryGroup", &stats->tx_query_old_group },
2553 { "TX errors", "txErrors", &stats->tx_query_fail },
2554
2555 { "RX dropped (checksum error)", "rxDropChecksum", &stats->rx_drop_csum },
2556 { "RX dropped (invalid source)", "rxDropSrcAddr", &stats->rx_drop_srcaddr },
2557 { "RX dropped (invalid dest.)", "rxDropDstAddr", &stats->rx_drop_dstaddr },
2558 { "RX dropped (missing alert)", "rxDropRtrAlert", &stats->rx_drop_ra },
2559 { "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed },
2560 { "RX truncated reports", "rxTruncatedRep", &stats->rx_trunc_report },
2561 };
2562 /* clang-format on */
2563
2564 for (item = items; item < items + array_size(items); item++) {
2565 if (js_if)
2566 json_object_int_add(js_if, item->js_key, *item->val);
2567 else
2568 vty_out(vty, " %-30s %" PRIu64 "\n", item->text,
2569 *item->val);
2570 }
2571 }
2572
2573 static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf,
2574 const char *ifname, json_object *js)
2575 {
2576 struct interface *ifp;
2577 json_object *js_vrf;
2578
2579 if (js) {
2580 js_vrf = json_object_new_object();
2581 json_object_object_add(js, vrf->name, js_vrf);
2582 }
2583
2584 FOR_ALL_INTERFACES (vrf, ifp) {
2585 struct pim_interface *pim_ifp;
2586 struct gm_if *gm_ifp;
2587 json_object *js_if = NULL;
2588
2589 if (ifname && strcmp(ifp->name, ifname))
2590 continue;
2591
2592 if (!ifp->info)
2593 continue;
2594 pim_ifp = ifp->info;
2595 if (!pim_ifp->mld)
2596 continue;
2597 gm_ifp = pim_ifp->mld;
2598
2599 if (js) {
2600 js_if = json_object_new_object();
2601 json_object_object_add(js_vrf, ifp->name, js_if);
2602 } else {
2603 vty_out(vty, "Interface: %s\n", ifp->name);
2604 }
2605 gm_show_stats_one(vty, gm_ifp, js_if);
2606 if (!js)
2607 vty_out(vty, "\n");
2608 }
2609 }
2610
2611 DEFPY(gm_show_interface_stats,
2612 gm_show_interface_stats_cmd,
2613 "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
2614 SHOW_STR
2615 IPV6_STR
2616 MLD_STR
2617 VRF_FULL_CMD_HELP_STR
2618 "MLD statistics\n"
2619 INTERFACE_STR
2620 "Interface name\n"
2621 JSON_STR)
2622 {
2623 int ret = CMD_SUCCESS;
2624 struct vrf *vrf;
2625 json_object *js = NULL;
2626
2627 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2628 if (ret != CMD_SUCCESS)
2629 return ret;
2630
2631 if (json)
2632 js = json_object_new_object();
2633
2634 if (vrf)
2635 gm_show_stats_vrf(vty, vrf, ifname, js);
2636 else
2637 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2638 gm_show_stats_vrf(vty, vrf, ifname, js);
2639 return vty_json(vty, js);
2640 }
2641
2642 static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp,
2643 const struct prefix_ipv6 *groups,
2644 const struct prefix_ipv6 *sources, bool detail,
2645 json_object *js_if)
2646 {
2647 struct gm_sg *sg, *sg_start;
2648 json_object *js_group = NULL;
2649 pim_addr js_grpaddr = PIMADDR_ANY;
2650 struct gm_subscriber sub_ref = {}, *sub_untracked;
2651
2652 if (groups) {
2653 struct gm_sg sg_ref = {};
2654
2655 sg_ref.sgaddr.grp = pim_addr_from_prefix(groups);
2656 sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
2657 } else
2658 sg_start = gm_sgs_first(gm_ifp->sgs);
2659
2660 sub_ref.addr = gm_dummy_untracked;
2661 sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref);
2662 /* NB: sub_untracked may be NULL if no untracked joins exist */
2663
2664 frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
2665 struct timeval *recent = NULL, *untracked = NULL;
2666 json_object *js_src;
2667
2668 if (groups) {
2669 struct prefix grp_p;
2670
2671 pim_addr_to_prefix(&grp_p, sg->sgaddr.grp);
2672 if (!prefix_match(groups, &grp_p))
2673 break;
2674 }
2675
2676 if (sources) {
2677 struct prefix src_p;
2678
2679 pim_addr_to_prefix(&src_p, sg->sgaddr.src);
2680 if (!prefix_match(sources, &src_p))
2681 continue;
2682 }
2683
2684 if (sg->most_recent) {
2685 struct gm_packet_state *packet;
2686
2687 packet = gm_packet_sg2state(sg->most_recent);
2688 recent = &packet->received;
2689 }
2690
2691 if (sub_untracked) {
2692 struct gm_packet_state *packet;
2693 struct gm_packet_sg *item;
2694
2695 item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked);
2696 if (item) {
2697 packet = gm_packet_sg2state(item);
2698 untracked = &packet->received;
2699 }
2700 }
2701
2702 if (!js_if) {
2703 FMT_NSTD_BEGIN; /* %.0p */
2704 vty_out(vty,
2705 "%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
2706 &sg->sgaddr.grp, &sg->sgaddr.src,
2707 gm_states[sg->state], recent, untracked,
2708 &sg->created);
2709
2710 if (!detail)
2711 continue;
2712
2713 struct gm_packet_sg *item;
2714 struct gm_packet_state *packet;
2715
2716 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2717 packet = gm_packet_sg2state(item);
2718
2719 if (packet->subscriber == sub_untracked)
2720 continue;
2721 vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
2722 &packet->subscriber->addr, "(JOIN)",
2723 &packet->received);
2724 }
2725 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2726 packet = gm_packet_sg2state(item);
2727
2728 if (packet->subscriber == sub_untracked)
2729 continue;
2730 vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
2731 &packet->subscriber->addr, "(PRUNE)",
2732 &packet->received);
2733 }
2734 FMT_NSTD_END; /* %.0p */
2735 continue;
2736 }
2737 /* if (js_if) */
2738
2739 if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) {
2740 js_group = json_object_new_object();
2741 json_object_object_addf(js_if, js_group, "%pPA",
2742 &sg->sgaddr.grp);
2743 js_grpaddr = sg->sgaddr.grp;
2744 }
2745
2746 js_src = json_object_new_object();
2747 json_object_object_addf(js_group, js_src, "%pPAs",
2748 &sg->sgaddr.src);
2749
2750 json_object_string_add(js_src, "state", gm_states[sg->state]);
2751 json_object_string_addf(js_src, "created", "%pTVMs",
2752 &sg->created);
2753 json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent);
2754
2755 if (untracked)
2756 json_object_string_addf(js_src, "untrackedLastSeen",
2757 "%pTVMs", untracked);
2758 if (!detail)
2759 continue;
2760
2761 json_object *js_subs;
2762 struct gm_packet_sg *item;
2763 struct gm_packet_state *packet;
2764
2765 js_subs = json_object_new_object();
2766 json_object_object_add(js_src, "joinedBy", js_subs);
2767 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2768 packet = gm_packet_sg2state(item);
2769 if (packet->subscriber == sub_untracked)
2770 continue;
2771
2772 json_object *js_sub;
2773
2774 js_sub = json_object_new_object();
2775 json_object_object_addf(js_subs, js_sub, "%pPA",
2776 &packet->subscriber->addr);
2777 json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
2778 &packet->received);
2779 }
2780
2781 js_subs = json_object_new_object();
2782 json_object_object_add(js_src, "prunedBy", js_subs);
2783 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2784 packet = gm_packet_sg2state(item);
2785 if (packet->subscriber == sub_untracked)
2786 continue;
2787
2788 json_object *js_sub;
2789
2790 js_sub = json_object_new_object();
2791 json_object_object_addf(js_subs, js_sub, "%pPA",
2792 &packet->subscriber->addr);
2793 json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
2794 &packet->received);
2795 }
2796 }
2797 }
2798
2799 static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf,
2800 const char *ifname,
2801 const struct prefix_ipv6 *groups,
2802 const struct prefix_ipv6 *sources, bool detail,
2803 json_object *js)
2804 {
2805 struct interface *ifp;
2806 json_object *js_vrf;
2807
2808 if (js) {
2809 js_vrf = json_object_new_object();
2810 json_object_string_add(js_vrf, "vrf", vrf->name);
2811 json_object_object_add(js, vrf->name, js_vrf);
2812 }
2813
2814 FOR_ALL_INTERFACES (vrf, ifp) {
2815 struct pim_interface *pim_ifp;
2816 struct gm_if *gm_ifp;
2817 json_object *js_if = NULL;
2818
2819 if (ifname && strcmp(ifp->name, ifname))
2820 continue;
2821
2822 if (!ifp->info)
2823 continue;
2824 pim_ifp = ifp->info;
2825 if (!pim_ifp->mld)
2826 continue;
2827 gm_ifp = pim_ifp->mld;
2828
2829 if (js) {
2830 js_if = json_object_new_object();
2831 json_object_object_add(js_vrf, ifp->name, js_if);
2832 }
2833
2834 if (!js && !ifname)
2835 vty_out(vty, "\nOn interface %s:\n", ifp->name);
2836
2837 gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if);
2838 }
2839 }
2840
2841 DEFPY(gm_show_interface_joins,
2842 gm_show_interface_joins_cmd,
2843 "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
2844 SHOW_STR
2845 IPV6_STR
2846 MLD_STR
2847 VRF_FULL_CMD_HELP_STR
2848 "MLD joined groups & sources\n"
2849 INTERFACE_STR
2850 "Interface name\n"
2851 "Limit output to group range\n"
2852 "Show groups covered by this prefix\n"
2853 "Limit output to source range\n"
2854 "Show sources covered by this prefix\n"
2855 "Show details, including tracked receivers\n"
2856 JSON_STR)
2857 {
2858 int ret = CMD_SUCCESS;
2859 struct vrf *vrf;
2860 json_object *js = NULL;
2861
2862 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2863 if (ret != CMD_SUCCESS)
2864 return ret;
2865
2866 if (json)
2867 js = json_object_new_object();
2868 else
2869 vty_out(vty, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
2870 "Source", "State", "LastSeen", "NonTrkSeen", "Created");
2871
2872 if (vrf)
2873 gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail,
2874 js);
2875 else
2876 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2877 gm_show_joins_vrf(vty, vrf, ifname, groups, sources,
2878 !!detail, js);
2879 return vty_json(vty, js);
2880 }
2881
2882 static void gm_show_groups(struct vty *vty, struct vrf *vrf, bool uj)
2883 {
2884 struct interface *ifp;
2885 struct ttable *tt = NULL;
2886 char *table;
2887 json_object *json = NULL;
2888 json_object *json_iface = NULL;
2889 json_object *json_group = NULL;
2890 json_object *json_groups = NULL;
2891 struct pim_instance *pim = vrf->info;
2892
2893 if (uj) {
2894 json = json_object_new_object();
2895 json_object_int_add(json, "totalGroups", pim->gm_group_count);
2896 json_object_int_add(json, "watermarkLimit",
2897 pim->gm_watermark_limit);
2898 } else {
2899 /* Prepare table. */
2900 tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
2901 ttable_add_row(tt, "Interface|Group|Version|Uptime");
2902 tt->style.cell.rpad = 2;
2903 tt->style.corner = '+';
2904 ttable_restyle(tt);
2905
2906 vty_out(vty, "Total MLD groups: %u\n", pim->gm_group_count);
2907 vty_out(vty, "Watermark warn limit(%s): %u\n",
2908 pim->gm_watermark_limit ? "Set" : "Not Set",
2909 pim->gm_watermark_limit);
2910 }
2911
2912 /* scan interfaces */
2913 FOR_ALL_INTERFACES (vrf, ifp) {
2914
2915 struct pim_interface *pim_ifp = ifp->info;
2916 struct gm_if *gm_ifp;
2917 struct gm_sg *sg;
2918
2919 if (!pim_ifp)
2920 continue;
2921
2922 gm_ifp = pim_ifp->mld;
2923 if (!gm_ifp)
2924 continue;
2925
2926 /* scan mld groups */
2927 frr_each (gm_sgs, gm_ifp->sgs, sg) {
2928
2929 if (uj) {
2930 json_object_object_get_ex(json, ifp->name,
2931 &json_iface);
2932
2933 if (!json_iface) {
2934 json_iface = json_object_new_object();
2935 json_object_pim_ifp_add(json_iface,
2936 ifp);
2937 json_object_object_add(json, ifp->name,
2938 json_iface);
2939 json_groups = json_object_new_array();
2940 json_object_object_add(json_iface,
2941 "groups",
2942 json_groups);
2943 }
2944
2945 json_group = json_object_new_object();
2946 json_object_string_addf(json_group, "group",
2947 "%pPAs",
2948 &sg->sgaddr.grp);
2949
2950 json_object_int_add(json_group, "version",
2951 pim_ifp->mld_version);
2952 json_object_string_addf(json_group, "uptime",
2953 "%pTVMs", &sg->created);
2954 json_object_array_add(json_groups, json_group);
2955 } else {
2956 ttable_add_row(tt, "%s|%pPAs|%d|%pTVMs",
2957 ifp->name, &sg->sgaddr.grp,
2958 pim_ifp->mld_version,
2959 &sg->created);
2960 }
2961 } /* scan gm groups */
2962 } /* scan interfaces */
2963
2964 if (uj)
2965 vty_json(vty, json);
2966 else {
2967 /* Dump the generated table. */
2968 table = ttable_dump(tt, "\n");
2969 vty_out(vty, "%s\n", table);
2970 XFREE(MTYPE_TMP, table);
2971 ttable_del(tt);
2972 }
2973 }
2974
2975 DEFPY(gm_show_mld_groups,
2976 gm_show_mld_groups_cmd,
2977 "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
2978 SHOW_STR
2979 IPV6_STR
2980 MLD_STR
2981 VRF_FULL_CMD_HELP_STR
2982 MLD_GROUP_STR
2983 JSON_STR)
2984 {
2985 int ret = CMD_SUCCESS;
2986 struct vrf *vrf;
2987
2988 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2989 if (ret != CMD_SUCCESS)
2990 return ret;
2991
2992 if (vrf)
2993 gm_show_groups(vty, vrf, !!json);
2994 else
2995 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2996 gm_show_groups(vty, vrf, !!json);
2997
2998 return CMD_SUCCESS;
2999 }
3000
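/* Debug command: dumps the raw per-interface MLD state (timers, pending
 * general queries, S,G entries with their positive/negative subscriptions,
 * and per-subscriber packet state) for one interface in the default VRF.
 */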
3001 DEFPY(gm_debug_show,
3002 gm_debug_show_cmd,
3003 "debug show mld interface IFNAME",
3004 DEBUG_STR
3005 SHOW_STR
3006 MLD_STR
3007 INTERFACE_STR
3008 "interface name\n")
3009 {
3010 struct interface *ifp;
3011 struct pim_interface *pim_ifp;
3012 struct gm_if *gm_ifp;
3013
3014 ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
3015 if (!ifp) {
3016 vty_out(vty, "%% no such interface: %pSQq\n", ifname);
3017 return CMD_WARNING;
3018 }
3019
3020 pim_ifp = ifp->info;
3021 if (!pim_ifp) {
3022 vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname);
3023 return CMD_WARNING;
3024 }
3025
3026 gm_ifp = pim_ifp->mld;
3027 if (!gm_ifp) {
3028 vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname);
3029 return CMD_WARNING;
3030 }
3031
3032 vty_out(vty, "querier: %pPA\n", &gm_ifp->querier);
3033 vty_out(vty, "ll_lowest: %pPA\n\n", &pim_ifp->ll_lowest);
3034 vty_out(vty, "t_query: %pTHD\n", gm_ifp->t_query);
3035 vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier);
3036 vty_out(vty, "t_expire: %pTHD\n", gm_ifp->t_expire);
3037
3038 vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending);
3039 for (size_t i = 0; i < gm_ifp->n_pending; i++) {
3040 int64_t query, expiry;
3041
3042 query = monotime_since(&gm_ifp->pending[i].query, NULL);
3043 expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL);
3044
3045 vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n",
3046 i, query / 1000, expiry / 1000);
3047 }
3048
3049 struct gm_sg *sg;
3050 struct gm_packet_state *pkt;
3051 struct gm_packet_sg *item;
3052 struct gm_subscriber *subscriber;
3053
3054 vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs));
3055 frr_each (gm_sgs, gm_ifp->sgs, sg) {
3056 vty_out(vty, "\t%pSG t_expire=%pTHD\n", &sg->sgaddr,
3057 sg->t_sg_expire);
3058
3059 vty_out(vty, "\t @pos:%zu\n",
3060 gm_packet_sg_subs_count(sg->subs_positive));
3061 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
3062 pkt = gm_packet_sg2state(item);
3063
3064 vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n",
3065 item->is_src ? "S" : "",
3066 item->is_excl ? "E" : "",
3067 &pkt->subscriber->addr, pkt->subscriber, pkt,
3068 item->offset);
3069
3070 assert(item->sg == sg);
3071 }
3072 vty_out(vty, "\t @neg:%zu\n",
3073 gm_packet_sg_subs_count(sg->subs_negative));
3074 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
3075 pkt = gm_packet_sg2state(item);
3076
3077 vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n",
3078 item->is_src ? "S" : "",
3079 item->is_excl ? "E" : "",
3080 &pkt->subscriber->addr, pkt->subscriber, pkt,
3081 item->offset);
3082
3083 assert(item->sg == sg);
3084 }
3085 }
3086
3087 vty_out(vty, "\n%zu subscribers:\n",
3088 gm_subscribers_count(gm_ifp->subscribers));
3089 frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) {
3090 vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr,
3091 subscriber, gm_packets_count(subscriber->packets));
3092
3093 frr_each (gm_packets, subscriber->packets, pkt) {
3094 vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n",
3095 pkt,
3096 monotime_since(&pkt->received, NULL) *
3097 0.000001f,
3098 pkt->n_active, pkt->n_sg);
3099
3100 for (size_t i = 0; i < pkt->n_sg; i++) {
3101 item = pkt->items + i;
3102
3103 vty_out(vty, "\t\t[%zu]", i);
3104
3105 if (!item->sg) {
3106 vty_out(vty, " inactive\n");
3107 continue;
3108 }
3109
3110 vty_out(vty, " %s%s %pSG nE=%u\n",
3111 item->is_src ? "S" : "",
3112 item->is_excl ? "E" : "",
3113 &item->sg->sgaddr, item->n_exclude);
3114 }
3115 }
3116 }
3117
3118 return CMD_SUCCESS;
3119 }
3120
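/* Debug helper to tweak the *running* querier parameters (QRV and max
 * response time) directly, bypassing the regular configuration path; a
 * change bumps the querier so subsequent queries carry the new values.
 */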
3121 DEFPY(gm_debug_iface_cfg,
3122 gm_debug_iface_cfg_cmd,
3123 "debug ipv6 mld {"
3124 "robustness (0-7)|"
3125 "query-max-response-time (1-8387584)"
3126 "}",
3127 DEBUG_STR
3128 IPV6_STR
3129 "Multicast Listener Discovery\n"
3130 "QRV\nQRV\n"
3131 "maxresp\nmaxresp\n")
3132 {
3133 VTY_DECLVAR_CONTEXT(interface, ifp);
3134 struct pim_interface *pim_ifp;
3135 struct gm_if *gm_ifp;
3136 bool changed = false;
3137
3138 pim_ifp = ifp->info;
3139 if (!pim_ifp) {
3140 vty_out(vty, "%% no PIM state for interface %pSQq\n",
3141 ifp->name);
3142 return CMD_WARNING;
3143 }
3144 gm_ifp = pim_ifp->mld;
3145 if (!gm_ifp) {
3146 vty_out(vty, "%% no MLD state for interface %pSQq\n",
3147 ifp->name);
3148 return CMD_WARNING;
3149 }
3150
3151 if (robustness_str && gm_ifp->cur_qrv != robustness) {
3152 gm_ifp->cur_qrv = robustness;
3153 changed = true;
3154 }
3155 if (query_max_response_time_str &&
3156 gm_ifp->cur_max_resp != (unsigned int)query_max_response_time) {
3157 gm_ifp->cur_max_resp = query_max_response_time;
3158 changed = true;
3159 }
3160
3161 if (changed) {
3162 vty_out(vty, "%% MLD querier config changed, bumping\n");
3163 gm_bump_querier(gm_ifp);
3164 }
3165 return CMD_SUCCESS;
3166 }
3167
3168 void gm_cli_init(void);
3169
3170 void gm_cli_init(void)
3171 {
3172 install_element(VIEW_NODE, &gm_show_interface_cmd);
3173 install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
3174 install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
3175 install_element(VIEW_NODE, &gm_show_mld_groups_cmd);
3176
3177 install_element(VIEW_NODE, &gm_debug_show_cmd);
3178 install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
3179 }