]> git.proxmox.com Git - mirror_frr.git/blame - pimd/pim6_mld.c
pimd: Checks imply that pim is not properly configured
[mirror_frr.git] / pimd / pim6_mld.c
CommitLineData
5e5034b0
DL
1/*
2 * PIMv6 MLD querier
3 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20/*
21 * keep pim6_mld.h open when working on this code. Most data structures are
22 * commented in the header.
23 *
24 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
25 * that this code will replace the old IGMP querier at some point.
26 */
27
28#include <zebra.h>
29#include <netinet/ip6.h>
30
31#include "lib/memory.h"
32#include "lib/jhash.h"
33#include "lib/prefix.h"
34#include "lib/checksum.h"
35#include "lib/thread.h"
36
37#include "pimd/pim6_mld.h"
38#include "pimd/pim6_mld_protocol.h"
39#include "pimd/pim_memory.h"
40#include "pimd/pim_instance.h"
41#include "pimd/pim_iface.h"
42#include "pimd/pim_util.h"
43#include "pimd/pim_tib.h"
44#include "pimd/pimd.h"
45
46#ifndef IPV6_MULTICAST_ALL
47#define IPV6_MULTICAST_ALL 29
48#endif
49
50DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface");
51DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet");
52DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber");
53DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state");
54DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)");
55DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
56DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");
57
58static void gm_t_query(struct thread *t);
59static void gm_trigger_specific(struct gm_sg *sg);
60static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
61 struct timeval expire_wait);
62
63/* shorthand for log messages */
64#define log_ifp(msg) \
65 "[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
66#define log_pkt_src(msg) \
67 "[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
68 &pkt_src->sin6_addr
69#define log_sg(sg, msg) \
70 "[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
71 sg->iface->ifp->name, &sg->sgaddr
72
73/* clang-format off */
74#if PIM_IPV == 6
75static const pim_addr gm_all_hosts = {
76 .s6_addr = {
77 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
79 },
80};
81static const pim_addr gm_all_routers = {
82 .s6_addr = {
83 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
85 },
86};
87/* MLDv1 does not allow subscriber tracking due to report suppression
88 * hence, the source address is replaced with ffff:...:ffff
89 */
90static const pim_addr gm_dummy_untracked = {
91 .s6_addr = {
92 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
93 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
94 },
95};
96#else
97/* 224.0.0.1 */
98static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), };
99/* 224.0.0.22 */
100static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), };
101static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, };
102#endif
103/* clang-format on */
104
#define IPV6_MULTICAST_SCOPE_LINK 2

/* extract the 4-bit scope field of an IPv6 multicast address: the low
 * nibble of the second address byte (ff0X::/16, cf. RFC 4291 2.7)
 */
static inline uint8_t in6_multicast_scope(const pim_addr *addr)
{
	return addr->s6_addr[1] & 0xf;
}

/* groups scoped at or below link-local must never be forwarded */
static inline bool in6_multicast_nofwd(const pim_addr *addr)
{
	return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK;
}
116
117/*
118 * (S,G) -> subscriber,(S,G)
119 */
120
/* ordering for the per-(S,G) subscriber trees.  gm_packet_sg items live
 * embedded in a gm_packet_state, which holds the subscriber pointer; compare
 * by working back to the enclosing packet so each subscriber appears at most
 * once per tree.
 */
static int gm_packet_sg_cmp(const struct gm_packet_sg *a,
			    const struct gm_packet_sg *b)
{
	const struct gm_packet_state *s_a, *s_b;

	s_a = gm_packet_sg2state(a);
	s_b = gm_packet_sg2state(b);
	return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr);
}

DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm,
		    gm_packet_sg_cmp);
133
/* look up a subscriber's item on sg's positive or negative tree.
 *
 * Builds a minimal fake "packet" on the stack - a gm_packet_state header
 * followed by one gm_packet_sg at offset 0 - so that gm_packet_sg_cmp(),
 * which navigates from item back to its enclosing packet via item->offset,
 * finds the subscriber pointer.  Returns NULL if the subscriber has no
 * state on that tree.
 */
static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg,
					      enum gm_sub_sense sense,
					      struct gm_subscriber *sub)
{
	struct {
		struct gm_packet_state hdr;
		struct gm_packet_sg item;
	} ref = {
		/* clang-format off */
		.hdr = {
			.subscriber = sub,
		},
		.item = {
			.offset = 0,
		},
		/* clang-format on */
	};

	return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item);
}
154
155/*
156 * interface -> (*,G),pending
157 */
158
/* pending *,G queries, ordered by group address */
static int gm_grp_pending_cmp(const struct gm_grp_pending *a,
			      const struct gm_grp_pending *b)
{
	return IPV6_ADDR_CMP(&a->grp, &b->grp);
}

DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm,
		    gm_grp_pending_cmp);
167
168/*
169 * interface -> ([S1,S2,...],G),pending
170 */
171
/* aggregated group-and-source queries are keyed on (s_bit, group); entries
 * with different "Suppress Router-Side Processing" bits must stay separate
 * since they produce different queries.
 */
static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a,
			      const struct gm_gsq_pending *b)
{
	if (a->s_bit != b->s_bit)
		return numcmp(a->s_bit, b->s_bit);

	return IPV6_ADDR_CMP(&a->grp, &b->grp);
}

static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a)
{
	/* distinct seeds keep s_bit=0/1 entries for the same group apart */
	uint32_t seed = a->s_bit ? 0x68f0eb5e : 0x156b7f19;

	return jhash(&a->grp, sizeof(a->grp), seed);
}

DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp,
	     gm_gsq_pending_hash);
190
191/*
192 * interface -> (S,G)
193 */
194
/* the interface's (S,G) entries, ordered by pim_sgaddr_cmp() */
static int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b)
{
	return pim_sgaddr_cmp(a->sgaddr, b->sgaddr);
}

DECLARE_RBTREE_UNIQ(gm_sgs, struct gm_sg, itm, gm_sg_cmp);
201
202static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
203 pim_addr src)
204{
205 struct gm_sg ref = {};
206
207 ref.sgaddr.grp = grp;
208 ref.sgaddr.src = src;
209 return gm_sgs_find(gm_ifp->sgs, &ref);
210}
211
/* get-or-create an (S,G) entry.  If another entry for the same (S,G) was
 * already in the tree, the freshly allocated one is discarded and the
 * existing one returned instead.
 */
static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
				pim_addr src)
{
	struct gm_sg *ret, *prev;

	ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
	ret->sgaddr.grp = grp;
	ret->sgaddr.src = src;
	ret->iface = gm_ifp;
	prev = gm_sgs_add(gm_ifp->sgs, ret);

	if (prev) {
		/* lost the race / duplicate - reuse the existing entry */
		XFREE(MTYPE_GM_SG, ret);
		ret = prev;
	} else {
		monotime(&ret->created);
		gm_packet_sg_subs_init(ret->subs_positive);
		gm_packet_sg_subs_init(ret->subs_negative);
	}
	return ret;
}
233
234/*
235 * interface -> packets, sorted by expiry (because add_tail insert order)
236 */
237
238DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm);
239
240/*
241 * subscriber -> packets
242 */
243
244DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm);
245
246/*
247 * interface -> subscriber
248 */
249
/* subscribers (hosts we have received reports from), hashed and compared
 * by their source address
 */
static int gm_subscriber_cmp(const struct gm_subscriber *a,
			     const struct gm_subscriber *b)
{
	return IPV6_ADDR_CMP(&a->addr, &b->addr);
}

static uint32_t gm_subscriber_hash(const struct gm_subscriber *a)
{
	return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4);
}

DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp,
	     gm_subscriber_hash);
263
264static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp,
265 pim_addr addr)
266{
267 struct gm_subscriber ref = {}, *ret;
268
269 ref.addr = addr;
270 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
271 if (ret)
272 ret->refcount++;
273 return ret;
274}
275
/* look up a subscriber by address, creating it with refcount 1 if it does
 * not exist yet.  Note an already-existing entry is returned without taking
 * an additional reference (callers reach here only after findref failed).
 */
static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp,
					       pim_addr addr)
{
	struct gm_subscriber ref = {}, *ret;

	ref.addr = addr;
	ret = gm_subscribers_find(gm_ifp->subscribers, &ref);

	if (!ret) {
		ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret));
		ret->iface = gm_ifp;
		ret->addr = addr;
		ret->refcount = 1;
		monotime(&ret->created);
		gm_packets_init(ret->packets);

		gm_subscribers_add(gm_ifp->subscribers, ret);
	}
	return ret;
}
296
297static void gm_subscriber_drop(struct gm_subscriber **subp)
298{
299 struct gm_subscriber *sub = *subp;
300 struct gm_if *gm_ifp;
301
302 if (!sub)
303 return;
304 gm_ifp = sub->iface;
305
306 *subp = NULL;
307 sub->refcount--;
308
309 if (sub->refcount)
310 return;
311
312 gm_subscribers_del(gm_ifp->subscribers, sub);
313 XFREE(MTYPE_GM_SUBSCRIBER, sub);
314}
315
316/****************************************************************************/
317
/* bundle query timer values for combined v1/v2 handling */
struct gm_query_timers {
	unsigned int qrv;
	unsigned int max_resp_ms;
	unsigned int qqic_ms;

	struct timeval fuzz;
	struct timeval expire_wait;
};

/* fill in expire_wait from the query parameters: worst case, a host only
 * answers the last retransmission ((qrv - 1) rounds of qqic_ms apart) at
 * the very end of the max-response window, plus the configured fuzz.
 */
static void gm_expiry_calc(struct gm_query_timers *timers)
{
	unsigned int expire_ms;
	struct timeval wait;

	expire_ms = (timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms;

	/* split milliseconds into a normalized timeval */
	wait.tv_sec = expire_ms / 1000;
	wait.tv_usec = (expire_ms % 1000) * 1000;

	/* add fuzz, carrying into seconds if microseconds overflow */
	wait.tv_sec += timers->fuzz.tv_sec;
	wait.tv_usec += timers->fuzz.tv_usec;
	if (wait.tv_usec >= 1000000) {
		wait.tv_sec++;
		wait.tv_usec -= 1000000;
	}

	timers->expire_wait = wait;
}
338
/* free an (S,G) entry.  Only the retransmit-query timer is cancelled here;
 * the caller must have dealt with the expiry timer already.
 */
static void gm_sg_free(struct gm_sg *sg)
{
	/* t_sg_expiry is handled before this is reached */
	THREAD_OFF(sg->t_sg_query);
	gm_packet_sg_subs_fini(sg->subs_negative);
	gm_packet_sg_subs_fini(sg->subs_positive);
	XFREE(MTYPE_GM_SG, sg);
}
347
/* printable names for enum gm_sg_state, indexed by state value (for logs) */
/* clang-format off */
static const char *const gm_states[] = {
	[GM_SG_NOINFO] = "NOINFO",
	[GM_SG_JOIN] = "JOIN",
	[GM_SG_JOIN_EXPIRING] = "JOIN_EXPIRING",
	[GM_SG_PRUNE] = "PRUNE",
	[GM_SG_NOPRUNE] = "NOPRUNE",
	[GM_SG_NOPRUNE_EXPIRING] = "NOPRUNE_EXPIRING",
};
/* clang-format on */
358
359CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported");
360/* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
361 * joined. Whether we actually want/need to support this is a separate
362 * question - it is almost never used. In fact this is exactly what RFC5790
363 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
364 */
365
/* central (S,G) state machine: recompute the desired state after any change
 * to the subscriber trees, start expiry/query timers on JOIN->EXPIRING
 * transitions, and push join/prune into the TIB as needed.
 *
 * NB: if the entry ends up in NOINFO, it is deleted and freed here - the
 * caller must not touch sg afterwards.
 */
static void gm_sg_update(struct gm_sg *sg, bool has_expired)
{
	struct gm_if *gm_ifp = sg->iface;
	enum gm_sg_state prev, desired;
	bool new_join;
	struct gm_sg *grp = NULL;

	/* S,G state also depends on the corresponding *,G entry (EXCLUDE
	 * handling); only a *,G entry itself can be in PRUNE
	 */
	if (!pim_addr_is_any(sg->sgaddr.src))
		grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY);
	else
		assert(sg->state != GM_SG_PRUNE);

	if (gm_packet_sg_subs_count(sg->subs_positive)) {
		/* at least one active subscriber => joined, no expiry */
		desired = GM_SG_JOIN;
		assert(!sg->t_sg_expire);
	} else if ((sg->state == GM_SG_JOIN ||
		    sg->state == GM_SG_JOIN_EXPIRING) &&
		   !has_expired)
		/* last subscriber gone, but give hosts a chance to answer
		 * the triggered query before dropping the join
		 */
		desired = GM_SG_JOIN_EXPIRING;
	else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive))
		desired = GM_SG_NOINFO;
	else if (gm_packet_sg_subs_count(grp->subs_positive) ==
		 gm_packet_sg_subs_count(sg->subs_negative)) {
		/* every *,G subscriber excludes this source */
		if ((sg->state == GM_SG_NOPRUNE ||
		     sg->state == GM_SG_NOPRUNE_EXPIRING) &&
		    !has_expired)
			desired = GM_SG_NOPRUNE_EXPIRING;
		else
			desired = GM_SG_PRUNE;
	} else if (gm_packet_sg_subs_count(sg->subs_negative))
		/* excluded by some, but not all, *,G subscribers */
		desired = GM_SG_NOPRUNE;
	else
		desired = GM_SG_NOINFO;

	if (desired != sg->state && !gm_ifp->stopping) {
		if (PIM_DEBUG_IGMP_EVENTS)
			zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
				   gm_states[desired]);

		if (desired == GM_SG_JOIN_EXPIRING ||
		    desired == GM_SG_NOPRUNE_EXPIRING) {
			/* start the expiry window and kick off the
			 * group/source-specific query retransmissions
			 */
			struct gm_query_timers timers;

			timers.qrv = gm_ifp->cur_qrv;
			timers.max_resp_ms = gm_ifp->cur_max_resp;
			timers.qqic_ms = gm_ifp->cur_query_intv_trig;
			timers.fuzz = gm_ifp->cfg_timing_fuzz;

			gm_expiry_calc(&timers);
			gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);

			THREAD_OFF(sg->t_sg_query);
			sg->n_query = gm_ifp->cur_qrv;
			sg->query_sbit = false;
			gm_trigger_specific(sg);
		}
	}
	prev = sg->state;
	sg->state = desired;

	/* link-scoped groups are never forwarded, hence never TIB-joined */
	if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping)
		new_join = false;
	else
		new_join = gm_sg_state_want_join(desired);

	if (new_join && !sg->tib_joined) {
		/* this will retry if join previously failed */
		sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
					       gm_ifp->ifp, &sg->oil);
		if (!sg->tib_joined)
			zlog_warn(
				"MLD join for %pSG%%%s not propagated into TIB",
				&sg->sgaddr, gm_ifp->ifp->name);
		else
			zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr,
				  gm_ifp->ifp->name);

	} else if (sg->tib_joined && !new_join) {
		tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil);

		sg->oil = NULL;
		sg->tib_joined = false;
	}

	if (desired == GM_SG_NOINFO) {
		/* NOINFO must imply no timers and no subscribers left */
		assertf((!sg->t_sg_expire &&
			 !gm_packet_sg_subs_count(sg->subs_positive) &&
			 !gm_packet_sg_subs_count(sg->subs_negative)),
			"%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
			&sg->sgaddr, gm_ifp->ifp->name, has_expired,
			sg->t_sg_expire, gm_states[prev], gm_states[desired],
			gm_packet_sg_subs_count(sg->subs_positive),
			gm_packet_sg_subs_count(sg->subs_negative), grp);

		if (PIM_DEBUG_IGMP_TRACE)
			zlog_debug(log_sg(sg, "dropping"));

		gm_sgs_del(gm_ifp->sgs, sg);
		gm_sg_free(sg);
	}
}
467
468/****************************************************************************/
469
470/* the following bunch of functions deals with transferring state from
471 * received packets into gm_packet_state. As a reminder, the querier is
472 * structured to keep all items received in one packet together, since they
473 * will share expiry timers and thus allows efficient handling.
474 */
475
/* unlink a packet from the expiry list and its subscriber, release the
 * subscriber reference, and free it.  All items must be inactive already.
 */
static void gm_packet_free(struct gm_packet_state *pkt)
{
	gm_packet_expires_del(pkt->iface->expires, pkt);
	gm_packets_del(pkt->subscriber->packets, pkt);
	gm_subscriber_drop(&pkt->subscriber);
	XFREE(MTYPE_GM_STATE, pkt);
}
483
484static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt,
485 struct gm_sg *sg, bool is_excl,
486 bool is_src)
487{
488 struct gm_packet_sg *item;
489
490 assert(pkt->n_active < pkt->n_sg);
491
492 item = &pkt->items[pkt->n_active];
493 item->sg = sg;
494 item->is_excl = is_excl;
495 item->is_src = is_src;
496 item->offset = pkt->n_active;
497
498 pkt->n_active++;
499 return item;
500}
501
/* deactivate one item (and, for a *,G EXCLUDE item, its trailing source
 * items) from its packet, unlinking everything from the subscriber trees.
 * Frees the whole packet when its last item goes inactive.
 *
 * Returns true if the packet was freed (caller must not touch it anymore).
 */
static bool gm_packet_sg_drop(struct gm_packet_sg *item)
{
	struct gm_packet_state *pkt;
	size_t i;

	assert(item->sg);

	pkt = gm_packet_sg2state(item);
	if (item->sg->most_recent == item)
		item->sg->most_recent = NULL;

	/* an EXCLUDE *,G item is followed by its excluded sources */
	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *excl_item;

		excl_item = item + 1 + i;
		if (!excl_item->sg)
			continue;

		gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item);
		excl_item->sg = NULL;
		pkt->n_active--;

		/* the group item itself is still active at this point */
		assert(pkt->n_active > 0);
	}

	if (item->is_excl && item->is_src)
		gm_packet_sg_subs_del(item->sg->subs_negative, item);
	else
		gm_packet_sg_subs_del(item->sg->subs_positive, item);
	item->sg = NULL;
	pkt->n_active--;

	if (!pkt->n_active) {
		gm_packet_free(pkt);
		return true;
	}
	return false;
}
540
/* drop all remaining items of a packet (used for whole-packet expiry).
 * gm_packet_sg_drop() frees the packet when the last item goes, so stop
 * iterating as soon as that happens.
 */
static void gm_packet_drop(struct gm_packet_state *pkt, bool trace)
{
	for (size_t i = 0; i < pkt->n_sg; i++) {
		struct gm_sg *sg = pkt->items[i].sg;
		bool deleted;

		if (!sg)
			continue;

		if (trace && PIM_DEBUG_IGMP_TRACE)
			zlog_debug(log_sg(sg, "general-dropping from %pPA"),
				   &pkt->subscriber->addr);
		deleted = gm_packet_sg_drop(&pkt->items[i]);

		gm_sg_update(sg, true);
		if (deleted)
			break;
	}
}
560
561static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp,
562 struct gm_subscriber *subscriber,
563 pim_addr grp, pim_addr *srcs,
564 size_t n_src, enum gm_sub_sense sense)
565{
566 struct gm_sg *sg;
567 struct gm_packet_sg *old_src;
568 size_t i;
569
570 for (i = 0; i < n_src; i++) {
571 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
572 if (!sg)
573 continue;
574
575 old_src = gm_packet_sg_find(sg, sense, subscriber);
576 if (!old_src)
577 continue;
578
579 gm_packet_sg_drop(old_src);
580 gm_sg_update(sg, false);
581 }
582}
583
/* a subscriber refreshed this (S,G): stop any pending expiry and set the
 * S-bit for subsequent specific queries (other routers need not process)
 */
static void gm_sg_expiry_cancel(struct gm_sg *sg)
{
	if (sg->t_sg_expire && PIM_DEBUG_IGMP_TRACE)
		zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
	THREAD_OFF(sg->t_sg_expire);
	sg->query_sbit = true;
}
591
592/* first pass: process all changes resulting in removal of state:
593 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
594 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
595 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
596 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
597 * note *replacing* state is NOT considered *removing* state here
598 *
599 * everything else is thrown into pkt for creation of state in pass 2
600 */
/* first pass over one MLDv2 record: apply all *removals* of existing state
 * and queue everything that creates/replaces state into pkt for pass 2.
 */
static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
			       struct mld_v2_rec_hdr *rechdr)
{
	/* NB: pkt->subscriber can be NULL here if the subscriber was not
	 * previously seen!
	 */
	struct gm_subscriber *subscriber = pkt->subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp = NULL;
	struct gm_packet_sg *item;
	size_t n_src = ntohs(rechdr->n_src);
	size_t j;
	bool is_excl = false;

	grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY);
	if (grp && subscriber)
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);

	/* only EXCLUDE mode creates *,G subscriber state */
	assert(old_grp == NULL || old_grp->is_excl);

	switch (rechdr->type) {
	case MLD_RECTYPE_IS_EXCLUDE:
	case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
		/* this always replaces or creates state */
		is_excl = true;
		if (!grp)
			grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);

		item = gm_packet_sg_setup(pkt, grp, is_excl, false);
		item->n_exclude = n_src;

		/* [EXCL_INCL_SG_NOTE] referenced below
		 *
		 * in theory, we should drop any S,G that the host may have
		 * previously added in INCLUDE mode. In practice, this is both
		 * incredibly rare and entirely irrelevant. It only makes any
		 * difference if an S,G that the host previously had on the
		 * INCLUDE list is now on the blocked list for EXCLUDE, which
		 * we can cover in processing the S,G list in pass2_excl().
		 *
		 * Other S,G from the host are simply left to expire
		 * "naturally" through general expiry.
		 */
		break;

	case MLD_RECTYPE_IS_INCLUDE:
	case MLD_RECTYPE_CHANGE_TO_INCLUDE:
		if (old_grp) {
			/* INCLUDE has no *,G state, so old_grp here refers to
			 * previous EXCLUDE => delete it
			 */
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);
			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
		}
		break;

	case MLD_RECTYPE_ALLOW_NEW_SOURCES:
		if (old_grp) {
			/* remove S,Gs from EXCLUDE, and then we're done */
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_NEG);
			return;
		}
		/* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
		 * idential to IS_INCLUDE (because the list of sources in
		 * IS_INCLUDE is not exhaustive)
		 */
		break;

	case MLD_RECTYPE_BLOCK_OLD_SOURCES:
		if (old_grp) {
			/* this is intentionally not implemented because it
			 * would be complicated as hell. we only take the list
			 * of blocked sources from full group state records
			 */
			return;
		}

		if (subscriber)
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_POS);
		return;
	}

	/* queue the record's sources for state creation in pass 2 */
	for (j = 0; j < n_src; j++) {
		struct gm_sg *sg;

		sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
		if (!sg)
			sg = gm_sg_make(pkt->iface, rechdr->grp,
					rechdr->srcs[j]);

		gm_packet_sg_setup(pkt, sg, is_excl, true);
	}
}
699
700/* second pass: creating/updating/refreshing state. All the items from the
701 * received packet have already been thrown into gm_packet_state.
702 */
703
/* second pass, INCLUDE-mode item: replace the subscriber's previous state
 * for this S,G (if any) with the new item and refresh the entry.
 */
static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i)
{
	struct gm_packet_sg *item = &pkt->items[i];
	struct gm_packet_sg *old = NULL;
	struct gm_sg *sg = item->sg;

	/* EXCLUDE state was already dropped in pass1 */
	assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber));

	old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber);
	if (old)
		gm_packet_sg_drop(old);

	pkt->n_active++;
	gm_packet_sg_subs_add(sg->subs_positive, item);

	sg->most_recent = item;
	gm_sg_expiry_cancel(sg);
	gm_sg_update(sg, false);
}
724
/* second pass, EXCLUDE-mode group item (at pkt->items[offs], followed by
 * its excluded-source items): replace the subscriber's previous *,G EXCLUDE
 * state and reconcile the old/new excluded source lists.
 */
static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs)
{
	struct gm_packet_sg *item = &pkt->items[offs];
	struct gm_packet_sg *old_grp, *item_dup;
	struct gm_sg *sg_grp = item->sg;
	size_t i;

	old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber);
	if (old_grp) {
		/* for each still-excluded source, drop the old state (the
		 * new item replaces it below)
		 */
		for (i = 0; i < item->n_exclude; i++) {
			struct gm_packet_sg *item_src, *old_src;

			item_src = &pkt->items[offs + 1 + i];
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);

			/* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
			 * items left over if the host previously had INCLUDE
			 * mode going. Remove them here if we find any.
			 */
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);
		}

		/* the previous loop has removed the S,G entries which are
		 * still excluded after this update. So anything left on the
		 * old item was previously excluded but is now included
		 * => need to trigger update on S,G
		 */
		for (i = 0; i < old_grp->n_exclude; i++) {
			struct gm_packet_sg *old_src;
			struct gm_sg *old_sg_src;

			old_src = old_grp + 1 + i;
			old_sg_src = old_src->sg;
			if (!old_sg_src)
				continue;

			gm_packet_sg_drop(old_src);
			gm_sg_update(old_sg_src, false);
		}

		gm_packet_sg_drop(old_grp);
	}

	/* install the new *,G EXCLUDE state; pass1 guarantees uniqueness */
	item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item);
	assert(!item_dup);
	pkt->n_active++;

	sg_grp->most_recent = item;
	gm_sg_expiry_cancel(sg_grp);

	/* install the new excluded-source items; duplicates within the same
	 * packet are simply deactivated
	 */
	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *item_src;

		item_src = &pkt->items[offs + 1 + i];
		item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative,
						 item_src);

		if (item_dup)
			item_src->sg = NULL;
		else {
			pkt->n_active++;
			gm_sg_update(item_src->sg, false);
		}
	}

	/* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
	 * to get lower PIM churn/flapping
	 */
	gm_sg_update(sg_grp, false);
}
801
802CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state");
803/* on receiving a query, we need to update our robustness/query interval to
804 * match, so we correctly process group/source specific queries after last
805 * member leaves
806 */
807
/* parse and apply one received MLDv2 report.  Validation is incremental:
 * a malformed record aborts the loop, but records before it still take
 * effect (per-record stats count the truncation).
 */
static void gm_handle_v2_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v2_report_hdr *hdr;
	size_t i, n_records, max_entries;
	struct gm_packet_state *pkt;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_IGMP_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv2 report (truncated header)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	/* errors after this may at least partially process the packet */
	gm_ifp->stats.rx_new_report++;

	hdr = (struct mld_v2_report_hdr *)data;
	data += sizeof(*hdr);
	len -= sizeof(*hdr);

	/* can't have more *,G and S,G items than there is space for ipv6
	 * addresses, so just use this to allocate temporary buffer
	 */
	max_entries = len / sizeof(pim_addr);
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	/* may be NULL for a first-time subscriber; resolved below */
	pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);

	n_records = ntohs(hdr->n_records);

	/* validate & remove state in v2_pass1() */
	for (i = 0; i < n_records; i++) {
		struct mld_v2_rec_hdr *rechdr;
		size_t n_src, record_size;

		if (len < sizeof(*rechdr)) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated record header)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		rechdr = (struct mld_v2_rec_hdr *)data;
		data += sizeof(*rechdr);
		len -= sizeof(*rechdr);

		n_src = ntohs(rechdr->n_src);
		/* aux_len is in units of 4 bytes (RFC 3810 5.2.6) */
		record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4;

		if (len < record_size) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated source list)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}
		if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) {
			zlog_warn(
				log_pkt_src(
					"malformed MLDv2 report (invalid group %pI6)"),
				&rechdr->grp);
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		data += record_size;
		len -= record_size;

		gm_handle_v2_pass1(pkt, rechdr);
	}

	/* nothing survived pass 1 => no state to keep */
	if (!pkt->n_active) {
		gm_subscriber_drop(&pkt->subscriber);
		XFREE(MTYPE_GM_STATE, pkt);
		return;
	}

	/* shrink the over-allocated buffer down to the used items */
	pkt = XREALLOC(MTYPE_GM_STATE, pkt,
		       offsetof(struct gm_packet_state, items[pkt->n_active]));
	pkt->n_sg = pkt->n_active;
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	/* pass 2: install state; an EXCLUDE group item is followed by its
	 * n_exclude source items, skip over them
	 */
	for (i = 0; i < pkt->n_sg; i++)
		if (!pkt->items[i].is_excl)
			gm_handle_v2_pass2_incl(pkt, i);
		else {
			gm_handle_v2_pass2_excl(pkt, i);
			i += pkt->items[i].n_exclude;
		}

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}
911
/* handle an MLDv1 report by mapping it onto the v2 machinery as an
 * IS_EXCLUDE{} record from the untracked dummy subscriber (v1 report
 * suppression makes per-host tracking impossible).
 */
static void gm_handle_v1_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_packet_state *pkt;
	struct gm_sg *grp;
	struct gm_packet_sg *item;
	size_t max_entries;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_IGMP_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 report (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_report++;

	hdr = (struct mld_v1_pkt *)data;

	max_entries = 1;
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);

	/* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */

	grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY);
	if (!grp)
		grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY);

	item = gm_packet_sg_setup(pkt, grp, true, false);
	item->n_exclude = 0;
	CPP_NOTICE("set v1-seen timer on grp here");

	/* } */

	/* pass2 will count n_active back up to 1. Also since a v1 report
	 * has exactly 1 group, we can skip the realloc() that v2 needs here.
	 */
	assert(pkt->n_active == 1);
	pkt->n_sg = pkt->n_active;
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	/* pass2 covers installing state & removing old state; all the v1
	 * compat is handled at this point.
	 *
	 * Note that "old state" may be v2; subscribers will switch from v2
	 * reports to v1 reports when the querier changes from v2 to v1. So,
	 * limiting this to v1 would be wrong.
	 */
	gm_handle_v2_pass2_excl(pkt, 0);

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}
978
/* handle an MLDv1 done/leave: drop the untracked dummy subscriber's
 * EXCLUDE state for the group, if any.  Equivalent to a v2
 * CHANGE_TO_INCLUDE{} record.
 */
static void gm_handle_v1_leave(struct gm_if *gm_ifp,
			       const struct sockaddr_in6 *pkt_src, char *data,
			       size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_subscriber *subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_IGMP_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 leave (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_leave++;

	hdr = (struct mld_v1_pkt *)data;

	/* no dummy subscriber => no v1 state to remove */
	subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
	if (!subscriber)
		return;

	/* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */

	grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY);
	if (grp) {
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
		if (old_grp) {
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);
			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
		}
	}

	/* } */

	/* nothing more to do here, pass2 is no-op for leaves */
	gm_subscriber_drop(&subscriber);
}
1021
1022/* for each general query received (or sent), a timer is started to expire
1023 * _everything_ at the appropriate time (including robustness multiplier).
1024 *
1025 * So when this timer hits, all packets - with all of their items - that were
1026 * received *before* the query are aged out, and state updated accordingly.
1027 * Note that when we receive a refresh/update, the previous/old packet is
1028 * already dropped and replaced with a new one, so in normal steady-state
1029 * operation, this timer won't be doing anything.
1030 *
1031 * Additionally, if a subscriber actively leaves a group, that goes through
1032 * its own path too and won't hit this. This is really only triggered when a
1033 * host straight up disappears.
1034 */
/* general expiry timer: age out all packets received before the pending
 * general query's timestamp.  Multiple queued pending entries are processed
 * in order; if the head entry's deadline is still in the future the timer
 * is simply re-armed for the remaining time.
 */
static void gm_t_expire(struct thread *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	struct gm_packet_state *pkt;

	zlog_info(log_ifp("general expiry timer"));

	while (gm_ifp->n_pending) {
		struct gm_general_pending *pend = gm_ifp->pending;
		struct timeval remain;
		int64_t remain_ms;

		remain_ms = monotime_until(&pend->expiry, &remain);
		if (remain_ms > 0) {
			/* NOTE(review): the /1000 below suggests
			 * monotime_until() returns microseconds despite the
			 * variable's "_ms" name - confirm against lib
			 */
			if (PIM_DEBUG_IGMP_EVENTS)
				zlog_debug(
					log_ifp("next general expiry in %" PRId64 "ms"),
					remain_ms / 1000);

			thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
					    &remain, &gm_ifp->t_expire);
			return;
		}

		/* expires list is insert-ordered by receive time, so stop at
		 * the first packet newer than the pending query
		 */
		while ((pkt = gm_packet_expires_first(gm_ifp->expires))) {
			if (timercmp(&pkt->received, &pend->query, >=))
				break;

			if (PIM_DEBUG_IGMP_PACKETS)
				zlog_debug(log_ifp("expire packet %p"), pkt);
			gm_packet_drop(pkt, true);
		}

		/* shift the remaining pending entries down by one */
		gm_ifp->n_pending--;
		memmove(gm_ifp->pending, gm_ifp->pending + 1,
			gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
	}

	if (PIM_DEBUG_IGMP_EVENTS)
		zlog_debug(log_ifp("next general expiry waiting for query"));
}
1076
1077/* NB: the receive handlers will also run when sending packets, since we
1078 * receive our own packets back in.
1079 */
/* record a (seen or self-sent) general query: queue an expiry deadline for
 * it, discarding queued deadlines that this query supersedes.
 */
static void gm_handle_q_general(struct gm_if *gm_ifp,
				struct gm_query_timers *timers)
{
	struct timeval now, expiry;
	struct gm_general_pending *pend;

	monotime(&now);
	timeradd(&now, &timers->expire_wait, &expiry);

	while (gm_ifp->n_pending) {
		pend = &gm_ifp->pending[gm_ifp->n_pending - 1];

		if (timercmp(&pend->expiry, &expiry, <))
			break;

		/* if we end up here, the last item in pending[] has an expiry
		 * later than the expiry for this query. But our query time
		 * (now) is later than that of the item (because, well, that's
		 * how time works.) This makes this query meaningless since
		 * it's "supersetted" within the preexisting query
		 */

		if (PIM_DEBUG_IGMP_TRACE_DETAIL)
			zlog_debug(
				log_ifp("zapping supersetted general timer %pTVMu"),
				&pend->expiry);

		gm_ifp->n_pending--;
		if (!gm_ifp->n_pending)
			THREAD_OFF(gm_ifp->t_expire);
	}

	/* people might be messing with their configs or something */
	if (gm_ifp->n_pending == array_size(gm_ifp->pending))
		return;

	pend = &gm_ifp->pending[gm_ifp->n_pending];
	pend->query = now;
	pend->expiry = expiry;

	/* the first queued entry also arms the expiry timer */
	if (!gm_ifp->n_pending++) {
		if (PIM_DEBUG_IGMP_TRACE)
			zlog_debug(
				log_ifp("starting general timer @ 0: %pTVMu"),
				&pend->expiry);
		thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
				    &timers->expire_wait, &gm_ifp->t_expire);
	} else if (PIM_DEBUG_IGMP_TRACE)
		zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
			   gm_ifp->n_pending, &pend->expiry);
}
1131
/* Expiry timer for a single S,G (or *,G) entry: no report arrived in time,
 * so drop the positive subscription state and update the entry.
 */
1132static void gm_t_sg_expire(struct thread *t)
1133{
1134	struct gm_sg *sg = THREAD_ARG(t);
1135	struct gm_if *gm_ifp = sg->iface;
1136	struct gm_packet_sg *item;
1137
	/* this timer must only fire in the two _EXPIRING states */
1138	assertf(sg->state == GM_SG_JOIN_EXPIRING ||
1139			sg->state == GM_SG_NOPRUNE_EXPIRING,
1140		"%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t);
1141
1142	frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
1143		/* this will also drop EXCLUDE mode S,G lists together with
1144		 * the *,G entry
1145		 */
1146		gm_packet_sg_drop(item);
1147
1148	/* subs_negative items are only timed out together with the *,G entry
1149	 * since we won't get any reports for a group-and-source query
1150	 */
1151	gm_sg_update(sg, true);
1152}
1153
/* Return true if this S,G already has a report newer than @ref (usually a
 * query's timestamp, plus timing fuzz) - in that case there is no need to
 * expire or re-query the entry.
 *
 * sg->most_recent is a lazily maintained cache; rebuild it from the
 * positive subscriptions when it is unset.
 */
1154static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg,
1155			       struct timeval ref)
1156{
1157	struct gm_packet_state *pkt;
1158
1159	if (!sg->most_recent) {
1160		struct gm_packet_state *best_pkt = NULL;
1161		struct gm_packet_sg *item;
1162
		/* linear scan for the newest RX timestamp */
1163		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
1164			pkt = gm_packet_sg2state(item);
1165
1166			if (!best_pkt ||
1167			    timercmp(&pkt->received, &best_pkt->received, >)) {
1168				best_pkt = pkt;
1169				sg->most_recent = item;
1170			}
1171		}
1172	}
1173	if (sg->most_recent) {
1174		struct timeval fuzz;
1175
1176		pkt = gm_packet_sg2state(sg->most_recent);
1177
1178		/* this shouldn't happen on plain old real ethernet segment,
1179		 * but on something like a VXLAN or VPLS it is very possible
1180		 * that we get a report before the query that triggered it.
1181		 * (imagine a triangle scenario with 3 datacenters, it's very
1182		 * possible A->B + B->C is faster than A->C due to odd routing)
1183		 *
1184		 * This makes a little tolerance allowance to handle that case.
1185		 */
1186		timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz);
1187
1188		if (timercmp(&fuzz, &ref, >))
1189			return true;
1190	}
1191	return false;
1192}
1193
/* Arm (or tighten) the expiry timer on an S,G entry in response to a query.
 * No-op if the entry is absent, already in PRUNE state, or was reported
 * recently enough that the query is effectively already answered.
 */
1194static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
1195			      struct timeval expire_wait)
1196{
1197	struct timeval now;
1198
1199	if (!sg)
1200		return;
1201	if (sg->state == GM_SG_PRUNE)
1202		return;
1203
1204	monotime(&now);
1205	if (gm_sg_check_recent(gm_ifp, sg, now))
1206		return;
1207
1208	if (PIM_DEBUG_IGMP_TRACE)
1209		zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait);
1210
	/* never extend an already-armed timer, only shorten it */
1211	if (sg->t_sg_expire) {
1212		struct timeval remain;
1213
1214		remain = thread_timer_remain(sg->t_sg_expire);
1215		if (timercmp(&remain, &expire_wait, <=))
1216			return;
1217
1218		THREAD_OFF(sg->t_sg_expire);
1219	}
1220
1221	thread_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
1222			    &sg->t_sg_expire);
1223}
1224
1225static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
1226 struct gm_query_timers *timers, pim_addr grp,
1227 const pim_addr *srcs, size_t n_src)
1228{
1229 struct gm_sg *sg;
1230 size_t i;
1231
1232 for (i = 0; i < n_src; i++) {
1233 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
1234 gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1235 }
1236}
1237
1238static void gm_t_grp_expire(struct thread *t)
1239{
1240	/* if we're here, that means when we received the group-specific query
1241	 * there was one or more active S,G for this group.  For *,G the timer
1242	 * in sg->t_sg_expire is running separately and gets cancelled when we
1243	 * receive a report, so that work is left to gm_t_sg_expire and we
1244	 * shouldn't worry about it here.
1245	 */
1246	struct gm_grp_pending *pend = THREAD_ARG(t);
1247	struct gm_if *gm_ifp = pend->iface;
1248	struct gm_sg *sg, *sg_start, sg_ref = {};
1249
1250	if (PIM_DEBUG_IGMP_EVENTS)
1251		zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);
1252
1253	/* gteq lookup - try to find *,G or S,G (S,G is > *,G)
1254	 * could technically be gt to skip a possible *,G
1255	 */
1256	sg_ref.sgaddr.grp = pend->grp;
1257	sg_ref.sgaddr.src = PIMADDR_ANY;
1258	sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
1259
	/* walk all entries belonging to this group */
1260	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
1261		struct gm_packet_sg *item;
1262
1263		if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
1264			break;
1265		if (pim_addr_is_any(sg->sgaddr.src))
1266			/* handled by gm_t_sg_expire / sg->t_sg_expire */
1267			continue;
1268		if (gm_sg_check_recent(gm_ifp, sg, pend->query))
1269			continue;
1270
1271		/* we may also have a group-source-specific query going on in
1272		 * parallel.  But if we received nothing for the *,G query,
1273		 * the S,G query is kinda irrelevant.
1274		 */
1275		THREAD_OFF(sg->t_sg_expire);
1276
1277		frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
1278			/* this will also drop the EXCLUDE S,G lists */
1279			gm_packet_sg_drop(item);
1280
1281		gm_sg_update(sg, true);
1282	}
1283
	/* one-shot: the pending entry is consumed by this expiry */
1284	gm_grp_pends_del(gm_ifp->grp_pends, pend);
1285	XFREE(MTYPE_GM_GRP_PENDING, pend);
1286}
1287
/* A group-specific query was seen: start expiry for the *,G entry (if any)
 * and, when S,G entries exist for the group, a shared pending timer whose
 * expiry (gm_t_grp_expire) ages them all out.
 */
1288static void gm_handle_q_group(struct gm_if *gm_ifp,
1289			      struct gm_query_timers *timers, pim_addr grp)
1290{
1291	struct gm_sg *sg, sg_ref = {};
1292	struct gm_grp_pending *pend, pend_ref = {};
1293
1294	sg_ref.sgaddr.grp = grp;
1295	sg_ref.sgaddr.src = PIMADDR_ANY;
1296	/* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
1297	sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
1298
1299	if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
1300		/* we have nothing at all for this group - don't waste RAM */
1301		return;
1302
1303	if (pim_addr_is_any(sg->sgaddr.src)) {
1304		/* actually found *,G entry here */
1305		if (PIM_DEBUG_IGMP_TRACE)
1306			zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
1307				   &grp);
1308		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1309
1310		sg = gm_sgs_next(gm_ifp->sgs, sg);
1311		if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
1312			/* no S,G for this group */
1313			return;
1314	}
1315
	/* reuse an existing pending entry unless ours expires sooner */
1316	pend_ref.grp = grp;
1317	pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);
1318
1319	if (pend) {
1320		struct timeval remain;
1321
1322		remain = thread_timer_remain(pend->t_expire);
1323		if (timercmp(&remain, &timers->expire_wait, <=))
1324			return;
1325
1326		THREAD_OFF(pend->t_expire);
1327	} else {
1328		pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
1329		pend->grp = grp;
1330		pend->iface = gm_ifp;
1331		gm_grp_pends_add(gm_ifp->grp_pends, pend);
1332	}
1333
1334	monotime(&pend->query);
1335	thread_add_timer_tv(router->master, gm_t_grp_expire, pend,
1336			    &timers->expire_wait, &pend->t_expire);
1337
1338	if (PIM_DEBUG_IGMP_TRACE)
1339		zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
1340			   pend->t_expire);
1341}
1342
/* Restart querying with a fresh startup burst (QRV quick queries) - used
 * when interface state/config changes.  Only acts when our own lowest
 * link-local address is the elected querier.
 */
1343static void gm_bump_querier(struct gm_if *gm_ifp)
1344{
1345	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1346
1347	THREAD_OFF(gm_ifp->t_query);
1348
	/* no usable link-local address (yet) - nothing to query from */
1349	if (pim_addr_is_any(pim_ifp->ll_lowest))
1350		return;
	/* some other router won the election - leave querying to it */
1351	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
1352		return;
1353
1354	gm_ifp->n_startup = gm_ifp->cur_qrv;
1355
	/* send the first query immediately, not on a timer */
1356	thread_execute(router->master, gm_t_query, gm_ifp, 0);
1357}
1358
/* The other-querier-present timer ran out: the previously elected querier
 * went silent, so take over querying ourselves (with a startup burst).
 */
1359static void gm_t_other_querier(struct thread *t)
1360{
1361	struct gm_if *gm_ifp = THREAD_ARG(t);
1362	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1363
1364	zlog_info(log_ifp("other querier timer expired"));
1365
	/* elect ourselves and kick off querying immediately */
1366	gm_ifp->querier = pim_ifp->ll_lowest;
1367	gm_ifp->n_startup = gm_ifp->cur_qrv;
1368
1369	thread_execute(router->master, gm_t_query, gm_ifp, 0);
1370}
1371
/* Process a received MLD query (v1 or v2, distinguished purely by length):
 * validate the packet, run querier election, then schedule report-state
 * expiry for the query's scope (general / group / group-and-source).
 */
1372static void gm_handle_query(struct gm_if *gm_ifp,
1373			    const struct sockaddr_in6 *pkt_src,
1374			    pim_addr *pkt_dst, char *data, size_t len)
1375{
1376	struct mld_v2_query_hdr *hdr;
1377	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1378	struct gm_query_timers timers;
1379	bool general_query;
1380
1381	if (len < sizeof(struct mld_v2_query_hdr) &&
1382	    len != sizeof(struct mld_v1_pkt)) {
1383		zlog_warn(log_pkt_src("invalid query size"));
aa2f9349 1384		gm_ifp->stats.rx_drop_malformed++;
5e5034b0
DL
1385		return;
1386	}
1387
	/* v1 and v2 queries share their leading fields, so the v2 header
	 * overlay is safe for both until the v2-only fields are touched
	 */
1388	hdr = (struct mld_v2_query_hdr *)data;
1389	general_query = pim_addr_is_any(hdr->grp);
1390
1391	if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) {
1392		zlog_warn(log_pkt_src(
1393				  "malformed MLDv2 query (invalid group %pI6)"),
1394			  &hdr->grp);
aa2f9349 1395		gm_ifp->stats.rx_drop_malformed++;
5e5034b0
DL
1396		return;
1397	}
1398
1399	if (len >= sizeof(struct mld_v2_query_hdr)) {
1400		size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);
1401
1402		if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
1403			zlog_warn(log_pkt_src(
1404				"malformed MLDv2 query (truncated source list)"));
aa2f9349 1405			gm_ifp->stats.rx_drop_malformed++;
5e5034b0
DL
1406			return;
1407		}
1408
1409		if (general_query && src_space) {
1410			zlog_warn(log_pkt_src(
1411				"malformed MLDv2 query (general query with non-empty source list)"));
aa2f9349 1412			gm_ifp->stats.rx_drop_malformed++;
5e5034b0
DL
1413			return;
1414		}
1415	}
1416
1417	/* accepting queries unicast to us (or addressed to a wrong group)
1418	 * can mess up querier election as well as cause us to terminate
1419	 * traffic (since after a unicast query no reports will be coming in)
1420	 */
1421	if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) {
1422		if (pim_addr_is_any(hdr->grp)) {
1423			zlog_warn(
1424				log_pkt_src(
1425					"wrong destination %pPA for general query"),
1426				pkt_dst);
aa2f9349 1427			gm_ifp->stats.rx_drop_dstaddr++;
5e5034b0
DL
1428			return;
1429		}
1430
1431		if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) {
aa2f9349 1432			gm_ifp->stats.rx_drop_dstaddr++;
5e5034b0
DL
1433			zlog_warn(
1434				log_pkt_src(
1435					"wrong destination %pPA for group specific query"),
1436				pkt_dst);
1437			return;
1438		}
1439	}
1440
	/* querier election: lowest source address wins */
1441	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
1442		if (PIM_DEBUG_IGMP_EVENTS)
1443			zlog_debug(
1444				log_pkt_src("replacing elected querier %pPA"),
1445				&gm_ifp->querier);
1446
1447		gm_ifp->querier = pkt_src->sin6_addr;
1448	}
1449
1450	if (len == sizeof(struct mld_v1_pkt)) {
		/* MLDv1: QRV/QQIC are not on the wire, use local config.
		 * NOTE(review): max_resp_code is used raw here while the v2
		 * path decodes it via mld_max_resp_decode() - verify the
		 * byte-order handling for the v1 case.
		 */
1451		timers.qrv = gm_ifp->cur_qrv;
1452		timers.max_resp_ms = hdr->max_resp_code;
1453		timers.qqic_ms = gm_ifp->cur_query_intv;
1454	} else {
		/* QRV of 0 on the wire means "more than 7", treat as 8 */
1455		timers.qrv = (hdr->flags & 0x7) ?: 8;
1456		timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code);
1457		timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000;
1458	}
1459	timers.fuzz = gm_ifp->cfg_timing_fuzz;
1460
1461	gm_expiry_calc(&timers);
1462
1463	if (PIM_DEBUG_IGMP_TRACE_DETAIL)
1464		zlog_debug(
1465			log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
1466			timers.qrv, timers.max_resp_ms, timers.qqic_ms,
1467			&timers.expire_wait);
1468
	/* a lower-addressed router is querying - defer to it and start the
	 * other-querier-present timeout
	 */
1469	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
1470		unsigned int other_ms;
1471
1472		THREAD_OFF(gm_ifp->t_query);
1473		THREAD_OFF(gm_ifp->t_other_querier);
1474
1475		other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
1476		thread_add_timer_msec(router->master, gm_t_other_querier,
1477				      gm_ifp, other_ms,
1478				      &gm_ifp->t_other_querier);
1479	}
1480
1481	if (len == sizeof(struct mld_v1_pkt)) {
aa2f9349 1482		if (general_query) {
5e5034b0 1483			gm_handle_q_general(gm_ifp, &timers);
aa2f9349
DL
1484			gm_ifp->stats.rx_query_old_general++;
1485		} else {
5e5034b0 1486			gm_handle_q_group(gm_ifp, &timers, hdr->grp);
aa2f9349
DL
1487			gm_ifp->stats.rx_query_old_group++;
1488		}
5e5034b0
DL
1489		return;
1490	}
1491
1492	/* v2 query - [S]uppress bit */
aa2f9349
DL
1493	if (hdr->flags & 0x8) {
1494		gm_ifp->stats.rx_query_new_sbit++;
5e5034b0 1495		return;
aa2f9349 1496	}
5e5034b0 1497
aa2f9349 1498	if (general_query) {
5e5034b0 1499		gm_handle_q_general(gm_ifp, &timers);
aa2f9349
DL
1500		gm_ifp->stats.rx_query_new_general++;
1501	} else if (!ntohs(hdr->n_src)) {
5e5034b0 1502		gm_handle_q_group(gm_ifp, &timers, hdr->grp);
aa2f9349
DL
1503		gm_ifp->stats.rx_query_new_group++;
1504	} else {
5e5034b0
DL
1505		gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
1506				     ntohs(hdr->n_src));
aa2f9349
DL
1507		gm_ifp->stats.rx_query_new_groupsrc++;
1508	}
5e5034b0
DL
1509}
1510
/* Verify the ICMPv6 checksum (over the IPv6 pseudo-header) and dispatch by
 * message type.  Types not handled below never arrive: the socket's
 * ICMP6_FILTER (set up in gm_vrf_socket_incref) only passes these four.
 */
1511static void gm_rx_process(struct gm_if *gm_ifp,
1512			  const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst,
1513			  void *data, size_t pktlen)
1514{
1515	struct icmp6_plain_hdr *icmp6 = data;
1516	uint16_t pkt_csum, ref_csum;
1517	struct ipv6_ph ph6 = {
1518		.src = pkt_src->sin6_addr,
1519		.dst = *pkt_dst,
1520		.ulpl = htons(pktlen),
1521		.next_hdr = IPPROTO_ICMPV6,
1522	};
1523
	/* zero the checksum field in-place before recomputing */
1524	pkt_csum = icmp6->icmp6_cksum;
1525	icmp6->icmp6_cksum = 0;
1526	ref_csum = in_cksum_with_ph6(&ph6, data, pktlen);
1527
1528	if (pkt_csum != ref_csum) {
1529		zlog_warn(
1530			log_pkt_src(
1531				"(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
1532			pkt_dst, pkt_csum, ref_csum);
aa2f9349 1533		gm_ifp->stats.rx_drop_csum++;
5e5034b0
DL
1534		return;
1535	}
1536
	/* step past the ICMPv6 header to the MLD payload */
1537	data = (icmp6 + 1);
1538	pktlen -= sizeof(*icmp6);
1539
1540	switch (icmp6->icmp6_type) {
1541	case ICMP6_MLD_QUERY:
1542		gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen);
1543		break;
1544	case ICMP6_MLD_V1_REPORT:
1545		gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen);
1546		break;
1547	case ICMP6_MLD_V1_DONE:
1548		gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen);
1549		break;
1550	case ICMP6_MLD_V2_REPORT:
1551		gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen);
1552		break;
1553	}
1554}
1555
/* Scan an IPv6 Hop-by-Hop options extension header for a Router Alert
 * option carrying the given alert value (0 = MLD).
 *
 * hopopts points at the raw extension header (next-header byte, length
 * byte, then TLV-encoded options); hopopt_len is the number of bytes
 * actually available.  Returns true only if a well-formed Router Alert
 * option with value == alert_type is found before the header ends.
 */
static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
				 uint16_t alert_type)
{
	size_t total, pos;

	/* header length is in 8-byte units, minimum one unit */
	if (hopopt_len < 8)
		return false;

	total = (hopopts[1] + 1U) * 8U;
	if (hopopt_len < total)
		return false;

	/* skip next-header + length bytes, then walk the TLVs */
	for (pos = 2; pos < total;) {
		uint8_t opt_type = hopopts[pos];

		if (opt_type == IP6OPT_PAD1) {
			/* Pad1 is a single byte, no length field */
			pos++;
			continue;
		}

		/* need room for type + length, then for the payload */
		if (pos + 2 > total)
			break;
		if (pos + 2 + hopopts[pos + 1] > total)
			break;

		if (opt_type == IP6OPT_ROUTER_ALERT && hopopts[pos + 1] == 2) {
			uint16_t have_type =
				(uint16_t)((hopopts[pos + 2] << 8) |
					   hopopts[pos + 3]);

			if (have_type == alert_type)
				return true;
		}

		pos += 2 + hopopts[pos + 1];
	}
	return false;
}
1591
/* Per-VRF socket read handler: receive one MLD packet, run sanity checks
 * (hop limit 1, Router Alert option, link-local source), then hand it to
 * gm_rx_process() for the interface it arrived on.
 */
1592static void gm_t_recv(struct thread *t)
1593{
df655593 1594	struct pim_instance *pim = THREAD_ARG(t);
5e5034b0
DL
1595	union {
1596		char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
1597			 CMSG_SPACE(256) /* hop options */ +
1598			 CMSG_SPACE(sizeof(int)) /* hopcount */];
1599		struct cmsghdr align;
1600	} cmsgbuf;
1601	struct cmsghdr *cmsg;
1602	struct in6_pktinfo *pktinfo = NULL;
1603	uint8_t *hopopts = NULL;
1604	size_t hopopt_len = 0;
1605	int *hoplimit = NULL;
1606	char rxbuf[2048];
1607	struct msghdr mh[1] = {};
1608	struct iovec iov[1];
1609	struct sockaddr_in6 pkt_src[1];
1610	ssize_t nread;
1611	size_t pktlen;
1612
	/* re-arm the read before doing any processing */
df655593
DL
1613	thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
1614			&pim->t_gm_recv);
5e5034b0
DL
1615
1616	iov->iov_base = rxbuf;
1617	iov->iov_len = sizeof(rxbuf);
1618
1619	mh->msg_name = pkt_src;
1620	mh->msg_namelen = sizeof(pkt_src);
1621	mh->msg_control = cmsgbuf.buf;
1622	mh->msg_controllen = sizeof(cmsgbuf.buf);
1623	mh->msg_iov = iov;
1624	mh->msg_iovlen = array_size(iov);
1625	mh->msg_flags = 0;
1626
	/* peek first so an oversized packet can get a heap buffer */
df655593 1627	nread = recvmsg(pim->gm_socket, mh, MSG_PEEK | MSG_TRUNC);
5e5034b0 1628	if (nread <= 0) {
df655593
DL
1629		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
1630		pim->gm_rx_drop_sys++;
5e5034b0
DL
1631		return;
1632	}
1633
1634	if ((size_t)nread > sizeof(rxbuf)) {
1635		iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread);
1636		iov->iov_len = nread;
1637	}
df655593 1638	nread = recvmsg(pim->gm_socket, mh, 0);
5e5034b0 1639	if (nread <= 0) {
df655593
DL
1640		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
1641		pim->gm_rx_drop_sys++;
5e5034b0
DL
1642		goto out_free;
1643	}
1644
	/* for link-local sources, sin6_scope_id is the ingress ifindex */
df655593
DL
1645	struct interface *ifp;
1646
1647	ifp = if_lookup_by_index(pkt_src->sin6_scope_id, pim->vrf->vrf_id);
1648	if (!ifp || !ifp->info)
1649		goto out_free;
1650
1651	struct pim_interface *pim_ifp = ifp->info;
1652	struct gm_if *gm_ifp = pim_ifp->mld;
1653
1654	if (!gm_ifp)
5e5034b0
DL
1655		goto out_free;
1656
	/* collect the ancillary data requested via setsockopt at startup */
1657	for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) {
1658		if (cmsg->cmsg_level != SOL_IPV6)
1659			continue;
1660
1661		switch (cmsg->cmsg_type) {
1662		case IPV6_PKTINFO:
1663			pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
1664			break;
1665		case IPV6_HOPOPTS:
1666			hopopts = CMSG_DATA(cmsg);
1667			hopopt_len = cmsg->cmsg_len - sizeof(*cmsg);
1668			break;
1669		case IPV6_HOPLIMIT:
1670			hoplimit = (int *)CMSG_DATA(cmsg);
1671			break;
1672		}
1673	}
1674
1675	if (!pktinfo || !hoplimit) {
1676		zlog_err(log_ifp(
1677			"BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
df655593 1678		pim->gm_rx_drop_sys++;
5e5034b0
DL
1679		goto out_free;
1680	}
1681
	/* RFC-mandated: MLD packets are sent with hop limit 1 */
1682	if (*hoplimit != 1) {
1683		zlog_err(log_pkt_src("packet with hop limit != 1"));
aa2f9349
DL
1684		/* spoofing attempt => count on srcaddr counter */
1685		gm_ifp->stats.rx_drop_srcaddr++;
5e5034b0
DL
1686		goto out_free;
1687	}
1688
1689	if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) {
1690		zlog_err(log_pkt_src(
1691			"packet without IPv6 Router Alert MLD option"));
aa2f9349 1692		gm_ifp->stats.rx_drop_ra++;
5e5034b0
DL
1693		goto out_free;
1694	}
1695
1696	if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr))
1697		/* reports from :: happen in normal operation for DAD, so
1698		 * don't spam log messages about this
1699		 */
1700		goto out_free;
1701
1702	if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) {
1703		zlog_warn(log_pkt_src("packet from invalid source address"));
aa2f9349 1704		gm_ifp->stats.rx_drop_srcaddr++;
5e5034b0
DL
1705		goto out_free;
1706	}
1707
1708	pktlen = nread;
1709	if (pktlen < sizeof(struct icmp6_plain_hdr)) {
1710		zlog_warn(log_pkt_src("truncated packet"));
aa2f9349 1711		gm_ifp->stats.rx_drop_malformed++;
5e5034b0
DL
1712		goto out_free;
1713	}
1714
1715	gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base,
1716		      pktlen);
1717
1718out_free:
	/* only free if the oversized-packet heap buffer was used */
1719	if (iov->iov_base != rxbuf)
1720		XFREE(MTYPE_GM_PACKET, iov->iov_base);
1721}
1722
/* Build and transmit one MLD query (general, group-specific, or
 * group-and-source-specific, per grp/srcs/n_srcs).  The IPv6 pseudo-header
 * used for the checksum is carried in iov[0] but deliberately excluded
 * from the sendmsg iov; Router Alert hop-by-hop option and egress pktinfo
 * are attached as ancillary data.
 */
1723static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
1724			  const pim_addr *srcs, size_t n_srcs, bool s_bit)
1725{
1726	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1727	struct sockaddr_in6 dstaddr = {
1728		.sin6_family = AF_INET6,
1729		.sin6_scope_id = gm_ifp->ifp->ifindex,
1730	};
1731	struct {
1732		struct icmp6_plain_hdr hdr;
1733		struct mld_v2_query_hdr v2_query;
1734	} query = {
1735		/* clang-format off */
1736		.hdr = {
1737			.icmp6_type = ICMP6_MLD_QUERY,
1738			.icmp6_code = 0,
1739		},
1740		.v2_query = {
1741			.grp = grp,
1742		},
1743		/* clang-format on */
1744	};
	/* NOTE(review): ulpl is fixed at sizeof(query) here, but the bytes
	 * actually sent differ for MLDv1 (shorter) and group+source queries
	 * (longer, extra iov[2]).  Verify the pseudo-header length matches
	 * the transmitted length, otherwise receivers' checksum validation
	 * of those query types would fail.
	 */
1745	struct ipv6_ph ph6 = {
1746		.src = pim_ifp->ll_lowest,
1747		.ulpl = htons(sizeof(query)),
1748		.next_hdr = IPPROTO_ICMPV6,
1749	};
1750	union {
df655593
DL
1751		char buf[CMSG_SPACE(8) /* hop options */ +
1752			 CMSG_SPACE(sizeof(struct in6_pktinfo))];
5e5034b0
DL
1753		struct cmsghdr align;
1754	} cmsg = {};
1755	struct cmsghdr *cmh;
1756	struct msghdr mh[1] = {};
1757	struct iovec iov[3];
1758	size_t iov_len;
1759	ssize_t ret, expect_ret;
1760	uint8_t *dp;
df655593 1761	struct in6_pktinfo *pktinfo;
5e5034b0
DL
1762
1763	if (if_is_loopback(gm_ifp->ifp)) {
1764		/* Linux is a bit odd with multicast on loopback */
1765		ph6.src = in6addr_loopback;
1766		dstaddr.sin6_addr = in6addr_loopback;
1767	} else if (pim_addr_is_any(grp))
1768		dstaddr.sin6_addr = gm_all_hosts;
1769	else
1770		dstaddr.sin6_addr = grp;
1771
1772	query.v2_query.max_resp_code =
1773		mld_max_resp_encode(gm_ifp->cur_max_resp);
	/* QRV > 7 can't be encoded in the 3-bit field, send 0 */
1774	query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? gm_ifp->cur_qrv : 0;
1775	if (s_bit)
1776		query.v2_query.flags |= 0x08;
1777	query.v2_query.qqic =
1778		igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000);
1779	query.v2_query.n_src = htons(n_srcs);
1780
1781	ph6.dst = dstaddr.sin6_addr;
1782
1783	/* ph6 not included in sendmsg */
1784	iov[0].iov_base = &ph6;
1785	iov[0].iov_len = sizeof(ph6);
1786	iov[1].iov_base = &query;
1787	if (gm_ifp->cur_version == GM_MLDV1) {
		/* MLDv1 queries are just the truncated common header */
1788		iov_len = 2;
1789		iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt);
1790	} else if (!n_srcs) {
1791		iov_len = 2;
1792		iov[1].iov_len = sizeof(query);
1793	} else {
1794		iov[1].iov_len = sizeof(query);
1795		iov[2].iov_base = (void *)srcs;
1796		iov[2].iov_len = n_srcs * sizeof(srcs[0]);
1797		iov_len = 3;
1798	}
1799
	/* checksum covers pseudo-header + everything actually sent */
1800	query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);
1801
1802	if (PIM_DEBUG_IGMP_PACKETS)
1803		zlog_debug(
1804			log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
1805			&pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);
1806
1807	mh->msg_name = &dstaddr;
1808	mh->msg_namelen = sizeof(dstaddr);
1809	mh->msg_iov = iov + 1;
1810	mh->msg_iovlen = iov_len - 1;
1811	mh->msg_control = &cmsg;
1812	mh->msg_controllen = sizeof(cmsg.buf);
df655593 1813
	/* hop-by-hop Router Alert option, required on MLD packets */
5e5034b0
DL
1814	cmh = CMSG_FIRSTHDR(mh);
1815	cmh->cmsg_level = IPPROTO_IPV6;
1816	cmh->cmsg_type = IPV6_HOPOPTS;
1817	cmh->cmsg_len = CMSG_LEN(8);
1818	dp = CMSG_DATA(cmh);
1819	*dp++ = 0; /* next header */
1820	*dp++ = 0; /* length (8-byte blocks, minus 1) */
1821	*dp++ = IP6OPT_ROUTER_ALERT; /* router alert */
1822	*dp++ = 2; /* length */
1823	*dp++ = 0; /* value (2 bytes) */
1824	*dp++ = 0; /* value (2 bytes) (0 = MLD) */
1825	*dp++ = 0; /* pad0 */
1826	*dp++ = 0; /* pad0 */
1827
	/* pin egress interface + source address */
df655593
DL
1828	cmh = CMSG_NXTHDR(mh, cmh);
1829	cmh->cmsg_level = IPPROTO_IPV6;
1830	cmh->cmsg_type = IPV6_PKTINFO;
1831	cmh->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
1832	pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmh);
1833	pktinfo->ipi6_ifindex = gm_ifp->ifp->ifindex;
1834	pktinfo->ipi6_addr = gm_ifp->cur_ll_lowest;
1835
5e5034b0
DL
1836	expect_ret = iov[1].iov_len;
1837	if (iov_len == 3)
1838		expect_ret += iov[2].iov_len;
1839
1840	frr_with_privs (&pimd_privs) {
df655593 1841		ret = sendmsg(gm_ifp->pim->gm_socket, mh, 0);
5e5034b0
DL
1842	}
1843
	/* a short write also counts as failure */
aa2f9349 1844	if (ret != expect_ret) {
5e5034b0 1845		zlog_warn(log_ifp("failed to send query: %m"));
aa2f9349
DL
1846		gm_ifp->stats.tx_query_fail++;
1847	} else {
1848		if (gm_ifp->cur_version == GM_MLDV1) {
1849			if (pim_addr_is_any(grp))
1850				gm_ifp->stats.tx_query_old_general++;
1851			else
1852				gm_ifp->stats.tx_query_old_group++;
1853		} else {
1854			if (pim_addr_is_any(grp))
1855				gm_ifp->stats.tx_query_new_general++;
1856			else if (!n_srcs)
1857				gm_ifp->stats.tx_query_new_group++;
1858			else
1859				gm_ifp->stats.tx_query_new_groupsrc++;
1860		}
1861	}
5e5034b0
DL
1862}
1863
1864static void gm_t_query(struct thread *t)
1865{
1866 struct gm_if *gm_ifp = THREAD_ARG(t);
1867 unsigned int timer_ms = gm_ifp->cur_query_intv;
1868
1869 if (gm_ifp->n_startup) {
1870 timer_ms /= 4;
1871 gm_ifp->n_startup--;
1872 }
1873
1874 thread_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
1875 &gm_ifp->t_query);
1876
1877 gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
1878}
1879
/* Retransmit timer for S,G specific queries; gm_trigger_specific
 * re-arms this while sg->n_query is still nonzero.
 */
static void gm_t_sg_query(struct thread *t)
{
	gm_trigger_specific(THREAD_ARG(t));
}
1886
1887/* S,G specific queries (triggered by a member leaving) get a little slack
1888 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
1889 */
1890static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
1891{
1892 struct gm_if *gm_ifp = pend_gsq->iface;
1893
1894 gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
1895 pend_gsq->s_bit);
1896
1897 gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
1898 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
1899}
1900
/* Batching slack timer expired - send the accumulated source list now. */
static void gm_t_gsq_pend(struct thread *t)
{
	gm_send_specific(THREAD_ARG(t));
}
1907
/* Send (or batch up) a specific query for one S,G / *,G, triggered by a
 * member leaving.  Re-arms itself until sg->n_query runs out; only actually
 * transmits when we are the elected querier and the VRF socket is open.
 */
1908static void gm_trigger_specific(struct gm_sg *sg)
1909{
1910	struct gm_if *gm_ifp = sg->iface;
1911	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1912	struct gm_gsq_pending *pend_gsq, ref = {};
1913
1914	sg->n_query--;
1915	if (sg->n_query)
1916		thread_add_timer_msec(router->master, gm_t_sg_query, sg,
1917				      gm_ifp->cur_query_intv_trig,
1918				      &sg->t_sg_query);
1919
	/* not the querier, or socket gone - count down but don't send */
1920	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
1921		return;
df655593 1922	if (gm_ifp->pim->gm_socket == -1)
5e5034b0
DL
1923		return;
1924
1925	if (PIM_DEBUG_IGMP_TRACE)
1926		zlog_debug(log_sg(sg, "triggered query"));
1927
	/* *,G queries go out immediately; S,G queries are batched for the
	 * fuzz interval so several sources can share one query packet
	 */
1928	if (pim_addr_is_any(sg->sgaddr.src)) {
1929		gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
1930		return;
1931	}
1932
1933	ref.grp = sg->sgaddr.grp;
1934	ref.s_bit = sg->query_sbit;
1935
1936	pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
1937	if (!pend_gsq) {
1938		pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
1939		pend_gsq->grp = sg->sgaddr.grp;
1940		pend_gsq->s_bit = sg->query_sbit;
1941		pend_gsq->iface = gm_ifp;
1942		gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);
1943
1944		thread_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
1945				    &gm_ifp->cfg_timing_fuzz,
1946				    &pend_gsq->t_send);
1947	}
1948
1949	assert(pend_gsq->n_src < array_size(pend_gsq->srcs));
1950
1951	pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
1952	pend_gsq->n_src++;
1953
	/* batch full - flush right away instead of waiting for the timer */
1954	if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
1955		THREAD_OFF(pend_gsq->t_send);
1956		gm_send_specific(pend_gsq);
1957		pend_gsq = NULL;
1958	}
1959}
1960
df655593 1961static void gm_vrf_socket_incref(struct pim_instance *pim)
5e5034b0 1962{
df655593 1963 struct vrf *vrf = pim->vrf;
5e5034b0
DL
1964 int ret, intval;
1965 struct icmp6_filter filter[1];
1966
df655593
DL
1967 if (pim->gm_socket_if_count++ && pim->gm_socket != -1)
1968 return;
5e5034b0
DL
1969
1970 ICMP6_FILTER_SETBLOCKALL(filter);
1971 ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter);
1972 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter);
1973 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter);
1974 ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter);
1975
1976 frr_with_privs (&pimd_privs) {
df655593
DL
1977 pim->gm_socket = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
1978 vrf->vrf_id, vrf->name);
1979 if (pim->gm_socket < 0) {
1980 zlog_err("(VRF %s) could not create MLD socket: %m",
1981 vrf->name);
5e5034b0
DL
1982 return;
1983 }
1984
df655593
DL
1985 ret = setsockopt(pim->gm_socket, SOL_ICMPV6, ICMP6_FILTER,
1986 filter, sizeof(filter));
5e5034b0 1987 if (ret)
df655593
DL
1988 zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
1989 vrf->name);
5e5034b0
DL
1990
1991 intval = 1;
df655593 1992 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVPKTINFO,
5e5034b0
DL
1993 &intval, sizeof(intval));
1994 if (ret)
df655593
DL
1995 zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
1996 vrf->name);
5e5034b0
DL
1997
1998 intval = 1;
df655593 1999 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPOPTS,
5e5034b0
DL
2000 &intval, sizeof(intval));
2001 if (ret)
df655593
DL
2002 zlog_err("(VRF %s) failed to set IPV6_HOPOPTS: %m",
2003 vrf->name);
5e5034b0
DL
2004
2005 intval = 1;
df655593 2006 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPLIMIT,
5e5034b0
DL
2007 &intval, sizeof(intval));
2008 if (ret)
df655593
DL
2009 zlog_err("(VRF %s) failed to set IPV6_HOPLIMIT: %m",
2010 vrf->name);
5e5034b0
DL
2011
2012 intval = 1;
df655593 2013 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_LOOP,
5e5034b0
DL
2014 &intval, sizeof(intval));
2015 if (ret)
2016 zlog_err(
df655593
DL
2017 "(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
2018 vrf->name);
5e5034b0
DL
2019
2020 intval = 1;
df655593 2021 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_HOPS,
5e5034b0
DL
2022 &intval, sizeof(intval));
2023 if (ret)
df655593
DL
2024 zlog_err(
2025 "(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
2026 vrf->name);
5e5034b0
DL
2027
2028 /* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
2029 * RX filtering in Linux. It only means "receive all groups
2030 * that something on the system has joined". To actually
2031 * receive *all* MLD packets - which is what we need -
2032 * multicast routing must be enabled on the interface. And
2033 * this only works for MLD packets specifically.
2034 *
2035 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
2036 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
2037 *
2038 * Also note that the code there explicitly checks for the IPv6
2039 * router alert MLD option (which is required by the RFC to be
2040 * on MLD packets.) That implies trying to support hosts which
2041 * erroneously don't add that option is just not possible.
2042 */
2043 intval = 1;
df655593 2044 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_ALL,
5e5034b0
DL
2045 &intval, sizeof(intval));
2046 if (ret)
2047 zlog_info(
df655593
DL
2048 "(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
2049 vrf->name);
2050 }
5e5034b0 2051
df655593
DL
2052 thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
2053 &pim->t_gm_recv);
2054}
2055
2056static void gm_vrf_socket_decref(struct pim_instance *pim)
2057{
2058 if (--pim->gm_socket_if_count)
2059 return;
2060
2061 THREAD_OFF(pim->t_gm_recv);
2062 close(pim->gm_socket);
2063 pim->gm_socket = -1;
2064}
2065
/* Bring up MLD on an interface: take a VRF socket reference, allocate the
 * per-interface state, seed timing configuration from the pim_interface
 * settings, and join ff02::16 (all-MLDv2-routers) to receive reports.
 */
2066static void gm_start(struct interface *ifp)
2067{
2068	struct pim_interface *pim_ifp = ifp->info;
2069	struct gm_if *gm_ifp;
2070
2071	assert(pim_ifp);
2072	assert(pim_ifp->pim);
2073	assert(pim_ifp->mroute_vif_index >= 0);
2074	assert(!pim_ifp->mld);
2075
2076	gm_vrf_socket_incref(pim_ifp->pim);
2077
2078	gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp));
2079	gm_ifp->ifp = ifp;
2080	pim_ifp->mld = gm_ifp;
2081	gm_ifp->pim = pim_ifp->pim;
2082	monotime(&gm_ifp->started);
2083
2084	zlog_info(log_ifp("starting MLD"));
2085
2086	if (pim_ifp->mld_version == 1)
2087		gm_ifp->cur_version = GM_MLDV1;
2088	else
2089		gm_ifp->cur_version = GM_MLDV2;
2090
2091	/* hardcoded for dev without CLI */
2092	gm_ifp->cur_qrv = 2;
2093	gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
2094	gm_ifp->cur_query_intv_trig = gm_ifp->cur_query_intv;
2095	gm_ifp->cur_max_resp = 250;
2096
	/* 10ms RX-before-query tolerance, see gm_sg_check_recent() */
2097	gm_ifp->cfg_timing_fuzz.tv_sec = 0;
2098	gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;
2099
2100	gm_sgs_init(gm_ifp->sgs);
2101	gm_subscribers_init(gm_ifp->subscribers);
2102	gm_packet_expires_init(gm_ifp->expires);
2103	gm_grp_pends_init(gm_ifp->grp_pends);
2104	gm_gsq_pends_init(gm_ifp->gsq_pends);
2105
2106	frr_with_privs (&pimd_privs) {
5e5034b0 2107		struct ipv6_mreq mreq;
df655593 2108		int ret;
5e5034b0
DL
2109
2110		/* all-MLDv2 group */
2111		mreq.ipv6mr_multiaddr = gm_all_routers;
2112		mreq.ipv6mr_interface = ifp->ifindex;
df655593
DL
2113		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
2114				 IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
5e5034b0
DL
2115		if (ret)
2116			zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
2117				 ifp->name);
2118	}
5e5034b0
DL
2119}
2120
/* Stop MLD on an interface and free all associated state.  Safe to call
 * when MLD was never started on the interface (no-op in that case).
 *
 * Order matters below: all pending packets are dropped before the S,G
 * entries are popped, so the subscriber-list-empty asserts can hold.
 */
void gm_ifp_teardown(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;
	struct gm_packet_state *pkt;
	struct gm_grp_pending *pend_grp;
	struct gm_gsq_pending *pend_gsq;
	struct gm_subscriber *subscriber;
	struct gm_sg *sg;

	if (!pim_ifp || !pim_ifp->mld)
		return;

	gm_ifp = pim_ifp->mld;
	/* flag suppresses further processing while we tear down */
	gm_ifp->stopping = true;
	if (PIM_DEBUG_IGMP_EVENTS)
		zlog_debug(log_ifp("MLD stop"));

	THREAD_OFF(gm_ifp->t_query);
	THREAD_OFF(gm_ifp->t_other_querier);
	THREAD_OFF(gm_ifp->t_expire);

	/* mirror of the IPV6_JOIN_GROUP done in gm_start() */
	frr_with_privs (&pimd_privs) {
		struct ipv6_mreq mreq;
		int ret;

		/* all-MLDv2 group */
		mreq.ipv6mr_multiaddr = gm_all_routers;
		mreq.ipv6mr_interface = ifp->ifindex;
		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
				 IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
		if (ret)
			zlog_err(
				"(%s) failed to leave ff02::16 (all-MLDv2): %m",
				ifp->name);
	}

	/* may close the per-VRF socket if we were the last user */
	gm_vrf_socket_decref(gm_ifp->pim);

	while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
		gm_packet_drop(pkt, false);

	while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
		THREAD_OFF(pend_grp->t_expire);
		XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
	}

	while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
		THREAD_OFF(pend_gsq->t_send);
		XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
	}

	while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
		THREAD_OFF(sg->t_sg_expire);
		/* all packets were dropped above, so no subs may remain */
		assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
			&sg->sgaddr);
		assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
			&sg->sgaddr);

		gm_sg_free(sg);
	}

	while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
		assertf(!gm_packets_count(subscriber->packets), "%pPA",
			&subscriber->addr);
		XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
	}

	/* NOTE(review): gm_gsq_pends_fini() is not called here, unlike the
	 * other containers — looks like an omission, verify whether the
	 * typesafe-list fini is required after the pop loop above.
	 */
	gm_grp_pends_fini(gm_ifp->grp_pends);
	gm_packet_expires_fini(gm_ifp->expires);
	gm_subscribers_fini(gm_ifp->subscribers);
	gm_sgs_fini(gm_ifp->sgs);

	XFREE(MTYPE_GM_IFACE, gm_ifp);
	pim_ifp->mld = NULL;
}
2197
/* React to a change of the interface's lowest link-local address (the
 * address MLD queries are sourced from).  Re-evaluates querier election:
 * if we were the querier, stay querier from the new address; otherwise
 * take over only if the new address wins the election (lower address).
 */
static void gm_update_ll(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;
	bool was_querier;

	/* we were querier iff the elected querier was our own lowest LL
	 * (and an election had actually happened, i.e. querier != ::)
	 */
	was_querier =
		!IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
		!pim_addr_is_any(gm_ifp->querier);

	gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
	if (was_querier)
		gm_ifp->querier = pim_ifp->ll_lowest;
	THREAD_OFF(gm_ifp->t_query);

	/* no link-local address left: cannot source queries at all */
	if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
		if (was_querier)
			zlog_info(log_ifp(
				"lost link-local address, stopping querier"));
		return;
	}

	if (was_querier)
		zlog_info(log_ifp("new link-local %pPA while querier"),
			  &gm_ifp->cur_ll_lowest);
	else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
		 pim_addr_is_any(gm_ifp->querier)) {
		/* lowest address wins the querier election */
		zlog_info(log_ifp("new link-local %pPA, becoming querier"),
			  &gm_ifp->cur_ll_lowest);
		gm_ifp->querier = gm_ifp->cur_ll_lowest;
	} else
		return;

	/* restart as startup querier from the new address, immediately */
	gm_ifp->n_startup = gm_ifp->cur_qrv;
	thread_execute(router->master, gm_t_query, gm_ifp, 0);
}
2234
/* Reconcile per-interface MLD state with the interface's operational
 * status and current configuration.  Starts MLD if it should run and
 * doesn't, tears it down if it shouldn't, and applies config changes
 * (query interval, MLD version) with a querier bump when needed.
 */
void gm_ifp_update(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;
	bool changed = false;

	if (!pim_ifp)
		return;
	/* interface down or PIM not (yet) fully configured: stop MLD;
	 * these checks also guarantee gm_start()'s asserts hold below
	 */
	if (!if_is_operative(ifp) || !pim_ifp->pim ||
	    pim_ifp->mroute_vif_index < 0) {
		gm_ifp_teardown(ifp);
		return;
	}

	if (!pim_ifp->mld)
		gm_start(ifp);

	gm_ifp = pim_ifp->mld;
	/* link-local address change may shift the querier election */
	if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
		gm_update_ll(ifp);

	/* config is in seconds, running state in milliseconds */
	unsigned int cfg_query_intv = pim_ifp->gm_default_query_interval * 1000;

	if (gm_ifp->cur_query_intv != cfg_query_intv) {
		gm_ifp->cur_query_intv = cfg_query_intv;
		gm_ifp->cur_query_intv_trig = cfg_query_intv;
		changed = true;
	}

	enum gm_version cfg_version;

	if (pim_ifp->mld_version == 1)
		cfg_version = GM_MLDV1;
	else
		cfg_version = GM_MLDV2;
	if (gm_ifp->cur_version != cfg_version) {
		gm_ifp->cur_version = cfg_version;
		changed = true;
	}

	if (changed) {
		if (PIM_DEBUG_IGMP_TRACE)
			zlog_debug(log_ifp(
				"MLD querier config changed, querying"));
		gm_bump_querier(gm_ifp);
	}
}
2282
d2951219
DL
2283/*
2284 * CLI (show commands only)
2285 */
5e5034b0
DL
2286
2287#include "lib/command.h"
2288
2289#ifndef VTYSH_EXTRACT_PL
2290#include "pimd/pim6_mld_clippy.c"
2291#endif
2292
d2951219
DL
2293#define MLD_STR "Multicast Listener Discovery\n"
2294
2295static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
2296 int *err)
2297{
2298 struct vrf *ret;
2299
2300 if (!vrf_str)
2301 return vrf_lookup_by_id(VRF_DEFAULT);
2302 if (!strcmp(vrf_str, "all"))
2303 return NULL;
2304 ret = vrf_lookup_by_name(vrf_str);
2305 if (ret)
2306 return ret;
2307
2308 vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str);
2309 *err = CMD_WARNING;
2310 return NULL;
2311}
2312
/* Plain-text detailed dump of one interface's MLD state (the "detail"
 * variant of "show ipv6 mld interface"; JSON output goes through
 * gm_show_if_one() instead).
 */
static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp)
{
	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
	struct gm_if *gm_ifp;
	bool querier;
	size_t i;

	if (!pim_ifp) {
		vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name);
		return;
	}

	gm_ifp = pim_ifp->mld;
	if (!gm_ifp) {
		vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name);
		return;
	}

	/* this system is querier iff the elected querier is our lowest LL */
	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);

	vty_out(vty, "Interface %s: MLD running\n", ifp->name);
	vty_out(vty, " Uptime: %pTVMs\n", &gm_ifp->started);
	vty_out(vty, " MLD version: %d\n", gm_ifp->cur_version);
	vty_out(vty, " Querier: %pPA%s\n", &gm_ifp->querier,
		querier ? " (this system)" : "");
	vty_out(vty, " Query timer: %pTH\n", gm_ifp->t_query);
	vty_out(vty, " Other querier timer: %pTH\n",
		gm_ifp->t_other_querier);
	vty_out(vty, " Robustness value: %u\n", gm_ifp->cur_qrv);
	vty_out(vty, " Query interval: %ums\n",
		gm_ifp->cur_query_intv);
	vty_out(vty, " Query response timer: %ums\n", gm_ifp->cur_max_resp);
	vty_out(vty, " Last member query intv.: %ums\n",
		gm_ifp->cur_query_intv_trig);
	/* pending[] tracks expiry deadlines from recent general queries */
	vty_out(vty, " %u expiry timers from general queries:\n",
		gm_ifp->n_pending);
	for (i = 0; i < gm_ifp->n_pending; i++) {
		struct gm_general_pending *p = &gm_ifp->pending[i];

		vty_out(vty, " %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
			&p->query, &p->expiry);
	}
	vty_out(vty, " %zu expiry timers from *,G queries\n",
		gm_grp_pends_count(gm_ifp->grp_pends));
	vty_out(vty, " %zu expiry timers from S,G queries\n",
		gm_gsq_pends_count(gm_ifp->gsq_pends));
	vty_out(vty, " %zu total *,G/S,G from %zu hosts in %zu bundles\n",
		gm_sgs_count(gm_ifp->sgs),
		gm_subscribers_count(gm_ifp->subscribers),
		gm_packet_expires_count(gm_ifp->expires));
	vty_out(vty, "\n");
}
2365
/* One-line (text) or one-object (JSON) summary of an interface's MLD
 * state.  Caller guarantees ifp->info is non-NULL (checked in
 * gm_show_if_vrf()); js_if == NULL selects plain-text output.
 */
static void gm_show_if_one(struct vty *vty, struct interface *ifp,
			   json_object *js_if)
{
	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;
	bool querier;

	if (!gm_ifp) {
		if (js_if)
			json_object_string_add(js_if, "state", "down");
		else
			vty_out(vty, "%-16s %5s\n", ifp->name, "down");
		return;
	}

	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);

	if (js_if) {
		json_object_string_add(js_if, "state", "up");
		json_object_string_addf(js_if, "version", "%d",
					gm_ifp->cur_version);
		json_object_string_addf(js_if, "upTime", "%pTVMs",
					&gm_ifp->started);
		json_object_boolean_add(js_if, "querier", querier);
		json_object_string_addf(js_if, "querierIp", "%pPA",
					&gm_ifp->querier);
		/* only the timer relevant to our querier role is shown */
		if (querier)
			json_object_string_addf(js_if, "queryTimer", "%pTH",
						gm_ifp->t_query);
		else
			json_object_string_addf(js_if, "otherQuerierTimer",
						"%pTH",
						gm_ifp->t_other_querier);
	} else {
		vty_out(vty, "%-16s %-5s %d %-25pPA %-5s %11pTH %pTVMs\n",
			ifp->name, "up", gm_ifp->cur_version, &gm_ifp->querier,
			querier ? "query" : "other",
			querier ? gm_ifp->t_query : gm_ifp->t_other_querier,
			&gm_ifp->started);
	}
}
2407
2408static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname,
2409 bool detail, json_object *js)
2410{
2411 struct interface *ifp;
2412 json_object *js_vrf;
2413
2414 if (js) {
2415 js_vrf = json_object_new_object();
2416 json_object_object_add(js, vrf->name, js_vrf);
2417 }
2418
2419 FOR_ALL_INTERFACES (vrf, ifp) {
2420 json_object *js_if = NULL;
2421
2422 if (ifname && strcmp(ifp->name, ifname))
2423 continue;
2424 if (detail && !js) {
2425 gm_show_if_one_detail(vty, ifp);
2426 continue;
2427 }
2428
2429 if (!ifp->info)
2430 continue;
2431 if (js) {
2432 js_if = json_object_new_object();
2433 json_object_object_add(js_vrf, ifp->name, js_if);
2434 }
2435
2436 gm_show_if_one(vty, ifp, js_if);
2437 }
2438}
2439
2440static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname,
2441 bool detail, json_object *js)
2442{
2443 if (!js && !detail)
2444 vty_out(vty, "%-16s %-5s V %-25s %-18s %s\n", "Interface",
2445 "State", "Querier", "Timer", "Uptime");
2446
2447 if (vrf)
2448 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2449 else
2450 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2451 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2452}
2453
2454DEFPY(gm_show_interface,
2455 gm_show_interface_cmd,
2456 "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME] [detail$detail|json$json]",
2457 DEBUG_STR
2458 SHOW_STR
2459 IPV6_STR
2460 MLD_STR
2461 VRF_FULL_CMD_HELP_STR
2462 "MLD interface information\n"
2463 "Detailed output\n"
2464 JSON_STR)
2465{
2466 int ret = CMD_SUCCESS;
2467 struct vrf *vrf;
2468 json_object *js = NULL;
2469
2470 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2471 if (ret != CMD_SUCCESS)
2472 return ret;
2473
2474 if (json)
2475 js = json_object_new_object();
2476 gm_show_if(vty, vrf, ifname, !!detail, js);
2477 return vty_json(vty, js);
2478}
2479
/* Emit the per-interface MLD RX/TX statistics, either as text lines or
 * JSON keys.  The text label / JSON key / counter triple is kept in one
 * local table so the two output formats can never drift apart.
 */
static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp,
			      json_object *js_if)
{
	struct gm_if_stats *stats = &gm_ifp->stats;
	/* clang-format off */
	struct {
		const char *text;
		const char *js_key;
		uint64_t *val;
	} *item, items[] = {
		{ "v2 reports received", "rxV2Reports", &stats->rx_new_report },
		{ "v1 reports received", "rxV1Reports", &stats->rx_old_report },
		{ "v1 done received", "rxV1Done", &stats->rx_old_leave },

		{ "v2 *,* queries received", "rxV2QueryGeneral", &stats->rx_query_new_general },
		{ "v2 *,G queries received", "rxV2QueryGroup", &stats->rx_query_new_group },
		{ "v2 S,G queries received", "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc },
		{ "v2 S-bit queries received", "rxV2QuerySBit", &stats->rx_query_new_sbit },
		{ "v1 *,* queries received", "rxV1QueryGeneral", &stats->rx_query_old_general },
		{ "v1 *,G queries received", "rxV1QueryGroup", &stats->rx_query_old_group },

		{ "v2 *,* queries sent", "txV2QueryGeneral", &stats->tx_query_new_general },
		{ "v2 *,G queries sent", "txV2QueryGroup", &stats->tx_query_new_group },
		{ "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc },
		{ "v1 *,* queries sent", "txV1QueryGeneral", &stats->tx_query_old_general },
		{ "v1 *,G queries sent", "txV1QueryGroup", &stats->tx_query_old_group },
		{ "TX errors", "txErrors", &stats->tx_query_fail },

		{ "RX dropped (checksum error)", "rxDropChecksum", &stats->rx_drop_csum },
		{ "RX dropped (invalid source)", "rxDropSrcAddr", &stats->rx_drop_srcaddr },
		{ "RX dropped (invalid dest.)", "rxDropDstAddr", &stats->rx_drop_dstaddr },
		{ "RX dropped (missing alert)", "rxDropRtrAlert", &stats->rx_drop_ra },
		{ "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed },
		{ "RX truncated reports", "rxTruncatedRep", &stats->rx_trunc_report },
	};
	/* clang-format on */

	for (item = items; item < items + array_size(items); item++) {
		if (js_if)
			json_object_int_add(js_if, item->js_key, *item->val);
		else
			vty_out(vty, " %-30s %" PRIu64 "\n", item->text,
				*item->val);
	}
}
2525
2526static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf,
2527 const char *ifname, json_object *js)
2528{
2529 struct interface *ifp;
2530 json_object *js_vrf;
2531
2532 if (js) {
2533 js_vrf = json_object_new_object();
2534 json_object_object_add(js, vrf->name, js_vrf);
2535 }
2536
2537 FOR_ALL_INTERFACES (vrf, ifp) {
2538 struct pim_interface *pim_ifp;
2539 struct gm_if *gm_ifp;
2540 json_object *js_if = NULL;
2541
2542 if (ifname && strcmp(ifp->name, ifname))
2543 continue;
2544
2545 if (!ifp->info)
2546 continue;
2547 pim_ifp = ifp->info;
2548 if (!pim_ifp->mld)
2549 continue;
2550 gm_ifp = pim_ifp->mld;
2551
2552 if (js) {
2553 js_if = json_object_new_object();
2554 json_object_object_add(js_vrf, ifp->name, js_if);
2555 } else {
2556 vty_out(vty, "Interface: %s\n", ifp->name);
2557 }
2558 gm_show_stats_one(vty, gm_ifp, js_if);
2559 if (!js)
2560 vty_out(vty, "\n");
2561 }
2562}
2563
/* "show ipv6 mld [vrf <VRF|all>] statistics [interface IFNAME] [json]" */
DEFPY(gm_show_interface_stats,
      gm_show_interface_stats_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      "MLD statistics\n"
      INTERFACE_STR
      "Interface name\n"
      JSON_STR)
{
	int ret = CMD_SUCCESS;
	struct vrf *vrf;
	json_object *js = NULL;

	/* NULL vrf with ret == CMD_SUCCESS means "all VRFs" */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	if (json)
		js = json_object_new_object();

	if (vrf)
		gm_show_stats_vrf(vty, vrf, ifname, js);
	else
		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
			gm_show_stats_vrf(vty, vrf, ifname, js);
	return vty_json(vty, js);
}
2594
/* Render the joined (S,G)/(*,G) entries of one interface, either as text
 * rows or as nested JSON ({group: {source: {...}}}).
 *
 * groups/sources, when given, restrict output to matching prefixes;
 * since sgs is sorted by group, a non-matching group ends the walk
 * early, while a non-matching source only skips the entry.
 *
 * "untracked" joins (hosts for which explicit tracking is off) are kept
 * under one dummy subscriber (gm_dummy_untracked) and reported in their
 * own column/field rather than as individual subscribers.
 */
static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp,
			      const struct prefix_ipv6 *groups,
			      const struct prefix_ipv6 *sources, bool detail,
			      json_object *js_if)
{
	struct gm_sg *sg, *sg_start;
	json_object *js_group = NULL;
	pim_addr js_grpaddr = PIMADDR_ANY;
	struct gm_subscriber sub_ref = {}, *sub_untracked;

	if (groups) {
		struct gm_sg sg_ref = {};

		/* start the sorted walk at the first possibly-matching grp */
		sg_ref.sgaddr.grp = pim_addr_from_prefix(groups);
		sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
	} else
		sg_start = gm_sgs_first(gm_ifp->sgs);

	sub_ref.addr = gm_dummy_untracked;
	sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref);
	/* NB: sub_untracked may be NULL if no untracked joins exist */

	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
		struct timeval *recent = NULL, *untracked = NULL;
		json_object *js_src;

		if (groups) {
			struct prefix grp_p;

			pim_addr_to_prefix(&grp_p, sg->sgaddr.grp);
			/* sorted by group: first mismatch ends the walk */
			if (!prefix_match(groups, &grp_p))
				break;
		}

		if (sources) {
			struct prefix src_p;

			pim_addr_to_prefix(&src_p, sg->sgaddr.src);
			if (!prefix_match(sources, &src_p))
				continue;
		}

		/* timestamp of the most recent report covering this S,G */
		if (sg->most_recent) {
			struct gm_packet_state *packet;

			packet = gm_packet_sg2state(sg->most_recent);
			recent = &packet->received;
		}

		/* timestamp of the last untracked join for this S,G */
		if (sub_untracked) {
			struct gm_packet_state *packet;
			struct gm_packet_sg *item;

			item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked);
			if (item) {
				packet = gm_packet_sg2state(item);
				untracked = &packet->received;
			}
		}

		if (!js_if) {
			FMT_NSTD_BEGIN; /* %.0p */
			vty_out(vty,
				"%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
				&sg->sgaddr.grp, &sg->sgaddr.src,
				gm_states[sg->state], recent, untracked,
				&sg->created);

			if (!detail)
				continue;

			/* detail: list each tracked join/prune per host */
			struct gm_packet_sg *item;
			struct gm_packet_state *packet;

			frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
				packet = gm_packet_sg2state(item);

				if (packet->subscriber == sub_untracked)
					continue;
				vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
					&packet->subscriber->addr, "(JOIN)",
					&packet->received);
			}
			frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
				packet = gm_packet_sg2state(item);

				if (packet->subscriber == sub_untracked)
					continue;
				vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
					&packet->subscriber->addr, "(PRUNE)",
					&packet->received);
			}
			FMT_NSTD_END; /* %.0p */
			continue;
		}
		/* if (js_if) */

		/* open a new group object when the group address changes;
		 * works because the walk is sorted by group
		 */
		if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) {
			js_group = json_object_new_object();
			json_object_object_addf(js_if, js_group, "%pPA",
						&sg->sgaddr.grp);
			js_grpaddr = sg->sgaddr.grp;
		}

		js_src = json_object_new_object();
		json_object_object_addf(js_group, js_src, "%pPA",
					&sg->sgaddr.src);

		json_object_string_add(js_src, "state", gm_states[sg->state]);
		json_object_string_addf(js_src, "created", "%pTVMs",
					&sg->created);
		json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent);

		if (untracked)
			json_object_string_addf(js_src, "untrackedLastSeen",
						"%pTVMs", untracked);
		if (!detail)
			continue;

		/* detail: per-host join/prune lists as nested objects */
		json_object *js_subs;
		struct gm_packet_sg *item;
		struct gm_packet_state *packet;

		js_subs = json_object_new_object();
		json_object_object_add(js_src, "joinedBy", js_subs);
		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
			packet = gm_packet_sg2state(item);
			if (packet->subscriber == sub_untracked)
				continue;

			json_object *js_sub;

			js_sub = json_object_new_object();
			json_object_object_addf(js_subs, js_sub, "%pPA",
						&packet->subscriber->addr);
			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
						&packet->received);
		}

		js_subs = json_object_new_object();
		json_object_object_add(js_src, "prunedBy", js_subs);
		frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
			packet = gm_packet_sg2state(item);
			if (packet->subscriber == sub_untracked)
				continue;

			json_object *js_sub;

			js_sub = json_object_new_object();
			json_object_object_addf(js_subs, js_sub, "%pPA",
						&packet->subscriber->addr);
			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
						&packet->received);
		}
	}
}
2751
2752static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf,
2753 const char *ifname,
2754 const struct prefix_ipv6 *groups,
2755 const struct prefix_ipv6 *sources, bool detail,
2756 json_object *js)
2757{
2758 struct interface *ifp;
2759 json_object *js_vrf;
2760
2761 if (js) {
2762 js_vrf = json_object_new_object();
2763 json_object_object_add(js, vrf->name, js_vrf);
2764 }
2765
2766 FOR_ALL_INTERFACES (vrf, ifp) {
2767 struct pim_interface *pim_ifp;
2768 struct gm_if *gm_ifp;
2769 json_object *js_if = NULL;
2770
2771 if (ifname && strcmp(ifp->name, ifname))
2772 continue;
2773
2774 if (!ifp->info)
2775 continue;
2776 pim_ifp = ifp->info;
2777 if (!pim_ifp->mld)
2778 continue;
2779 gm_ifp = pim_ifp->mld;
2780
2781 if (js) {
2782 js_if = json_object_new_object();
2783 json_object_object_add(js_vrf, ifp->name, js_if);
2784 }
2785
2786 if (!js && !ifname)
2787 vty_out(vty, "\nOn interface %s:\n", ifp->name);
2788
2789 gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if);
2790 }
2791}
2792
/* "show ipv6 mld [vrf ...] joins [{interface ...|groups ...|sources ...|detail}] [json]" */
DEFPY(gm_show_interface_joins,
      gm_show_interface_joins_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      "MLD joined groups & sources\n"
      INTERFACE_STR
      "Interface name\n"
      "Limit output to group range\n"
      "Show groups covered by this prefix\n"
      "Limit output to source range\n"
      "Show sources covered by this prefix\n"
      "Show details, including tracked receivers\n"
      JSON_STR)
{
	int ret = CMD_SUCCESS;
	struct vrf *vrf;
	json_object *js = NULL;

	/* NULL vrf with ret == CMD_SUCCESS means "all VRFs" */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	if (json)
		js = json_object_new_object();
	else
		vty_out(vty, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
			"Source", "State", "LastSeen", "NonTrkSeen", "Created");

	if (vrf)
		gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail,
				  js);
	else
		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
			gm_show_joins_vrf(vty, vrf, ifname, groups, sources,
					  !!detail, js);
	return vty_json(vty, js);
}
2833
5e5034b0
DL
2834DEFPY(gm_debug_show,
2835 gm_debug_show_cmd,
2836 "debug show mld interface IFNAME",
2837 DEBUG_STR
2838 SHOW_STR
2839 "MLD"
2840 INTERFACE_STR
2841 "interface name")
2842{
2843 struct interface *ifp;
2844 struct pim_interface *pim_ifp;
2845 struct gm_if *gm_ifp;
2846
2847 ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
2848 if (!ifp) {
2849 vty_out(vty, "%% no such interface: %pSQq\n", ifname);
2850 return CMD_WARNING;
2851 }
2852
2853 pim_ifp = ifp->info;
2854 if (!pim_ifp) {
2855 vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname);
2856 return CMD_WARNING;
2857 }
2858
2859 gm_ifp = pim_ifp->mld;
2860 if (!gm_ifp) {
2861 vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname);
2862 return CMD_WARNING;
2863 }
2864
2865 vty_out(vty, "querier: %pPA\n", &gm_ifp->querier);
2866 vty_out(vty, "ll_lowest: %pPA\n\n", &pim_ifp->ll_lowest);
2867 vty_out(vty, "t_query: %pTHD\n", gm_ifp->t_query);
2868 vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier);
5e5034b0
DL
2869 vty_out(vty, "t_expire: %pTHD\n", gm_ifp->t_expire);
2870
2871 vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending);
2872 for (size_t i = 0; i < gm_ifp->n_pending; i++) {
2873 int64_t query, expiry;
2874
2875 query = monotime_since(&gm_ifp->pending[i].query, NULL);
2876 expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL);
2877
2878 vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n",
2879 i, query / 1000, expiry / 1000);
2880 }
2881
2882 struct gm_sg *sg;
2883 struct gm_packet_state *pkt;
2884 struct gm_packet_sg *item;
2885 struct gm_subscriber *subscriber;
2886
2887 vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs));
2888 frr_each (gm_sgs, gm_ifp->sgs, sg) {
2889 vty_out(vty, "\t%pSG t_expire=%pTHD\n", &sg->sgaddr,
2890 sg->t_sg_expire);
2891
2892 vty_out(vty, "\t @pos:%zu\n",
2893 gm_packet_sg_subs_count(sg->subs_positive));
2894 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2895 pkt = gm_packet_sg2state(item);
2896
2897 vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n",
2898 item->is_src ? "S" : "",
2899 item->is_excl ? "E" : "",
2900 &pkt->subscriber->addr, pkt->subscriber, pkt,
2901 item->offset);
2902
2903 assert(item->sg == sg);
2904 }
2905 vty_out(vty, "\t @neg:%zu\n",
2906 gm_packet_sg_subs_count(sg->subs_negative));
2907 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2908 pkt = gm_packet_sg2state(item);
2909
2910 vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n",
2911 item->is_src ? "S" : "",
2912 item->is_excl ? "E" : "",
2913 &pkt->subscriber->addr, pkt->subscriber, pkt,
2914 item->offset);
2915
2916 assert(item->sg == sg);
2917 }
2918 }
2919
2920 vty_out(vty, "\n%zu subscribers:\n",
2921 gm_subscribers_count(gm_ifp->subscribers));
2922 frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) {
2923 vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr,
2924 subscriber, gm_packets_count(subscriber->packets));
2925
2926 frr_each (gm_packets, subscriber->packets, pkt) {
2927 vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n",
2928 pkt,
2929 monotime_since(&pkt->received, NULL) *
2930 0.000001f,
2931 pkt->n_active, pkt->n_sg);
2932
2933 for (size_t i = 0; i < pkt->n_sg; i++) {
2934 item = pkt->items + i;
2935
2936 vty_out(vty, "\t\t[%zu]", i);
2937
2938 if (!item->sg) {
2939 vty_out(vty, " inactive\n");
2940 continue;
2941 }
2942
2943 vty_out(vty, " %s%s %pSG nE=%u\n",
2944 item->is_src ? "S" : "",
2945 item->is_excl ? "E" : "",
2946 &item->sg->sgaddr, item->n_exclude);
2947 }
2948 }
2949 }
2950
2951 return CMD_SUCCESS;
2952}
2953
/* Runtime-only debug knob: tweak MLD robustness (QRV) and maximum query
 * response time on an interface without real config plumbing.  Changed
 * values take effect immediately and trigger a querier bump (re-query).
 */
DEFPY(gm_debug_iface_cfg,
      gm_debug_iface_cfg_cmd,
      "debug ipv6 mld {"
      "robustness (0-7)|"
      "query-max-response-time (1-8387584)"
      "}",
      DEBUG_STR
      IPV6_STR
      "Multicast Listener Discovery\n"
      "QRV\nQRV\n"
      "maxresp\nmaxresp\n")
{
	VTY_DECLVAR_CONTEXT(interface, ifp);
	struct pim_interface *pim_ifp;
	struct gm_if *gm_ifp;
	bool changed = false;

	pim_ifp = ifp->info;
	if (!pim_ifp) {
		vty_out(vty, "%% no PIM state for interface %pSQq\n",
			ifp->name);
		return CMD_WARNING;
	}
	gm_ifp = pim_ifp->mld;
	if (!gm_ifp) {
		vty_out(vty, "%% no MLD state for interface %pSQq\n",
			ifp->name);
		return CMD_WARNING;
	}

	/* the *_str variables tell us whether the keyword was given at all */
	if (robustness_str && gm_ifp->cur_qrv != robustness) {
		gm_ifp->cur_qrv = robustness;
		changed = true;
	}
	if (query_max_response_time_str &&
	    gm_ifp->cur_max_resp != (unsigned int)query_max_response_time) {
		gm_ifp->cur_max_resp = query_max_response_time;
		changed = true;
	}

	if (changed) {
		vty_out(vty, "%% MLD querier config changed, bumping\n");
		gm_bump_querier(gm_ifp);
	}
	return CMD_SUCCESS;
}
3000
/* prototype kept here: the MLD CLI has no header of its own */
void gm_cli_init(void);

/* Register all MLD show/debug commands with the CLI graph. */
void gm_cli_init(void)
{
	/* operational show commands */
	install_element(VIEW_NODE, &gm_show_interface_cmd);
	install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
	install_element(VIEW_NODE, &gm_show_interface_joins_cmd);

	/* developer/debug commands */
	install_element(VIEW_NODE, &gm_debug_show_cmd);
	install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
}