]> git.proxmox.com Git - mirror_frr.git/blame - pimd/pim6_mld.c
lib: add CMD_ATTR_NOSH
[mirror_frr.git] / pimd / pim6_mld.c
CommitLineData
5e5034b0
DL
1/*
2 * PIMv6 MLD querier
3 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; see the file COPYING; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20/*
21 * keep pim6_mld.h open when working on this code. Most data structures are
22 * commented in the header.
23 *
24 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
25 * that this code will replace the old IGMP querier at some point.
26 */
27
28#include <zebra.h>
29#include <netinet/ip6.h>
30
31#include "lib/memory.h"
32#include "lib/jhash.h"
33#include "lib/prefix.h"
34#include "lib/checksum.h"
35#include "lib/thread.h"
cdc1b770 36#include "termtable.h"
5e5034b0
DL
37
38#include "pimd/pim6_mld.h"
39#include "pimd/pim6_mld_protocol.h"
40#include "pimd/pim_memory.h"
41#include "pimd/pim_instance.h"
42#include "pimd/pim_iface.h"
cdc1b770
SG
43#include "pimd/pim6_cmd.h"
44#include "pimd/pim_cmd_common.h"
5e5034b0
DL
45#include "pimd/pim_util.h"
46#include "pimd/pim_tib.h"
47#include "pimd/pimd.h"
48
/* fallback for system headers that don't define this socket option.
 * NOTE(review): value 29 matches the Linux IPV6_MULTICAST_ALL constant -
 * confirm against the target platform's headers.
 */
#ifndef IPV6_MULTICAST_ALL
#define IPV6_MULTICAST_ALL 29
#endif

/* memory types for the MLD querier's allocations */
DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface");
DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet");
DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber");
DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state");
DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)");
DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");

/* forward declarations for timer/query helpers defined further down */
static void gm_t_query(struct thread *t);
static void gm_trigger_specific(struct gm_sg *sg);
static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
			      struct timeval expire_wait);

/* shorthand for log messages; each macro expects the correspondingly named
 * variable (gm_ifp / pkt_src / sg) to be in scope at the call site
 */
#define log_ifp(msg) \
	"[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
#define log_pkt_src(msg) \
	"[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
		&pkt_src->sin6_addr
#define log_sg(sg, msg) \
	"[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
		sg->iface->ifp->name, &sg->sgaddr
/* clang-format off */
#if PIM_IPV == 6
/* ff02::1 - all-nodes link-local multicast group */
static const pim_addr gm_all_hosts = {
	.s6_addr = {
		0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
	},
};
/* ff02::16 - all MLDv2-capable routers */
static const pim_addr gm_all_routers = {
	.s6_addr = {
		0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
	},
};
/* MLDv1 does not allow subscriber tracking due to report suppression
 * hence, the source address is replaced with ffff:...:ffff
 */
static const pim_addr gm_dummy_untracked = {
	.s6_addr = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	},
};
#else
/* 224.0.0.1 */
static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), };
/* 224.0.0.22 */
static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), };
static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, };
#endif
/* clang-format on */
107
/* scope value 2 = link-local in the IPv6 multicast address format */
#define IPV6_MULTICAST_SCOPE_LINK 2

/* extract the 4-bit scope field from an IPv6 multicast address
 * (low nibble of the second address byte)
 */
static inline uint8_t in6_multicast_scope(const pim_addr *addr)
{
	return addr->s6_addr[1] & 0xf;
}

/* groups at link-local scope or narrower are never forwarded off-link */
static inline bool in6_multicast_nofwd(const pim_addr *addr)
{
	return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK;
}
119
120/*
121 * (S,G) -> subscriber,(S,G)
122 */
123
124static int gm_packet_sg_cmp(const struct gm_packet_sg *a,
125 const struct gm_packet_sg *b)
126{
127 const struct gm_packet_state *s_a, *s_b;
128
129 s_a = gm_packet_sg2state(a);
130 s_b = gm_packet_sg2state(b);
131 return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr);
132}
133
134DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm,
135 gm_packet_sg_cmp);
136
/* find the entry a given subscriber has for this (S,G) in either the
 * positive (include/join) or negative (exclude) subscription tree.
 *
 * gm_packet_sg_cmp() reaches the subscriber through the item's containing
 * gm_packet_state via offset arithmetic (gm_packet_sg2state), so the lookup
 * reference must be a real header+item pair with item.offset = 0 so the
 * offset math lands on our stack header.
 */
static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg,
					      enum gm_sub_sense sense,
					      struct gm_subscriber *sub)
{
	struct {
		struct gm_packet_state hdr;
		struct gm_packet_sg item;
	} ref = {
		/* clang-format off */
		.hdr = {
			.subscriber = sub,
		},
		.item = {
			.offset = 0,
		},
		/* clang-format on */
	};

	return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item);
}
157
/*
 * interface -> (*,G),pending
 */

/* pending group-specific queries are ordered by group address only */
static int gm_grp_pending_cmp(const struct gm_grp_pending *a,
			      const struct gm_grp_pending *b)
{
	return IPV6_ADDR_CMP(&a->grp, &b->grp);
}

DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm,
		    gm_grp_pending_cmp);
170
171/*
172 * interface -> ([S1,S2,...],G),pending
173 */
174
175static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a,
176 const struct gm_gsq_pending *b)
177{
178 if (a->s_bit != b->s_bit)
179 return numcmp(a->s_bit, b->s_bit);
180
181 return IPV6_ADDR_CMP(&a->grp, &b->grp);
182}
183
184static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a)
185{
186 uint32_t seed = a->s_bit ? 0x68f0eb5e : 0x156b7f19;
187
188 return jhash(&a->grp, sizeof(a->grp), seed);
189}
190
191DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp,
192 gm_gsq_pending_hash);
193
/*
 * interface -> (S,G)
 */

/* order the per-interface (S,G) tree by the full source+group pair */
static int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b)
{
	return pim_sgaddr_cmp(a->sgaddr, b->sgaddr);
}

DECLARE_RBTREE_UNIQ(gm_sgs, struct gm_sg, itm, gm_sg_cmp);
204
205static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
206 pim_addr src)
207{
208 struct gm_sg ref = {};
209
210 ref.sgaddr.grp = grp;
211 ref.sgaddr.src = src;
212 return gm_sgs_find(gm_ifp->sgs, &ref);
213}
214
215static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
216 pim_addr src)
217{
218 struct gm_sg *ret, *prev;
219
220 ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
221 ret->sgaddr.grp = grp;
222 ret->sgaddr.src = src;
223 ret->iface = gm_ifp;
224 prev = gm_sgs_add(gm_ifp->sgs, ret);
225
226 if (prev) {
227 XFREE(MTYPE_GM_SG, ret);
228 ret = prev;
229 } else {
aa2f9349 230 monotime(&ret->created);
5e5034b0
DL
231 gm_packet_sg_subs_init(ret->subs_positive);
232 gm_packet_sg_subs_init(ret->subs_negative);
233 }
234 return ret;
235}
236
/*
 * interface -> packets, sorted by expiry (because add_tail insert order)
 */

DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm);

/*
 * subscriber -> packets
 */

DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm);

/*
 * interface -> subscriber
 */

/* subscribers (reporting hosts) are identified by their source address */
static int gm_subscriber_cmp(const struct gm_subscriber *a,
			     const struct gm_subscriber *b)
{
	return IPV6_ADDR_CMP(&a->addr, &b->addr);
}

static uint32_t gm_subscriber_hash(const struct gm_subscriber *a)
{
	return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4);
}

DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp,
	     gm_subscriber_hash);
266
267static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp,
268 pim_addr addr)
269{
270 struct gm_subscriber ref = {}, *ret;
271
272 ref.addr = addr;
273 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
274 if (ret)
275 ret->refcount++;
276 return ret;
277}
278
279static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp,
280 pim_addr addr)
281{
282 struct gm_subscriber ref = {}, *ret;
283
284 ref.addr = addr;
285 ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
286
287 if (!ret) {
288 ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret));
289 ret->iface = gm_ifp;
290 ret->addr = addr;
291 ret->refcount = 1;
aa2f9349 292 monotime(&ret->created);
5e5034b0
DL
293 gm_packets_init(ret->packets);
294
295 gm_subscribers_add(gm_ifp->subscribers, ret);
296 }
297 return ret;
298}
299
300static void gm_subscriber_drop(struct gm_subscriber **subp)
301{
302 struct gm_subscriber *sub = *subp;
303 struct gm_if *gm_ifp;
304
305 if (!sub)
306 return;
307 gm_ifp = sub->iface;
308
309 *subp = NULL;
310 sub->refcount--;
311
312 if (sub->refcount)
313 return;
314
315 gm_subscribers_del(gm_ifp->subscribers, sub);
316 XFREE(MTYPE_GM_SUBSCRIBER, sub);
317}
318
319/****************************************************************************/
320
/* bundle query timer values for combined v1/v2 handling */
struct gm_query_timers {
	unsigned int qrv;	  /* robustness variable (retransmit count) */
	unsigned int max_resp_ms; /* query maximum response delay */
	unsigned int qqic_ms;	  /* querier's query interval */

	struct timeval fuzz;	    /* configured extra slack */
	struct timeval expire_wait; /* output: computed expiry interval */
};

/* compute expire_wait = (qrv - 1) * qqic + max_resp + fuzz, i.e. the time
 * after which state from before a query is aged out (all retransmits had a
 * chance to be answered).
 *
 * The addition is written out explicitly instead of using the non-standard
 * BSD timeradd() macro; both operands are normalized (usec < 1000000), so
 * a single carry suffices.  NOTE(review): qrv == 0 would underflow just as
 * in the original - callers are presumed to pass qrv >= 1.
 */
static void gm_expiry_calc(struct gm_query_timers *timers)
{
	unsigned int expire =
		(timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms;
	ldiv_t exp_div = ldiv(expire, 1000);

	timers->expire_wait.tv_sec = exp_div.quot + timers->fuzz.tv_sec;
	timers->expire_wait.tv_usec = exp_div.rem * 1000
				      + timers->fuzz.tv_usec;
	if (timers->expire_wait.tv_usec >= 1000000) {
		timers->expire_wait.tv_sec++;
		timers->expire_wait.tv_usec -= 1000000;
	}
}
341
/* final teardown of an (S,G) entry; only reached once both subscription
 * trees are empty (see gm_sg_update) and the entry was removed from the
 * interface's tree.
 */
static void gm_sg_free(struct gm_sg *sg)
{
	/* t_sg_expiry is handled before this is reached */
	THREAD_OFF(sg->t_sg_query);
	gm_packet_sg_subs_fini(sg->subs_negative);
	gm_packet_sg_subs_fini(sg->subs_positive);
	XFREE(MTYPE_GM_SG, sg);
}
350
/* printable names for enum gm_sg_state, used in debug/assert messages */
/* clang-format off */
static const char *const gm_states[] = {
	[GM_SG_NOINFO] = "NOINFO",
	[GM_SG_JOIN] = "JOIN",
	[GM_SG_JOIN_EXPIRING] = "JOIN_EXPIRING",
	[GM_SG_PRUNE] = "PRUNE",
	[GM_SG_NOPRUNE] = "NOPRUNE",
	[GM_SG_NOPRUNE_EXPIRING] = "NOPRUNE_EXPIRING",
};
/* clang-format on */

CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported");
/* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
 * joined. Whether we actually want/need to support this is a separate
 * question - it is almost never used. In fact this is exactly what RFC5790
 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
 */
368
/* central state machine for an (S,G): recompute the desired state from the
 * positive/negative subscription counts (consulting the corresponding *,G
 * entry for source-specific S,G), start last-member queries and the expiry
 * timer on downward transitions, sync the result into the TIB (PIM), and
 * drop the entry entirely once it settles at NOINFO.
 *
 * has_expired: true when called from expiry processing; forces JOIN/NOPRUNE
 * to fall through past the *_EXPIRING intermediate states.
 */
static void gm_sg_update(struct gm_sg *sg, bool has_expired)
{
	struct gm_if *gm_ifp = sg->iface;
	enum gm_sg_state prev, desired;
	bool new_join;
	struct gm_sg *grp = NULL;

	/* an S,G entry's fate also depends on its *,G entry */
	if (!pim_addr_is_any(sg->sgaddr.src))
		grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY);
	else
		/* a *,G entry itself can never be in PRUNE */
		assert(sg->state != GM_SG_PRUNE);

	if (gm_packet_sg_subs_count(sg->subs_positive)) {
		/* someone is actively subscribed */
		desired = GM_SG_JOIN;
		assert(!sg->t_sg_expire);
	} else if ((sg->state == GM_SG_JOIN ||
		    sg->state == GM_SG_JOIN_EXPIRING) &&
		   !has_expired)
		/* last subscriber gone - query & wait before leaving */
		desired = GM_SG_JOIN_EXPIRING;
	else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive))
		desired = GM_SG_NOINFO;
	else if (gm_packet_sg_subs_count(grp->subs_positive) ==
		 gm_packet_sg_subs_count(sg->subs_negative)) {
		/* every *,G subscriber excludes this source */
		if ((sg->state == GM_SG_NOPRUNE ||
		     sg->state == GM_SG_NOPRUNE_EXPIRING) &&
		    !has_expired)
			desired = GM_SG_NOPRUNE_EXPIRING;
		else
			desired = GM_SG_PRUNE;
	} else if (gm_packet_sg_subs_count(sg->subs_negative))
		/* some, but not all, *,G subscribers exclude this source */
		desired = GM_SG_NOPRUNE;
	else
		desired = GM_SG_NOINFO;

	if (desired != sg->state && !gm_ifp->stopping) {
		if (PIM_DEBUG_GM_EVENTS)
			zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
				   gm_states[desired]);

		if (desired == GM_SG_JOIN_EXPIRING ||
		    desired == GM_SG_NOPRUNE_EXPIRING) {
			/* entering an EXPIRING state: arm the expiry timer
			 * and trigger last-member specific queries
			 */
			struct gm_query_timers timers;

			timers.qrv = gm_ifp->cur_qrv;
			timers.max_resp_ms = gm_ifp->cur_max_resp;
			timers.qqic_ms = gm_ifp->cur_query_intv_trig;
			timers.fuzz = gm_ifp->cfg_timing_fuzz;

			gm_expiry_calc(&timers);
			gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);

			THREAD_OFF(sg->t_sg_query);
			sg->n_query = gm_ifp->cur_lmqc;
			sg->query_sbit = false;
			gm_trigger_specific(sg);
		}
	}
	prev = sg->state;
	sg->state = desired;

	/* link-local scope groups are never forwarded, cf.
	 * in6_multicast_nofwd(); nothing to tell PIM about those
	 */
	if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping)
		new_join = false;
	else
		new_join = gm_sg_state_want_join(desired);

	if (new_join && !sg->tib_joined) {
		/* this will retry if join previously failed */
		sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
						gm_ifp->ifp, &sg->oil);
		if (!sg->tib_joined)
			zlog_warn(
				"MLD join for %pSG%%%s not propagated into TIB",
				&sg->sgaddr, gm_ifp->ifp->name);
		else
			zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr,
				  gm_ifp->ifp->name);

	} else if (sg->tib_joined && !new_join) {
		tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil);

		sg->oil = NULL;
		sg->tib_joined = false;
	}

	if (desired == GM_SG_NOINFO) {
		/* the entry must be fully clean before it can be dropped */
		assertf((!sg->t_sg_expire &&
			 !gm_packet_sg_subs_count(sg->subs_positive) &&
			 !gm_packet_sg_subs_count(sg->subs_negative)),
			"%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
			&sg->sgaddr, gm_ifp->ifp->name, has_expired,
			sg->t_sg_expire, gm_states[prev], gm_states[desired],
			gm_packet_sg_subs_count(sg->subs_positive),
			gm_packet_sg_subs_count(sg->subs_negative), grp);

		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_sg(sg, "dropping"));

		gm_sgs_del(gm_ifp->sgs, sg);
		gm_sg_free(sg);
	}
}
470
471/****************************************************************************/
472
/* the following bunch of functions deals with transferring state from
 * received packets into gm_packet_state. As a reminder, the querier is
 * structured to keep all items received in one packet together, since they
 * will share expiry timers and thus allows efficient handling.
 */

/* free a packet whose items have all been deactivated; unhooks it from the
 * expiry list and its subscriber (possibly freeing the subscriber through
 * the reference drop)
 */
static void gm_packet_free(struct gm_packet_state *pkt)
{
	gm_packet_expires_del(pkt->iface->expires, pkt);
	gm_packets_del(pkt->subscriber->packets, pkt);
	gm_subscriber_drop(&pkt->subscriber);
	XFREE(MTYPE_GM_STATE, pkt);
}
486
487static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt,
488 struct gm_sg *sg, bool is_excl,
489 bool is_src)
490{
491 struct gm_packet_sg *item;
492
493 assert(pkt->n_active < pkt->n_sg);
494
495 item = &pkt->items[pkt->n_active];
496 item->sg = sg;
497 item->is_excl = is_excl;
498 item->is_src = is_src;
499 item->offset = pkt->n_active;
500
501 pkt->n_active++;
502 return item;
503}
504
/* deactivate one item of a packet: remove it from its (S,G)'s subscription
 * tree and, for a *,G EXCLUDE item, also deactivate the blocked-source
 * items that follow it in the packet. Frees the whole packet once its last
 * active item is gone.
 *
 * returns true if the packet was freed (caller must not touch pkt/item).
 */
static bool gm_packet_sg_drop(struct gm_packet_sg *item)
{
	struct gm_packet_state *pkt;
	size_t i;

	assert(item->sg);

	pkt = gm_packet_sg2state(item);
	if (item->sg->most_recent == item)
		item->sg->most_recent = NULL;

	/* an EXCLUDE *,G item is immediately followed by its sources */
	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *excl_item;

		excl_item = item + 1 + i;
		if (!excl_item->sg)
			continue;

		gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item);
		excl_item->sg = NULL;
		pkt->n_active--;

		/* the group item itself is still active at this point */
		assert(pkt->n_active > 0);
	}

	if (item->is_excl && item->is_src)
		gm_packet_sg_subs_del(item->sg->subs_negative, item);
	else
		gm_packet_sg_subs_del(item->sg->subs_positive, item);
	item->sg = NULL;
	pkt->n_active--;

	if (!pkt->n_active) {
		gm_packet_free(pkt);
		return true;
	}
	return false;
}
543
/* deactivate all items of a packet, e.g. on general expiry. Must stop as
 * soon as gm_packet_sg_drop() reports the packet freed, since pkt may not
 * be dereferenced afterwards.
 */
static void gm_packet_drop(struct gm_packet_state *pkt, bool trace)
{
	for (size_t i = 0; i < pkt->n_sg; i++) {
		struct gm_sg *sg = pkt->items[i].sg;
		bool deleted;

		if (!sg)
			continue;

		if (trace && PIM_DEBUG_GM_TRACE)
			zlog_debug(log_sg(sg, "general-dropping from %pPA"),
				   &pkt->subscriber->addr);
		deleted = gm_packet_sg_drop(&pkt->items[i]);

		/* has_expired = true: skip the *_EXPIRING intermediates */
		gm_sg_update(sg, true);
		if (deleted)
			break;
	}
}
563
564static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp,
565 struct gm_subscriber *subscriber,
566 pim_addr grp, pim_addr *srcs,
567 size_t n_src, enum gm_sub_sense sense)
568{
569 struct gm_sg *sg;
570 struct gm_packet_sg *old_src;
571 size_t i;
572
573 for (i = 0; i < n_src; i++) {
574 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
575 if (!sg)
576 continue;
577
578 old_src = gm_packet_sg_find(sg, sense, subscriber);
579 if (!old_src)
580 continue;
581
582 gm_packet_sg_drop(old_src);
583 gm_sg_update(sg, false);
584 }
585}
586
/* a subscriber (re)confirmed interest: stop any running expiry and set the
 * S-bit ("suppress router-side processing") for subsequent queries.
 * Note only the debug message is conditional; THREAD_OFF always runs.
 */
static void gm_sg_expiry_cancel(struct gm_sg *sg)
{
	if (sg->t_sg_expire && PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
	THREAD_OFF(sg->t_sg_expire);
	sg->query_sbit = true;
}
594
/* first pass: process all changes resulting in removal of state:
 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
 * note *replacing* state is NOT considered *removing* state here
 *
 * everything else is thrown into pkt for creation of state in pass 2
 */
static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
			       struct mld_v2_rec_hdr *rechdr)
{
	/* NB: pkt->subscriber can be NULL here if the subscriber was not
	 * previously seen!
	 */
	struct gm_subscriber *subscriber = pkt->subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp = NULL;
	struct gm_packet_sg *item;
	size_t n_src = ntohs(rechdr->n_src);
	size_t j;
	bool is_excl = false;

	grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY);
	if (grp && subscriber)
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);

	/* a stored *,G item for a subscriber is always EXCLUDE mode */
	assert(old_grp == NULL || old_grp->is_excl);

	switch (rechdr->type) {
	case MLD_RECTYPE_IS_EXCLUDE:
	case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
		/* this always replaces or creates state */
		is_excl = true;
		if (!grp)
			grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);

		item = gm_packet_sg_setup(pkt, grp, is_excl, false);
		item->n_exclude = n_src;

		/* [EXCL_INCL_SG_NOTE] referenced below
		 *
		 * in theory, we should drop any S,G that the host may have
		 * previously added in INCLUDE mode. In practice, this is both
		 * incredibly rare and entirely irrelevant. It only makes any
		 * difference if an S,G that the host previously had on the
		 * INCLUDE list is now on the blocked list for EXCLUDE, which
		 * we can cover in processing the S,G list in pass2_excl().
		 *
		 * Other S,G from the host are simply left to expire
		 * "naturally" through general expiry.
		 */
		break;

	case MLD_RECTYPE_IS_INCLUDE:
	case MLD_RECTYPE_CHANGE_TO_INCLUDE:
		if (old_grp) {
			/* INCLUDE has no *,G state, so old_grp here refers to
			 * previous EXCLUDE => delete it
			 */
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);
			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
		}
		break;

	case MLD_RECTYPE_ALLOW_NEW_SOURCES:
		if (old_grp) {
			/* remove S,Gs from EXCLUDE, and then we're done */
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_NEG);
			return;
		}
		/* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
		 * idential to IS_INCLUDE (because the list of sources in
		 * IS_INCLUDE is not exhaustive)
		 */
		break;

	case MLD_RECTYPE_BLOCK_OLD_SOURCES:
		if (old_grp) {
			/* this is intentionally not implemented because it
			 * would be complicated as hell. we only take the list
			 * of blocked sources from full group state records
			 */
			return;
		}

		if (subscriber)
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_POS);
		return;
	}

	/* queue the record's sources as items following the group entry;
	 * pass 2 will install them as positive or negative subscriptions
	 */
	for (j = 0; j < n_src; j++) {
		struct gm_sg *sg;

		sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
		if (!sg)
			sg = gm_sg_make(pkt->iface, rechdr->grp,
					rechdr->srcs[j]);

		gm_packet_sg_setup(pkt, sg, is_excl, true);
	}
}
702
/* second pass: creating/updating/refreshing state. All the items from the
 * received packet have already been thrown into gm_packet_state.
 */

/* install an INCLUDE-mode (positive) item for one (S,G): replaces any
 * previous item the subscriber had for it and refreshes the entry
 */
static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i)
{
	struct gm_packet_sg *item = &pkt->items[i];
	struct gm_packet_sg *old = NULL;
	struct gm_sg *sg = item->sg;

	/* EXCLUDE state was already dropped in pass1 */
	assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber));

	old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber);
	if (old)
		gm_packet_sg_drop(old);

	pkt->n_active++;
	gm_packet_sg_subs_add(sg->subs_positive, item);

	sg->most_recent = item;
	gm_sg_expiry_cancel(sg);
	gm_sg_update(sg, false);
}
727
/* install an EXCLUDE-mode *,G item (at pkt->items[offs]) together with its
 * n_exclude trailing source items; replaces the subscriber's previous
 * EXCLUDE state and triggers updates for sources whose status changed
 */
static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs)
{
	struct gm_packet_sg *item = &pkt->items[offs];
	struct gm_packet_sg *old_grp, *item_dup;
	struct gm_sg *sg_grp = item->sg;
	size_t i;

	old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber);
	if (old_grp) {
		/* remove the subscriber's old state for each source that is
		 * (still) excluded in the new report
		 */
		for (i = 0; i < item->n_exclude; i++) {
			struct gm_packet_sg *item_src, *old_src;

			item_src = &pkt->items[offs + 1 + i];
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);

			/* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
			 * items left over if the host previously had INCLUDE
			 * mode going. Remove them here if we find any.
			 */
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);
		}

		/* the previous loop has removed the S,G entries which are
		 * still excluded after this update. So anything left on the
		 * old item was previously excluded but is now included
		 * => need to trigger update on S,G
		 */
		for (i = 0; i < old_grp->n_exclude; i++) {
			struct gm_packet_sg *old_src;
			struct gm_sg *old_sg_src;

			old_src = old_grp + 1 + i;
			old_sg_src = old_src->sg;
			if (!old_sg_src)
				continue;

			gm_packet_sg_drop(old_src);
			gm_sg_update(old_sg_src, false);
		}

		gm_packet_sg_drop(old_grp);
	}

	/* install the new *,G EXCLUDE entry; pass1 guarantees there is no
	 * duplicate left for this subscriber
	 */
	item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item);
	assert(!item_dup);
	pkt->n_active++;

	sg_grp->most_recent = item;
	gm_sg_expiry_cancel(sg_grp);

	/* install the blocked sources as negative subscriptions */
	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *item_src;

		item_src = &pkt->items[offs + 1 + i];
		item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative,
						 item_src);

		if (item_dup)
			/* duplicate source in the same record; deactivate */
			item_src->sg = NULL;
		else {
			pkt->n_active++;
			gm_sg_update(item_src->sg, false);
		}
	}

	/* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
	 * to get lower PIM churn/flapping
	 */
	gm_sg_update(sg_grp, false);
}
804
CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state");
/* on receiving a query, we need to update our robustness/query interval to
 * match, so we correctly process group/source specific queries after last
 * member leaves
 */

/* parse and process an MLDv2 report (data/len past the ICMPv6 header).
 * Done in two passes over the records: pass1 validates each record and
 * removes/replaces old state, pass2 installs the new state. The
 * gm_packet_state is allocated at the maximum possible item count first
 * and shrunk via realloc once the number of active items is known.
 */
static void gm_handle_v2_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v2_report_hdr *hdr;
	size_t i, n_records, max_entries;
	struct gm_packet_state *pkt;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv2 report (truncated header)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	/* errors after this may at least partially process the packet */
	gm_ifp->stats.rx_new_report++;

	hdr = (struct mld_v2_report_hdr *)data;
	data += sizeof(*hdr);
	len -= sizeof(*hdr);

	/* can't have more *,G and S,G items than there is space for ipv6
	 * addresses, so just use this to allocate temporary buffer
	 */
	max_entries = len / sizeof(pim_addr);
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	/* may be NULL for a previously unseen subscriber; resolved below */
	pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);

	n_records = ntohs(hdr->n_records);

	/* validate & remove state in v2_pass1() */
	for (i = 0; i < n_records; i++) {
		struct mld_v2_rec_hdr *rechdr;
		size_t n_src, record_size;

		if (len < sizeof(*rechdr)) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated record header)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		rechdr = (struct mld_v2_rec_hdr *)data;
		data += sizeof(*rechdr);
		len -= sizeof(*rechdr);

		n_src = ntohs(rechdr->n_src);
		record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4;

		if (len < record_size) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated source list)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}
		if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) {
			zlog_warn(
				log_pkt_src(
					"malformed MLDv2 report (invalid group %pI6)"),
				&rechdr->grp);
			/* NOTE(review): invalid-group is counted under
			 * rx_trunc_report, same as truncation - confirm
			 * whether a separate counter is intended
			 */
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		data += record_size;
		len -= record_size;

		gm_handle_v2_pass1(pkt, rechdr);
	}

	if (!pkt->n_active) {
		/* nothing to install - the report only removed state */
		gm_subscriber_drop(&pkt->subscriber);
		XFREE(MTYPE_GM_STATE, pkt);
		return;
	}

	/* shrink to the actually used number of items */
	pkt = XREALLOC(MTYPE_GM_STATE, pkt,
		       offsetof(struct gm_packet_state, items[pkt->n_active]));
	pkt->n_sg = pkt->n_active;
	/* pass2 re-counts n_active as items get installed */
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	for (i = 0; i < pkt->n_sg; i++)
		if (!pkt->items[i].is_excl)
			gm_handle_v2_pass2_incl(pkt, i);
		else {
			gm_handle_v2_pass2_excl(pkt, i);
			/* skip over the EXCLUDE entry's source items */
			i += pkt->items[i].n_exclude;
		}

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}
914
/* process an MLDv1 report; semantically an MLDv2 IS_EXCLUDE record with an
 * empty source list. Since MLDv1 report suppression makes per-host tracking
 * impossible, state is booked to the gm_dummy_untracked pseudo-subscriber.
 */
static void gm_handle_v1_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_packet_state *pkt;
	struct gm_sg *grp;
	struct gm_packet_sg *item;
	size_t max_entries;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 report (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_report++;

	hdr = (struct mld_v1_pkt *)data;

	/* a v1 report carries exactly one group */
	max_entries = 1;
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);

	/* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */

	grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY);
	if (!grp)
		grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY);

	item = gm_packet_sg_setup(pkt, grp, true, false);
	item->n_exclude = 0;
	CPP_NOTICE("set v1-seen timer on grp here");

	/* } */

	/* pass2 will count n_active back up to 1. Also since a v1 report
	 * has exactly 1 group, we can skip the realloc() that v2 needs here.
	 */
	assert(pkt->n_active == 1);
	pkt->n_sg = pkt->n_active;
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	/* pass2 covers installing state & removing old state; all the v1
	 * compat is handled at this point.
	 *
	 * Note that "old state" may be v2; subscribers will switch from v2
	 * reports to v1 reports when the querier changes from v2 to v1. So,
	 * limiting this to v1 would be wrong.
	 */
	gm_handle_v2_pass2_excl(pkt, 0);

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}
981
/* process an MLDv1 done/leave; semantically an MLDv2 TO_INCLUDE with an
 * empty source list, booked against the untracked pseudo-subscriber. Only
 * removes state, so there is no pass2 work.
 */
static void gm_handle_v1_leave(struct gm_if *gm_ifp,
			       const struct sockaddr_in6 *pkt_src, char *data,
			       size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_subscriber *subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_GM_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 leave (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_leave++;

	hdr = (struct mld_v1_pkt *)data;

	/* no untracked subscriber => no v1 state to remove */
	subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
	if (!subscriber)
		return;

	/* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */

	grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY);
	if (grp) {
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
		if (old_grp) {
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);
			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
		}
	}

	/* } */

	/* nothing more to do here, pass2 is no-op for leaves */
	gm_subscriber_drop(&subscriber);
}
1024
/* for each general query received (or sent), a timer is started to expire
 * _everything_ at the appropriate time (including robustness multiplier).
 *
 * So when this timer hits, all packets - with all of their items - that were
 * received *before* the query are aged out, and state updated accordingly.
 * Note that when we receive a refresh/update, the previous/old packet is
 * already dropped and replaced with a new one, so in normal steady-state
 * operation, this timer won't be doing anything.
 *
 * Additionally, if a subscriber actively leaves a group, that goes through
 * its own path too and won't hit this. This is really only triggered when a
 * host straight up disappears.
 */
static void gm_t_expire(struct thread *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	struct gm_packet_state *pkt;

	zlog_info(log_ifp("general expiry timer"));

	/* work through pending[] in order; each entry corresponds to one
	 * general query and its computed expiry deadline
	 */
	while (gm_ifp->n_pending) {
		struct gm_general_pending *pend = gm_ifp->pending;
		struct timeval remain;
		int64_t remain_ms;

		remain_ms = monotime_until(&pend->expiry, &remain);
		if (remain_ms > 0) {
			/* earliest deadline is still in the future - re-arm
			 * the timer for it and stop processing
			 */
			if (PIM_DEBUG_GM_EVENTS)
				zlog_debug(
					log_ifp("next general expiry in %" PRId64 "ms"),
					remain_ms / 1000);

			thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
					    &remain, &gm_ifp->t_expire);
			return;
		}

		/* expires list is ordered by receive time, so everything
		 * received before this query can be dropped front-to-back
		 */
		while ((pkt = gm_packet_expires_first(gm_ifp->expires))) {
			if (timercmp(&pkt->received, &pend->query, >=))
				break;

			if (PIM_DEBUG_GM_PACKETS)
				zlog_debug(log_ifp("expire packet %p"), pkt);
			gm_packet_drop(pkt, true);
		}

		/* shift the remaining pending entries down by one */
		gm_ifp->n_pending--;
		memmove(gm_ifp->pending, gm_ifp->pending + 1,
			gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
	}

	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("next general expiry waiting for query"));
}
1079
/* NB: the receive handlers will also run when sending packets, since we
 * receive our own packets back in.
 */

/* Record a newly seen general query: queue a pending-expiry entry so
 * gm_t_expire() later ages out state not refreshed by reports, discarding
 * any queued entries this query supersedes.
 */
static void gm_handle_q_general(struct gm_if *gm_ifp,
				struct gm_query_timers *timers)
{
	struct timeval now, expiry;
	struct gm_general_pending *pend;

	monotime(&now);
	timeradd(&now, &timers->expire_wait, &expiry);

	/* drop pending entries (newest-first from the tail) whose expiry is
	 * not earlier than the one we're about to add
	 */
	while (gm_ifp->n_pending) {
		pend = &gm_ifp->pending[gm_ifp->n_pending - 1];

		if (timercmp(&pend->expiry, &expiry, <))
			break;

		/* if we end up here, the last item in pending[] has an expiry
		 * later than the expiry for this query. But our query time
		 * (now) is later than that of the item (because, well, that's
		 * how time works.) This makes this query meaningless since
		 * it's "supersetted" within the preexisting query
		 */

		if (PIM_DEBUG_GM_TRACE_DETAIL)
			zlog_debug(
				log_ifp("zapping supersetted general timer %pTVMu"),
				&pend->expiry);

		gm_ifp->n_pending--;
		if (!gm_ifp->n_pending)
			THREAD_OFF(gm_ifp->t_expire);
	}

	/* people might be messing with their configs or something */
	if (gm_ifp->n_pending == array_size(gm_ifp->pending))
		return;

	pend = &gm_ifp->pending[gm_ifp->n_pending];
	pend->query = now;
	pend->expiry = expiry;

	/* only the head entry drives the timer; later entries are picked up
	 * when gm_t_expire() works through the queue
	 */
	if (!gm_ifp->n_pending++) {
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(
				log_ifp("starting general timer @ 0: %pTVMu"),
				&pend->expiry);
		thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
				    &timers->expire_wait, &gm_ifp->t_expire);
	} else if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
			   gm_ifp->n_pending, &pend->expiry);
}
1134
/* Expiry timer for a single S,G (or *,G) entry: no report refreshed it in
 * time, so drop all positive subscriptions and recompute the entry's state.
 */
static void gm_t_sg_expire(struct thread *t)
{
	struct gm_sg *sg = THREAD_ARG(t);
	struct gm_if *gm_ifp = sg->iface;
	struct gm_packet_sg *item;

	/* this timer is only armed while the entry is aging out */
	assertf(sg->state == GM_SG_JOIN_EXPIRING ||
			sg->state == GM_SG_NOPRUNE_EXPIRING,
		"%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t);

	frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
		/* this will also drop EXCLUDE mode S,G lists together with
		 * the *,G entry
		 */
		gm_packet_sg_drop(item);

	/* subs_negative items are only timed out together with the *,G entry
	 * since we won't get any reports for a group-and-source query
	 */
	gm_sg_update(sg, true);
}
1156
/* Check whether some subscriber reported this S,G recently enough (i.e. at
 * or after "ref", with a small timing-fuzz allowance) that it need not be
 * re-queried / expired.
 *
 * Returns true if a sufficiently recent report exists.  As a side effect,
 * lazily (re)computes the sg->most_recent cache when it is unset.
 */
static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg,
			       struct timeval ref)
{
	struct gm_packet_state *pkt;

	if (!sg->most_recent) {
		struct gm_packet_state *best_pkt = NULL;
		struct gm_packet_sg *item;

		/* linear scan of positive subscriptions for the newest
		 * enclosing packet; cached in sg->most_recent afterwards
		 */
		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
			pkt = gm_packet_sg2state(item);

			if (!best_pkt ||
			    timercmp(&pkt->received, &best_pkt->received, >)) {
				best_pkt = pkt;
				sg->most_recent = item;
			}
		}
	}
	if (sg->most_recent) {
		struct timeval fuzz;

		pkt = gm_packet_sg2state(sg->most_recent);

		/* this shouldn't happen on plain old real ethernet segment,
		 * but on something like a VXLAN or VPLS it is very possible
		 * that we get a report before the query that triggered it.
		 * (imagine a triangle scenario with 3 datacenters, it's very
		 * possible A->B + B->C is faster than A->C due to odd routing)
		 *
		 * This makes a little tolerance allowance to handle that case.
		 */
		timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz);

		if (timercmp(&fuzz, &ref, >))
			return true;
	}
	return false;
}
1196
/* Arm (or shorten) the expiry timer on an S,G entry in response to a query.
 *
 * No-op when the entry doesn't exist, is already PRUNEd, or a recent enough
 * report makes expiry pointless.  An already-running timer is only replaced
 * if the new deadline is sooner - timers never get extended here.
 */
static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
			      struct timeval expire_wait)
{
	struct timeval now;

	if (!sg)
		return;
	if (sg->state == GM_SG_PRUNE)
		return;

	monotime(&now);
	if (gm_sg_check_recent(gm_ifp, sg, now))
		return;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait);

	if (sg->t_sg_expire) {
		struct timeval remain;

		/* keep the existing timer if it already fires sooner */
		remain = thread_timer_remain(sg->t_sg_expire);
		if (timercmp(&remain, &expire_wait, <=))
			return;

		THREAD_OFF(sg->t_sg_expire);
	}

	thread_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
			    &sg->t_sg_expire);
}
1227
1228static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
1229 struct gm_query_timers *timers, pim_addr grp,
1230 const pim_addr *srcs, size_t n_src)
1231{
1232 struct gm_sg *sg;
1233 size_t i;
1234
1235 for (i = 0; i < n_src; i++) {
1236 sg = gm_sg_find(gm_ifp, grp, srcs[i]);
1237 gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
1238 }
1239}
1240
static void gm_t_grp_expire(struct thread *t)
{
	/* if we're here, that means when we received the group-specific query
	 * there was one or more active S,G for this group. For *,G the timer
	 * in sg->t_sg_expire is running separately and gets cancelled when we
	 * receive a report, so that work is left to gm_t_sg_expire and we
	 * shouldn't worry about it here.
	 */
	struct gm_grp_pending *pend = THREAD_ARG(t);
	struct gm_if *gm_ifp = pend->iface;
	struct gm_sg *sg, *sg_start, sg_ref = {};

	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);

	/* gteq lookup - try to find *,G or S,G (S,G is > *,G)
	 * could technically be gt to skip a possible *,G
	 */
	sg_ref.sgaddr.grp = pend->grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	/* walk all S,G entries of this group in the sorted set */
	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
		struct gm_packet_sg *item;

		if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
			break;
		if (pim_addr_is_any(sg->sgaddr.src))
			/* handled by gm_t_sg_expire / sg->t_sg_expire */
			continue;
		if (gm_sg_check_recent(gm_ifp, sg, pend->query))
			continue;

		/* we may also have a group-source-specific query going on in
		 * parallel. But if we received nothing for the *,G query,
		 * the S,G query is kinda irrelevant.
		 */
		THREAD_OFF(sg->t_sg_expire);

		frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
			/* this will also drop the EXCLUDE S,G lists */
			gm_packet_sg_drop(item);

		gm_sg_update(sg, true);
	}

	/* one-shot: the pending record is consumed here */
	gm_grp_pends_del(gm_ifp->grp_pends, pend);
	XFREE(MTYPE_GM_GRP_PENDING, pend);
}
1290
/* Process a group-specific query: start the *,G expiry timer if a *,G entry
 * exists, and - if any S,G entries exist for the group - create or shorten a
 * gm_grp_pending record so gm_t_grp_expire() can age them out as a batch.
 */
static void gm_handle_q_group(struct gm_if *gm_ifp,
			      struct gm_query_timers *timers, pim_addr grp)
{
	struct gm_sg *sg, sg_ref = {};
	struct gm_grp_pending *pend, pend_ref = {};

	sg_ref.sgaddr.grp = grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	/* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
	sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
		/* we have nothing at all for this group - don't waste RAM */
		return;

	if (pim_addr_is_any(sg->sgaddr.src)) {
		/* actually found *,G entry here */
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
				   &grp);
		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);

		sg = gm_sgs_next(gm_ifp->sgs, sg);
		if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
			/* no S,G for this group */
			return;
	}

	pend_ref.grp = grp;
	pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);

	if (pend) {
		struct timeval remain;

		/* only replace the pending timer if the new one is sooner */
		remain = thread_timer_remain(pend->t_expire);
		if (timercmp(&remain, &timers->expire_wait, <=))
			return;

		THREAD_OFF(pend->t_expire);
	} else {
		pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
		pend->grp = grp;
		pend->iface = gm_ifp;
		gm_grp_pends_add(gm_ifp->grp_pends, pend);
	}

	monotime(&pend->query);
	thread_add_timer_tv(router->master, gm_t_grp_expire, pend,
			    &timers->expire_wait, &pend->t_expire);

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
			   pend->t_expire);
}
1345
1346static void gm_bump_querier(struct gm_if *gm_ifp)
1347{
1348 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1349
1350 THREAD_OFF(gm_ifp->t_query);
1351
1352 if (pim_addr_is_any(pim_ifp->ll_lowest))
1353 return;
1354 if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
1355 return;
1356
1357 gm_ifp->n_startup = gm_ifp->cur_qrv;
1358
1359 thread_execute(router->master, gm_t_query, gm_ifp, 0);
1360}
1361
1362static void gm_t_other_querier(struct thread *t)
1363{
1364 struct gm_if *gm_ifp = THREAD_ARG(t);
1365 struct pim_interface *pim_ifp = gm_ifp->ifp->info;
1366
1367 zlog_info(log_ifp("other querier timer expired"));
1368
1369 gm_ifp->querier = pim_ifp->ll_lowest;
1370 gm_ifp->n_startup = gm_ifp->cur_qrv;
1371
1372 thread_execute(router->master, gm_t_query, gm_ifp, 0);
1373}
1374
/* Validate and process a received MLD query (v1 or v2).
 *
 * "data" points at the MLD payload after the ICMPv6 header; "len" is its
 * size.  Handles querier election, computes the response/expiry timers from
 * the query fields, and dispatches to the general / group / group-and-source
 * handlers.  Malformed or mis-addressed queries are counted and dropped.
 */
static void gm_handle_query(struct gm_if *gm_ifp,
			    const struct sockaddr_in6 *pkt_src,
			    pim_addr *pkt_dst, char *data, size_t len)
{
	struct mld_v2_query_hdr *hdr;
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct gm_query_timers timers;
	bool general_query;

	/* only two valid sizes: exactly a v1 packet, or at least a v2
	 * query header (plus source list, checked below)
	 */
	if (len < sizeof(struct mld_v2_query_hdr) &&
	    len != sizeof(struct mld_v1_pkt)) {
		zlog_warn(log_pkt_src("invalid query size"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	/* v1 and v2 queries share their leading fields, so the v2 header
	 * struct is safe for the fields accessed on the v1 path too
	 */
	hdr = (struct mld_v2_query_hdr *)data;
	general_query = pim_addr_is_any(hdr->grp);

	if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) {
		zlog_warn(log_pkt_src(
				  "malformed MLDv2 query (invalid group %pI6)"),
			  &hdr->grp);
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	if (len >= sizeof(struct mld_v2_query_hdr)) {
		size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);

		if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 query (truncated source list)"));
			gm_ifp->stats.rx_drop_malformed++;
			return;
		}

		if (general_query && src_space) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 query (general query with non-empty source list)"));
			gm_ifp->stats.rx_drop_malformed++;
			return;
		}
	}

	/* accepting queries unicast to us (or addressed to a wrong group)
	 * can mess up querier election as well as cause us to terminate
	 * traffic (since after a unicast query no reports will be coming in)
	 */
	if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) {
		if (pim_addr_is_any(hdr->grp)) {
			zlog_warn(
				log_pkt_src(
					"wrong destination %pPA for general query"),
				pkt_dst);
			gm_ifp->stats.rx_drop_dstaddr++;
			return;
		}

		if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) {
			gm_ifp->stats.rx_drop_dstaddr++;
			zlog_warn(
				log_pkt_src(
					"wrong destination %pPA for group specific query"),
				pkt_dst);
			return;
		}
	}

	/* querier election: lowest link-local source address wins */
	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
		if (PIM_DEBUG_GM_EVENTS)
			zlog_debug(
				log_pkt_src("replacing elected querier %pPA"),
				&gm_ifp->querier);

		gm_ifp->querier = pkt_src->sin6_addr;
	}

	if (len == sizeof(struct mld_v1_pkt)) {
		/* v1 query: QRV/QQIC don't exist on the wire, use our own */
		timers.qrv = gm_ifp->cur_qrv;
		/* NOTE(review): max_resp_code is assigned without ntohs()
		 * here while the wire field is big-endian - verify whether
		 * mld_v1_pkt declares it in host order. TODO confirm.
		 */
		timers.max_resp_ms = hdr->max_resp_code;
		timers.qqic_ms = gm_ifp->cur_query_intv;
	} else {
		/* v2 query: QRV of 0 (field is 3 bits) means "maximum" */
		timers.qrv = (hdr->flags & 0x7) ?: 8;
		timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code);
		timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000;
	}
	timers.fuzz = gm_ifp->cfg_timing_fuzz;

	gm_expiry_calc(&timers);

	if (PIM_DEBUG_GM_TRACE_DETAIL)
		zlog_debug(
			log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
			timers.qrv, timers.max_resp_ms, timers.qqic_ms,
			&timers.expire_wait);

	/* if the sender beats our own lowest address, stop querying and
	 * start the "other querier present" timeout
	 */
	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
		unsigned int other_ms;

		THREAD_OFF(gm_ifp->t_query);
		THREAD_OFF(gm_ifp->t_other_querier);

		other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
		thread_add_timer_msec(router->master, gm_t_other_querier,
				      gm_ifp, other_ms,
				      &gm_ifp->t_other_querier);
	}

	if (len == sizeof(struct mld_v1_pkt)) {
		if (general_query) {
			gm_handle_q_general(gm_ifp, &timers);
			gm_ifp->stats.rx_query_old_general++;
		} else {
			gm_handle_q_group(gm_ifp, &timers, hdr->grp);
			gm_ifp->stats.rx_query_old_group++;
		}
		return;
	}

	/* v2 query - [S]uppress bit: do not update timers/state */
	if (hdr->flags & 0x8) {
		gm_ifp->stats.rx_query_new_sbit++;
		return;
	}

	if (general_query) {
		gm_handle_q_general(gm_ifp, &timers);
		gm_ifp->stats.rx_query_new_general++;
	} else if (!ntohs(hdr->n_src)) {
		gm_handle_q_group(gm_ifp, &timers, hdr->grp);
		gm_ifp->stats.rx_query_new_group++;
	} else {
		gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
				     ntohs(hdr->n_src));
		gm_ifp->stats.rx_query_new_groupsrc++;
	}
}
1513
/* Verify the ICMPv6 checksum of a received MLD packet (against the IPv6
 * pseudo-header) and dispatch it to the per-message-type handler.
 *
 * "data"/"pktlen" cover the full ICMPv6 message; the handlers are passed
 * the payload after the plain ICMPv6 header.  NB: zeroing icmp6_cksum for
 * recomputation mutates the caller's buffer.
 */
static void gm_rx_process(struct gm_if *gm_ifp,
			  const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst,
			  void *data, size_t pktlen)
{
	struct icmp6_plain_hdr *icmp6 = data;
	uint16_t pkt_csum, ref_csum;
	struct ipv6_ph ph6 = {
		.src = pkt_src->sin6_addr,
		.dst = *pkt_dst,
		.ulpl = htons(pktlen),
		.next_hdr = IPPROTO_ICMPV6,
	};

	pkt_csum = icmp6->icmp6_cksum;
	icmp6->icmp6_cksum = 0;
	ref_csum = in_cksum_with_ph6(&ph6, data, pktlen);

	if (pkt_csum != ref_csum) {
		zlog_warn(
			log_pkt_src(
				"(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
			pkt_dst, pkt_csum, ref_csum);
		gm_ifp->stats.rx_drop_csum++;
		return;
	}

	/* advance past the ICMPv6 header to the MLD payload */
	data = (icmp6 + 1);
	pktlen -= sizeof(*icmp6);

	/* unknown ICMPv6 types are silently ignored (socket filter should
	 * only pass these four anyway)
	 */
	switch (icmp6->icmp6_type) {
	case ICMP6_MLD_QUERY:
		gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen);
		break;
	case ICMP6_MLD_V1_REPORT:
		gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V1_DONE:
		gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V2_REPORT:
		gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen);
		break;
	}
}
1558
/* Scan an IPv6 Hop-by-Hop options header for a Router Alert option (RFC
 * 2711) carrying the given alert value (0 = MLD).
 *
 * hopopts points at the raw extension header as delivered by the
 * IPV6_HOPOPTS ancillary message; hopopt_len is the number of bytes
 * actually available.  Returns true only if a well-formed Router Alert
 * option with value alert_type is found; truncated or malformed TLVs
 * abort the scan.
 *
 * Bounds checks are done with index arithmetic instead of the previous
 * "hopopt_end - 2 - hopopts[1]" pointer arithmetic, which could compute a
 * pointer before the start of the buffer (undefined behavior in C) for
 * large option lengths in short headers.  Behavior is otherwise identical.
 */
static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
				 uint16_t alert_type)
{
	size_t ext_len, pos;

	/* the shortest possible hop-by-hop header is 8 bytes */
	if (hopopt_len < 8)
		return false;

	/* hopopts[1] is the header length in 8-byte units, not counting
	 * the first 8 bytes; make sure the whole header was received
	 */
	ext_len = (hopopts[1] + 1U) * 8U;
	if (hopopt_len < ext_len)
		return false;

	/* skip next-header + length bytes, then walk the option TLVs */
	pos = 2;
	while (pos < ext_len) {
		if (hopopts[pos] == IP6OPT_PAD1) {
			/* Pad1 is a lone zero byte without a length field */
			pos++;
			continue;
		}

		/* need room for the type + length bytes ... */
		if (pos + 2 > ext_len)
			break;
		/* ... and for the option data itself */
		if (pos + 2 + hopopts[pos + 1] > ext_len)
			break;

		if (hopopts[pos] == IP6OPT_ROUTER_ALERT &&
		    hopopts[pos + 1] == 2) {
			uint16_t have_type =
				(hopopts[pos + 2] << 8) | hopopts[pos + 3];

			if (have_type == alert_type)
				return true;
		}

		pos += 2 + hopopts[pos + 1];
	}
	return false;
}
1594
/* Read task on the per-VRF MLD raw socket: receive one packet, validate
 * the IPv6-level requirements (hop limit 1, Router Alert option, link-local
 * source) and hand it to gm_rx_process() for the interface it arrived on.
 *
 * Re-arms itself before reading so a single bad packet can't stall RX.
 */
static void gm_t_recv(struct thread *t)
{
	struct pim_instance *pim = THREAD_ARG(t);
	union {
		char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
			 CMSG_SPACE(256) /* hop options */ +
			 CMSG_SPACE(sizeof(int)) /* hopcount */];
		struct cmsghdr align;
	} cmsgbuf;
	struct cmsghdr *cmsg;
	struct in6_pktinfo *pktinfo = NULL;
	uint8_t *hopopts = NULL;
	size_t hopopt_len = 0;
	int *hoplimit = NULL;
	char rxbuf[2048];
	struct msghdr mh[1] = {};
	struct iovec iov[1];
	struct sockaddr_in6 pkt_src[1] = {};
	ssize_t nread;
	size_t pktlen;

	/* re-arm before processing */
	thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
			&pim->t_gm_recv);

	iov->iov_base = rxbuf;
	iov->iov_len = sizeof(rxbuf);

	mh->msg_name = pkt_src;
	mh->msg_namelen = sizeof(pkt_src);
	mh->msg_control = cmsgbuf.buf;
	mh->msg_controllen = sizeof(cmsgbuf.buf);
	mh->msg_iov = iov;
	mh->msg_iovlen = array_size(iov);
	mh->msg_flags = 0;

	/* peek first so oversized packets can be read into a heap buffer
	 * instead of being truncated (MSG_TRUNC returns the real size)
	 */
	nread = recvmsg(pim->gm_socket, mh, MSG_PEEK | MSG_TRUNC);
	if (nread <= 0) {
		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
		pim->gm_rx_drop_sys++;
		return;
	}

	if ((size_t)nread > sizeof(rxbuf)) {
		iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread);
		iov->iov_len = nread;
	}
	nread = recvmsg(pim->gm_socket, mh, 0);
	if (nread <= 0) {
		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
		pim->gm_rx_drop_sys++;
		goto out_free;
	}

	/* link-local source scope id tells us the arrival interface */
	struct interface *ifp;

	ifp = if_lookup_by_index(pkt_src->sin6_scope_id, pim->vrf->vrf_id);
	if (!ifp || !ifp->info)
		goto out_free;

	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;

	if (!gm_ifp)
		goto out_free;

	/* collect PKTINFO (destination addr), hop options and hop limit */
	for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) {
		if (cmsg->cmsg_level != SOL_IPV6)
			continue;

		switch (cmsg->cmsg_type) {
		case IPV6_PKTINFO:
			pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			break;
		case IPV6_HOPOPTS:
			hopopts = CMSG_DATA(cmsg);
			hopopt_len = cmsg->cmsg_len - sizeof(*cmsg);
			break;
		case IPV6_HOPLIMIT:
			hoplimit = (int *)CMSG_DATA(cmsg);
			break;
		}
	}

	if (!pktinfo || !hoplimit) {
		zlog_err(log_ifp(
			"BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
		pim->gm_rx_drop_sys++;
		goto out_free;
	}

	/* MLD packets must be sent with hop limit 1 (RFC 2710 / 3810) */
	if (*hoplimit != 1) {
		zlog_err(log_pkt_src("packet with hop limit != 1"));
		/* spoofing attempt => count on srcaddr counter */
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) {
		zlog_err(log_pkt_src(
			"packet without IPv6 Router Alert MLD option"));
		gm_ifp->stats.rx_drop_ra++;
		goto out_free;
	}

	if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr))
		/* reports from :: happen in normal operation for DAD, so
		 * don't spam log messages about this
		 */
		goto out_free;

	if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) {
		zlog_warn(log_pkt_src("packet from invalid source address"));
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	pktlen = nread;
	if (pktlen < sizeof(struct icmp6_plain_hdr)) {
		zlog_warn(log_pkt_src("truncated packet"));
		gm_ifp->stats.rx_drop_malformed++;
		goto out_free;
	}

	gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base,
		      pktlen);

out_free:
	/* only free if the oversized-packet heap buffer was used */
	if (iov->iov_base != rxbuf)
		XFREE(MTYPE_GM_PACKET, iov->iov_base);
}
1725
/* Build and send an MLD query on the interface.
 *
 * grp == :: sends a general query (to ff02::1); otherwise a group-specific
 * query addressed to the group itself, optionally with a source list
 * (group-and-source specific) and the [S]uppress bit.  The packet format
 * follows gm_ifp->cur_version (v1 queries are a truncated v2 header).
 * TX results are reflected in gm_ifp->stats.
 */
static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
			  const pim_addr *srcs, size_t n_srcs, bool s_bit)
{
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct sockaddr_in6 dstaddr = {
		.sin6_family = AF_INET6,
		.sin6_scope_id = gm_ifp->ifp->ifindex,
	};
	struct {
		struct icmp6_plain_hdr hdr;
		struct mld_v2_query_hdr v2_query;
	} query = {
		/* clang-format off */
		.hdr = {
			.icmp6_type = ICMP6_MLD_QUERY,
			.icmp6_code = 0,
		},
		.v2_query = {
			.grp = grp,
		},
		/* clang-format on */
	};
	struct ipv6_ph ph6 = {
		.src = pim_ifp->ll_lowest,
		.ulpl = htons(sizeof(query)),
		.next_hdr = IPPROTO_ICMPV6,
	};
	union {
		char buf[CMSG_SPACE(8) /* hop options */ +
			 CMSG_SPACE(sizeof(struct in6_pktinfo))];
		struct cmsghdr align;
	} cmsg = {};
	struct cmsghdr *cmh;
	struct msghdr mh[1] = {};
	struct iovec iov[3];
	size_t iov_len;
	ssize_t ret, expect_ret;
	uint8_t *dp;
	struct in6_pktinfo *pktinfo;

	if (if_is_loopback(gm_ifp->ifp)) {
		/* Linux is a bit odd with multicast on loopback */
		ph6.src = in6addr_loopback;
		dstaddr.sin6_addr = in6addr_loopback;
	} else if (pim_addr_is_any(grp))
		dstaddr.sin6_addr = gm_all_hosts;
	else
		dstaddr.sin6_addr = grp;

	query.v2_query.max_resp_code =
		mld_max_resp_encode(gm_ifp->cur_max_resp);
	/* QRV field is 3 bits; 0 means "more than 7" */
	query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? gm_ifp->cur_qrv : 0;
	if (s_bit)
		query.v2_query.flags |= 0x08;
	query.v2_query.qqic =
		igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000);
	query.v2_query.n_src = htons(n_srcs);

	ph6.dst = dstaddr.sin6_addr;

	/* iov[0] is the pseudo-header, used for checksumming only;
	 * ph6 not included in sendmsg
	 */
	iov[0].iov_base = &ph6;
	iov[0].iov_len = sizeof(ph6);
	iov[1].iov_base = &query;
	if (gm_ifp->cur_version == GM_MLDV1) {
		/* v1 queries are just the leading part of the v2 header */
		iov_len = 2;
		iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt);
	} else if (!n_srcs) {
		iov_len = 2;
		iov[1].iov_len = sizeof(query);
	} else {
		iov[1].iov_len = sizeof(query);
		iov[2].iov_base = (void *)srcs;
		iov[2].iov_len = n_srcs * sizeof(srcs[0]);
		iov_len = 3;
	}

	query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);

	if (PIM_DEBUG_GM_PACKETS)
		zlog_debug(
			log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
			&pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);

	/* skip iov[0] (pseudo-header) for the actual transmit */
	mh->msg_name = &dstaddr;
	mh->msg_namelen = sizeof(dstaddr);
	mh->msg_iov = iov + 1;
	mh->msg_iovlen = iov_len - 1;
	mh->msg_control = &cmsg;
	mh->msg_controllen = sizeof(cmsg.buf);

	/* hand-rolled hop-by-hop header with the Router Alert option,
	 * required by the MLD RFCs on all MLD packets
	 */
	cmh = CMSG_FIRSTHDR(mh);
	cmh->cmsg_level = IPPROTO_IPV6;
	cmh->cmsg_type = IPV6_HOPOPTS;
	cmh->cmsg_len = CMSG_LEN(8);
	dp = CMSG_DATA(cmh);
	*dp++ = 0;		     /* next header */
	*dp++ = 0;		     /* length (8-byte blocks, minus 1) */
	*dp++ = IP6OPT_ROUTER_ALERT; /* router alert */
	*dp++ = 2;		     /* length */
	*dp++ = 0;		     /* value (2 bytes) */
	*dp++ = 0;		     /* value (2 bytes) (0 = MLD) */
	*dp++ = 0;		     /* pad0 */
	*dp++ = 0;		     /* pad0 */

	/* pin the outgoing interface and source address */
	cmh = CMSG_NXTHDR(mh, cmh);
	cmh->cmsg_level = IPPROTO_IPV6;
	cmh->cmsg_type = IPV6_PKTINFO;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
	pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmh);
	pktinfo->ipi6_ifindex = gm_ifp->ifp->ifindex;
	pktinfo->ipi6_addr = gm_ifp->cur_ll_lowest;

	expect_ret = iov[1].iov_len;
	if (iov_len == 3)
		expect_ret += iov[2].iov_len;

	frr_with_privs (&pimd_privs) {
		ret = sendmsg(gm_ifp->pim->gm_socket, mh, 0);
	}

	if (ret != expect_ret) {
		zlog_warn(log_ifp("failed to send query: %m"));
		gm_ifp->stats.tx_query_fail++;
	} else {
		if (gm_ifp->cur_version == GM_MLDV1) {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_old_general++;
			else
				gm_ifp->stats.tx_query_old_group++;
		} else {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_new_general++;
			else if (!n_srcs)
				gm_ifp->stats.tx_query_new_group++;
			else
				gm_ifp->stats.tx_query_new_groupsrc++;
		}
	}
}
1866
1867static void gm_t_query(struct thread *t)
1868{
1869 struct gm_if *gm_ifp = THREAD_ARG(t);
1870 unsigned int timer_ms = gm_ifp->cur_query_intv;
1871
1872 if (gm_ifp->n_startup) {
1873 timer_ms /= 4;
1874 gm_ifp->n_startup--;
1875 }
1876
1877 thread_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
1878 &gm_ifp->t_query);
1879
1880 gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
1881}
1882
/* Timer-context trampoline for retransmitting a specific query. */
static void gm_t_sg_query(struct thread *t)
{
	gm_trigger_specific(THREAD_ARG(t));
}
1889
1890/* S,G specific queries (triggered by a member leaving) get a little slack
1891 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
1892 */
1893static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
1894{
1895 struct gm_if *gm_ifp = pend_gsq->iface;
1896
1897 gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
1898 pend_gsq->s_bit);
1899
1900 gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
1901 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
1902}
1903
/* Slack timer fired: flush the bundled group-and-source query. */
static void gm_t_gsq_pend(struct thread *t)
{
	gm_send_specific(THREAD_ARG(t));
}
1910
/* Send (or schedule) a specific query for an S,G after a leave.
 *
 * Decrements sg->n_query and re-arms the retransmit timer while more
 * queries remain.  *,G queries go out immediately; S,G sources are
 * aggregated into a gm_gsq_pending record (keyed by group and s_bit) and
 * flushed after a short fuzz delay, or immediately once the source array
 * is full.  Only acts if we are the elected querier and have a socket.
 */
static void gm_trigger_specific(struct gm_sg *sg)
{
	struct gm_if *gm_ifp = sg->iface;
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct gm_gsq_pending *pend_gsq, ref = {};

	sg->n_query--;
	if (sg->n_query)
		thread_add_timer_msec(router->master, gm_t_sg_query, sg,
				      gm_ifp->cur_query_intv_trig,
				      &sg->t_sg_query);

	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
		return;
	if (gm_ifp->pim->gm_socket == -1)
		return;

	if (PIM_DEBUG_GM_TRACE)
		zlog_debug(log_sg(sg, "triggered query"));

	/* *,G - no source list to aggregate, send right away */
	if (pim_addr_is_any(sg->sgaddr.src)) {
		gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
		return;
	}

	ref.grp = sg->sgaddr.grp;
	ref.s_bit = sg->query_sbit;

	pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
	if (!pend_gsq) {
		pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
		pend_gsq->grp = sg->sgaddr.grp;
		pend_gsq->s_bit = sg->query_sbit;
		pend_gsq->iface = gm_ifp;
		gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);

		thread_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
				    &gm_ifp->cfg_timing_fuzz,
				    &pend_gsq->t_send);
	}

	assert(pend_gsq->n_src < array_size(pend_gsq->srcs));

	pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
	pend_gsq->n_src++;

	/* source array full - flush immediately instead of waiting */
	if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
		THREAD_OFF(pend_gsq->t_send);
		gm_send_specific(pend_gsq);
		pend_gsq = NULL;
	}
}
1963
df655593 1964static void gm_vrf_socket_incref(struct pim_instance *pim)
5e5034b0 1965{
df655593 1966 struct vrf *vrf = pim->vrf;
5e5034b0
DL
1967 int ret, intval;
1968 struct icmp6_filter filter[1];
1969
df655593
DL
1970 if (pim->gm_socket_if_count++ && pim->gm_socket != -1)
1971 return;
5e5034b0
DL
1972
1973 ICMP6_FILTER_SETBLOCKALL(filter);
1974 ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter);
1975 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter);
1976 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter);
1977 ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter);
1978
1979 frr_with_privs (&pimd_privs) {
df655593
DL
1980 pim->gm_socket = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
1981 vrf->vrf_id, vrf->name);
1982 if (pim->gm_socket < 0) {
1983 zlog_err("(VRF %s) could not create MLD socket: %m",
1984 vrf->name);
5e5034b0
DL
1985 return;
1986 }
1987
df655593
DL
1988 ret = setsockopt(pim->gm_socket, SOL_ICMPV6, ICMP6_FILTER,
1989 filter, sizeof(filter));
5e5034b0 1990 if (ret)
df655593
DL
1991 zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
1992 vrf->name);
5e5034b0
DL
1993
1994 intval = 1;
df655593 1995 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVPKTINFO,
5e5034b0
DL
1996 &intval, sizeof(intval));
1997 if (ret)
df655593
DL
1998 zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
1999 vrf->name);
5e5034b0
DL
2000
2001 intval = 1;
df655593 2002 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPOPTS,
5e5034b0
DL
2003 &intval, sizeof(intval));
2004 if (ret)
df655593
DL
2005 zlog_err("(VRF %s) failed to set IPV6_HOPOPTS: %m",
2006 vrf->name);
5e5034b0
DL
2007
2008 intval = 1;
df655593 2009 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPLIMIT,
5e5034b0
DL
2010 &intval, sizeof(intval));
2011 if (ret)
df655593
DL
2012 zlog_err("(VRF %s) failed to set IPV6_HOPLIMIT: %m",
2013 vrf->name);
5e5034b0
DL
2014
2015 intval = 1;
df655593 2016 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_LOOP,
5e5034b0
DL
2017 &intval, sizeof(intval));
2018 if (ret)
2019 zlog_err(
df655593
DL
2020 "(VRF %s) failed to disable IPV6_MULTICAST_LOOP: %m",
2021 vrf->name);
5e5034b0
DL
2022
2023 intval = 1;
df655593 2024 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_HOPS,
5e5034b0
DL
2025 &intval, sizeof(intval));
2026 if (ret)
df655593
DL
2027 zlog_err(
2028 "(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
2029 vrf->name);
5e5034b0
DL
2030
2031 /* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
2032 * RX filtering in Linux. It only means "receive all groups
2033 * that something on the system has joined". To actually
2034 * receive *all* MLD packets - which is what we need -
2035 * multicast routing must be enabled on the interface. And
2036 * this only works for MLD packets specifically.
2037 *
2038 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
2039 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
2040 *
2041 * Also note that the code there explicitly checks for the IPv6
2042 * router alert MLD option (which is required by the RFC to be
2043 * on MLD packets.) That implies trying to support hosts which
2044 * erroneously don't add that option is just not possible.
2045 */
2046 intval = 1;
df655593 2047 ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_ALL,
5e5034b0
DL
2048 &intval, sizeof(intval));
2049 if (ret)
2050 zlog_info(
df655593
DL
2051 "(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
2052 vrf->name);
2053 }
5e5034b0 2054
df655593
DL
2055 thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
2056 &pim->t_gm_recv);
2057}
2058
2059static void gm_vrf_socket_decref(struct pim_instance *pim)
2060{
2061 if (--pim->gm_socket_if_count)
2062 return;
2063
2064 THREAD_OFF(pim->t_gm_recv);
2065 close(pim->gm_socket);
2066 pim->gm_socket = -1;
2067}
2068
/* Bring up MLD on an interface: allocate and initialize the gm_if state,
 * take a reference on the VRF socket, seed the timing parameters from the
 * interface config, and join ff02::16 (all-MLDv2-routers).
 *
 * Preconditions (asserted): PIM is configured on the interface, a mroute
 * VIF exists, and MLD is not already running here.
 */
static void gm_start(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;

	assert(pim_ifp);
	assert(pim_ifp->pim);
	assert(pim_ifp->mroute_vif_index >= 0);
	assert(!pim_ifp->mld);

	gm_vrf_socket_incref(pim_ifp->pim);

	gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp));
	gm_ifp->ifp = ifp;
	pim_ifp->mld = gm_ifp;
	gm_ifp->pim = pim_ifp->pim;
	monotime(&gm_ifp->started);

	zlog_info(log_ifp("starting MLD"));

	if (pim_ifp->mld_version == 1)
		gm_ifp->cur_version = GM_MLDV1;
	else
		gm_ifp->cur_version = GM_MLDV2;

	/* config values are kept in seconds/deciseconds; the running state
	 * uses milliseconds throughout
	 */
	gm_ifp->cur_qrv = pim_ifp->gm_default_robustness_variable;
	gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
	gm_ifp->cur_query_intv_trig =
		pim_ifp->gm_specific_query_max_response_time_dsec * 100;
	gm_ifp->cur_max_resp = pim_ifp->gm_query_max_response_time_dsec * 100;
	gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;

	/* 10ms tolerance for out-of-order query/report arrival */
	gm_ifp->cfg_timing_fuzz.tv_sec = 0;
	gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;

	gm_sgs_init(gm_ifp->sgs);
	gm_subscribers_init(gm_ifp->subscribers);
	gm_packet_expires_init(gm_ifp->expires);
	gm_grp_pends_init(gm_ifp->grp_pends);
	gm_gsq_pends_init(gm_ifp->gsq_pends);

	frr_with_privs (&pimd_privs) {
		struct ipv6_mreq mreq;
		int ret;

		/* all-MLDv2 group */
		mreq.ipv6mr_multiaddr = gm_all_routers;
		mreq.ipv6mr_interface = ifp->ifindex;
		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
				 IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
		if (ret)
			zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
				 ifp->name);
	}
}
2124
e309780f 2125void gm_group_delete(struct gm_if *gm_ifp)
5e5034b0 2126{
e309780f 2127 struct gm_sg *sg;
5e5034b0
DL
2128 struct gm_packet_state *pkt;
2129 struct gm_grp_pending *pend_grp;
2130 struct gm_gsq_pending *pend_gsq;
2131 struct gm_subscriber *subscriber;
e309780f
SP
2132
2133 while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
2134 gm_packet_drop(pkt, false);
2135
2136 while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
2137 THREAD_OFF(pend_grp->t_expire);
2138 XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
2139 }
2140
2141 while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
2142 THREAD_OFF(pend_gsq->t_send);
2143 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
2144 }
2145
2146 while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
2147 THREAD_OFF(sg->t_sg_expire);
2148 assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
2149 &sg->sgaddr);
2150 assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
2151 &sg->sgaddr);
2152
2153 gm_sg_free(sg);
2154 }
2155 while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
2156 assertf(!gm_packets_count(subscriber->packets), "%pPA",
2157 &subscriber->addr);
2158 XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
2159 }
2160}
2161
/*
 * Stop MLD on an interface and free all associated state.
 *
 * Safe to call when MLD is not running (returns immediately).  Ordering:
 * mark stopping → cancel timers → leave ff02::16 → drop the per-VRF
 * socket reference → flush group state → finalize containers → free.
 */
void gm_ifp_teardown(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;

	if (!pim_ifp || !pim_ifp->mld)
		return;

	gm_ifp = pim_ifp->mld;
	/* flag prevents re-arming timers while we tear down */
	gm_ifp->stopping = true;
	if (PIM_DEBUG_GM_EVENTS)
		zlog_debug(log_ifp("MLD stop"));

	THREAD_OFF(gm_ifp->t_query);
	THREAD_OFF(gm_ifp->t_other_querier);
	THREAD_OFF(gm_ifp->t_expire);

	/* leaving the multicast group requires elevated privileges */
	frr_with_privs (&pimd_privs) {
		struct ipv6_mreq mreq;
		int ret;

		/* all-MLDv2 group */
		mreq.ipv6mr_multiaddr = gm_all_routers;
		mreq.ipv6mr_interface = ifp->ifindex;
		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
				 IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
		if (ret)
			zlog_err(
				"(%s) failed to leave ff02::16 (all-MLDv2): %m",
				ifp->name);
	}

	/* may close the VRF socket if we were the last user */
	gm_vrf_socket_decref(gm_ifp->pim);

	gm_group_delete(gm_ifp);

	gm_grp_pends_fini(gm_ifp->grp_pends);
	gm_packet_expires_fini(gm_ifp->expires);
	gm_subscribers_fini(gm_ifp->subscribers);
	gm_sgs_fini(gm_ifp->sgs);

	XFREE(MTYPE_GM_IFACE, gm_ifp);
	pim_ifp->mld = NULL;
}
2206
/*
 * React to a change of the interface's lowest link-local address.
 *
 * MLD querier election is by lowest source address; if we were querier
 * under the old address we stay querier under the new one.  Losing all
 * link-local addresses stops querying; gaining one that is lower than
 * the current querier (or having no querier yet) makes us take over and
 * immediately (re)start startup queries.
 */
static void gm_update_ll(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;
	bool was_querier;

	/* querier == our previous lowest LL address → we were querier */
	was_querier =
		!IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
		!pim_addr_is_any(gm_ifp->querier);

	gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
	if (was_querier)
		gm_ifp->querier = pim_ifp->ll_lowest;
	THREAD_OFF(gm_ifp->t_query);

	if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
		if (was_querier)
			zlog_info(log_ifp(
				"lost link-local address, stopping querier"));
		return;
	}

	if (was_querier)
		zlog_info(log_ifp("new link-local %pPA while querier"),
			  &gm_ifp->cur_ll_lowest);
	else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
		 pim_addr_is_any(gm_ifp->querier)) {
		/* lower address wins election, or no querier known yet */
		zlog_info(log_ifp("new link-local %pPA, becoming querier"),
			  &gm_ifp->cur_ll_lowest);
		gm_ifp->querier = gm_ifp->cur_ll_lowest;
	} else
		return;

	/* we are (still/now) querier: restart startup query series */
	gm_ifp->n_startup = gm_ifp->cur_qrv;
	thread_execute(router->master, gm_t_query, gm_ifp, 0);
}
2243
/*
 * Reconcile per-interface MLD state with the current configuration and
 * operational status.  Starts MLD if needed, tears it down when the
 * interface is unusable, propagates changed timers/version, and bumps
 * the querier (re-queries) when a change warrants it.
 */
void gm_ifp_update(struct interface *ifp)
{
	struct pim_interface *pim_ifp = ifp->info;
	struct gm_if *gm_ifp;
	bool changed = false;

	if (!pim_ifp)
		return;
	if (!if_is_operative(ifp) || !pim_ifp->pim ||
	    pim_ifp->mroute_vif_index < 0) {
		gm_ifp_teardown(ifp);
		return;
	}

	/*
	 * If ipv6 mld is not enabled on interface, do not start mld activites.
	 */
	/* NOTE(review): if MLD is already running and gm_enable was just
	 * cleared, this returns without tearing down — presumably teardown
	 * is triggered elsewhere on deconfig; confirm against callers.
	 */
	if (!pim_ifp->gm_enable)
		return;

	if (!pim_ifp->mld) {
		changed = true;
		gm_start(ifp);
	}

	gm_ifp = pim_ifp->mld;
	if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
		gm_update_ll(ifp);

	/* config is seconds, cur_query_intv is milliseconds */
	unsigned int cfg_query_intv = pim_ifp->gm_default_query_interval * 1000;

	if (gm_ifp->cur_query_intv != cfg_query_intv) {
		gm_ifp->cur_query_intv = cfg_query_intv;
		changed = true;
	}

	/* config is deciseconds, cur_query_intv_trig is milliseconds */
	unsigned int cfg_query_intv_trig =
		pim_ifp->gm_specific_query_max_response_time_dsec * 100;

	if (gm_ifp->cur_query_intv_trig != cfg_query_intv_trig) {
		gm_ifp->cur_query_intv_trig = cfg_query_intv_trig;
		changed = true;
	}

	/* max response & LMQC updates do not set "changed" — they only
	 * affect future queries, no immediate re-query is needed
	 */
	unsigned int cfg_max_response =
		pim_ifp->gm_query_max_response_time_dsec * 100;

	if (gm_ifp->cur_max_resp != cfg_max_response)
		gm_ifp->cur_max_resp = cfg_max_response;

	if (gm_ifp->cur_lmqc != pim_ifp->gm_last_member_query_count)
		gm_ifp->cur_lmqc = pim_ifp->gm_last_member_query_count;

	enum gm_version cfg_version;

	if (pim_ifp->mld_version == 1)
		cfg_version = GM_MLDV1;
	else
		cfg_version = GM_MLDV2;
	if (gm_ifp->cur_version != cfg_version) {
		gm_ifp->cur_version = cfg_version;
		changed = true;
	}

	if (changed) {
		if (PIM_DEBUG_GM_TRACE)
			zlog_debug(log_ifp(
				"MLD querier config changed, querying"));
		gm_bump_querier(gm_ifp);
	}
}
2315
d2951219
DL
2316/*
2317 * CLI (show commands only)
2318 */
5e5034b0
DL
2319
2320#include "lib/command.h"
2321
2322#ifndef VTYSH_EXTRACT_PL
2323#include "pimd/pim6_mld_clippy.c"
2324#endif
2325
d2951219
DL
2326static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
2327 int *err)
2328{
2329 struct vrf *ret;
2330
2331 if (!vrf_str)
2332 return vrf_lookup_by_id(VRF_DEFAULT);
2333 if (!strcmp(vrf_str, "all"))
2334 return NULL;
2335 ret = vrf_lookup_by_name(vrf_str);
2336 if (ret)
2337 return ret;
2338
2339 vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str);
2340 *err = CMD_WARNING;
2341 return NULL;
2342}
2343
/*
 * Text-only detailed dump of one interface's MLD state ("show ipv6 mld
 * interface detail").  Prints a short notice if PIM or MLD is not
 * configured/running on the interface.
 */
static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp)
{
	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
	struct gm_if *gm_ifp;
	bool querier;
	size_t i;

	if (!pim_ifp) {
		vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name);
		return;
	}

	gm_ifp = pim_ifp->mld;
	if (!gm_ifp) {
		vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name);
		return;
	}

	/* we are querier iff the elected querier is our lowest LL addr */
	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);

	vty_out(vty, "Interface %s: MLD running\n", ifp->name);
	vty_out(vty, "  Uptime:                  %pTVMs\n", &gm_ifp->started);
	vty_out(vty, "  MLD version:             %d\n", gm_ifp->cur_version);
	vty_out(vty, "  Querier:                 %pPA%s\n", &gm_ifp->querier,
		querier ? " (this system)" : "");
	vty_out(vty, "  Query timer:             %pTH\n", gm_ifp->t_query);
	vty_out(vty, "  Other querier timer:     %pTH\n",
		gm_ifp->t_other_querier);
	vty_out(vty, "  Robustness value:        %u\n", gm_ifp->cur_qrv);
	vty_out(vty, "  Query interval:          %ums\n",
		gm_ifp->cur_query_intv);
	vty_out(vty, "  Query response timer:    %ums\n", gm_ifp->cur_max_resp);
	vty_out(vty, "  Last member query intv.: %ums\n",
		gm_ifp->cur_query_intv_trig);
	vty_out(vty, "  %u expiry timers from general queries:\n",
		gm_ifp->n_pending);
	for (i = 0; i < gm_ifp->n_pending; i++) {
		struct gm_general_pending *p = &gm_ifp->pending[i];

		vty_out(vty, "    %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
			&p->query, &p->expiry);
	}
	vty_out(vty, "  %zu expiry timers from *,G queries\n",
		gm_grp_pends_count(gm_ifp->grp_pends));
	vty_out(vty, "  %zu expiry timers from S,G queries\n",
		gm_gsq_pends_count(gm_ifp->gsq_pends));
	vty_out(vty, "  %zu total *,G/S,G from %zu hosts in %zu bundles\n",
		gm_sgs_count(gm_ifp->sgs),
		gm_subscribers_count(gm_ifp->subscribers),
		gm_packet_expires_count(gm_ifp->expires));
	vty_out(vty, "\n");
}
2396
/*
 * One-line (text) or one-object (JSON) summary of an interface's MLD
 * state.  Caller guarantees ifp->info is non-NULL; gm_ifp may still be
 * NULL when MLD is not running, which is reported as "down".
 */
static void gm_show_if_one(struct vty *vty, struct interface *ifp,
			   json_object *js_if)
{
	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
	struct gm_if *gm_ifp = pim_ifp->mld;
	bool querier;

	if (!gm_ifp) {
		if (js_if)
			json_object_string_add(js_if, "state", "down");
		else
			vty_out(vty, "%-16s  %-5s\n", ifp->name, "down");
		return;
	}

	/* querier iff the elected querier address is ours */
	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);

	if (js_if) {
		json_object_string_add(js_if, "name", ifp->name);
		json_object_string_add(js_if, "state", "up");
		json_object_string_addf(js_if, "version", "%d",
					gm_ifp->cur_version);
		json_object_string_addf(js_if, "upTime", "%pTVMs",
					&gm_ifp->started);
		json_object_boolean_add(js_if, "querier", querier);
		json_object_string_addf(js_if, "querierIp", "%pPA",
					&gm_ifp->querier);
		/* only one of the two timers is running at a time */
		if (querier)
			json_object_string_addf(js_if, "queryTimer", "%pTH",
						gm_ifp->t_query);
		else
			json_object_string_addf(js_if, "otherQuerierTimer",
						"%pTH",
						gm_ifp->t_other_querier);
		json_object_int_add(js_if, "timerRobustnessValue",
				    gm_ifp->cur_qrv);
		json_object_int_add(js_if, "lastMemberQueryCount",
				    gm_ifp->cur_lmqc);
		json_object_int_add(js_if, "timerQueryIntervalMsec",
				    gm_ifp->cur_query_intv);
		json_object_int_add(js_if, "timerQueryResponseTimerMsec",
				    gm_ifp->cur_max_resp);
		json_object_int_add(js_if, "timerLastMemberQueryIntervalMsec",
				    gm_ifp->cur_query_intv_trig);
	} else {
		vty_out(vty, "%-16s  %-5s  %d  %-25pPA  %-5s %11pTH  %pTVMs\n",
			ifp->name, "up", gm_ifp->cur_version, &gm_ifp->querier,
			querier ? "query" : "other",
			querier ? gm_ifp->t_query : gm_ifp->t_other_querier,
			&gm_ifp->started);
	}
}
2449
2450static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname,
2451 bool detail, json_object *js)
2452{
2453 struct interface *ifp;
2454 json_object *js_vrf;
2455
2456 if (js) {
2457 js_vrf = json_object_new_object();
2458 json_object_object_add(js, vrf->name, js_vrf);
2459 }
2460
2461 FOR_ALL_INTERFACES (vrf, ifp) {
2462 json_object *js_if = NULL;
2463
2464 if (ifname && strcmp(ifp->name, ifname))
2465 continue;
2466 if (detail && !js) {
2467 gm_show_if_one_detail(vty, ifp);
2468 continue;
2469 }
2470
2471 if (!ifp->info)
2472 continue;
2473 if (js) {
2474 js_if = json_object_new_object();
2475 json_object_object_add(js_vrf, ifp->name, js_if);
2476 }
2477
2478 gm_show_if_one(vty, ifp, js_if);
2479 }
2480}
2481
2482static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname,
2483 bool detail, json_object *js)
2484{
2485 if (!js && !detail)
2486 vty_out(vty, "%-16s %-5s V %-25s %-18s %s\n", "Interface",
2487 "State", "Querier", "Timer", "Uptime");
2488
2489 if (vrf)
2490 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2491 else
2492 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2493 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2494}
2495
/* CLI handler: "show ipv6 mld [vrf ...] interface [IFNAME|detail] [json]" */
DEFPY(gm_show_interface,
      gm_show_interface_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME | detail$detail] [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      "MLD interface information\n"
      "Interface name\n"
      "Detailed output\n"
      JSON_STR)
{
	int ret = CMD_SUCCESS;
	struct vrf *vrf;
	json_object *js = NULL;

	/* NULL vrf means "all"; a bad name sets ret and we bail out */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	if (json)
		js = json_object_new_object();
	gm_show_if(vty, vrf, ifname, !!detail, js);
	/* vty_json handles the js == NULL (non-JSON) case */
	return vty_json(vty, js);
}
2521
/*
 * Print (or JSON-encode) the RX/TX counters of one interface, driven by
 * a single table mapping text label / JSON key / counter address so the
 * two output formats cannot drift apart.
 */
static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp,
			      json_object *js_if)
{
	struct gm_if_stats *stats = &gm_ifp->stats;
	/* clang-format off */
	struct {
		const char *text;
		const char *js_key;
		uint64_t *val;
	} *item, items[] = {
		{ "v2 reports received", "rxV2Reports", &stats->rx_new_report },
		{ "v1 reports received", "rxV1Reports", &stats->rx_old_report },
		{ "v1 done received", "rxV1Done", &stats->rx_old_leave },

		{ "v2 *,* queries received", "rxV2QueryGeneral", &stats->rx_query_new_general },
		{ "v2 *,G queries received", "rxV2QueryGroup", &stats->rx_query_new_group },
		{ "v2 S,G queries received", "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc },
		{ "v2 S-bit queries received", "rxV2QuerySBit", &stats->rx_query_new_sbit },
		{ "v1 *,* queries received", "rxV1QueryGeneral", &stats->rx_query_old_general },
		{ "v1 *,G queries received", "rxV1QueryGroup", &stats->rx_query_old_group },

		{ "v2 *,* queries sent", "txV2QueryGeneral", &stats->tx_query_new_general },
		{ "v2 *,G queries sent", "txV2QueryGroup", &stats->tx_query_new_group },
		{ "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc },
		{ "v1 *,* queries sent", "txV1QueryGeneral", &stats->tx_query_old_general },
		{ "v1 *,G queries sent", "txV1QueryGroup", &stats->tx_query_old_group },
		{ "TX errors", "txErrors", &stats->tx_query_fail },

		{ "RX dropped (checksum error)", "rxDropChecksum", &stats->rx_drop_csum },
		{ "RX dropped (invalid source)", "rxDropSrcAddr", &stats->rx_drop_srcaddr },
		{ "RX dropped (invalid dest.)", "rxDropDstAddr", &stats->rx_drop_dstaddr },
		{ "RX dropped (missing alert)", "rxDropRtrAlert", &stats->rx_drop_ra },
		{ "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed },
		{ "RX truncated reports", "rxTruncatedRep", &stats->rx_trunc_report },
	};
	/* clang-format on */

	for (item = items; item < items + array_size(items); item++) {
		if (js_if)
			json_object_int_add(js_if, item->js_key, *item->val);
		else
			vty_out(vty, "  %-30s  %" PRIu64 "\n", item->text,
				*item->val);
	}
}
2567
2568static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf,
2569 const char *ifname, json_object *js)
2570{
2571 struct interface *ifp;
2572 json_object *js_vrf;
2573
2574 if (js) {
2575 js_vrf = json_object_new_object();
2576 json_object_object_add(js, vrf->name, js_vrf);
2577 }
2578
2579 FOR_ALL_INTERFACES (vrf, ifp) {
2580 struct pim_interface *pim_ifp;
2581 struct gm_if *gm_ifp;
2582 json_object *js_if = NULL;
2583
2584 if (ifname && strcmp(ifp->name, ifname))
2585 continue;
2586
2587 if (!ifp->info)
2588 continue;
2589 pim_ifp = ifp->info;
2590 if (!pim_ifp->mld)
2591 continue;
2592 gm_ifp = pim_ifp->mld;
2593
2594 if (js) {
2595 js_if = json_object_new_object();
2596 json_object_object_add(js_vrf, ifp->name, js_if);
2597 } else {
2598 vty_out(vty, "Interface: %s\n", ifp->name);
2599 }
2600 gm_show_stats_one(vty, gm_ifp, js_if);
2601 if (!js)
2602 vty_out(vty, "\n");
2603 }
2604}
2605
/* CLI handler: "show ipv6 mld [vrf ...] statistics [interface IFNAME] [json]" */
DEFPY(gm_show_interface_stats,
      gm_show_interface_stats_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      "MLD statistics\n"
      INTERFACE_STR
      "Interface name\n"
      JSON_STR)
{
	int ret = CMD_SUCCESS;
	struct vrf *vrf;
	json_object *js = NULL;

	/* NULL vrf means "all"; a bad name sets ret and we bail out */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	if (json)
		js = json_object_new_object();

	if (vrf)
		gm_show_stats_vrf(vty, vrf, ifname, js);
	else
		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
			gm_show_stats_vrf(vty, vrf, ifname, js);
	return vty_json(vty, js);
}
2636
/*
 * List the S,G join state on one interface, optionally restricted to
 * group/source prefix ranges, as text or JSON.  With "detail", the
 * individual subscribers (tracked receivers) of each S,G are listed too.
 *
 * The S,G container is ordered by group; with a group filter we seek to
 * the first matching entry and stop at the first non-matching one.
 */
static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp,
			      const struct prefix_ipv6 *groups,
			      const struct prefix_ipv6 *sources, bool detail,
			      json_object *js_if)
{
	struct gm_sg *sg, *sg_start;
	json_object *js_group = NULL;
	pim_addr js_grpaddr = PIMADDR_ANY;
	struct gm_subscriber sub_ref = {}, *sub_untracked;

	if (groups) {
		struct gm_sg sg_ref = {};

		/* seek to first S,G at/after the filter's base address */
		sg_ref.sgaddr.grp = pim_addr_from_prefix(groups);
		sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
	} else
		sg_start = gm_sgs_first(gm_ifp->sgs);

	/* the "untracked" pseudo-subscriber aggregates unidentified hosts */
	sub_ref.addr = gm_dummy_untracked;
	sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref);
	/* NB: sub_untracked may be NULL if no untracked joins exist */

	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
		struct timeval *recent = NULL, *untracked = NULL;
		json_object *js_src;

		if (groups) {
			struct prefix grp_p;

			pim_addr_to_prefix(&grp_p, sg->sgaddr.grp);
			/* ordered container: past the range means done */
			if (!prefix_match(groups, &grp_p))
				break;
		}

		if (sources) {
			struct prefix src_p;

			pim_addr_to_prefix(&src_p, sg->sgaddr.src);
			/* sources are not ordered within a group: skip */
			if (!prefix_match(sources, &src_p))
				continue;
		}

		if (sg->most_recent) {
			struct gm_packet_state *packet;

			packet = gm_packet_sg2state(sg->most_recent);
			recent = &packet->received;
		}

		if (sub_untracked) {
			struct gm_packet_state *packet;
			struct gm_packet_sg *item;

			item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked);
			if (item) {
				packet = gm_packet_sg2state(item);
				untracked = &packet->received;
			}
		}

		if (!js_if) {
			FMT_NSTD_BEGIN; /* %.0p */
			vty_out(vty,
				"%-30pPA  %-30pPAs  %-16s  %10.0pTVMs  %10.0pTVMs  %10.0pTVMs\n",
				&sg->sgaddr.grp, &sg->sgaddr.src,
				gm_states[sg->state], recent, untracked,
				&sg->created);

			if (!detail)
				continue;

			struct gm_packet_sg *item;
			struct gm_packet_state *packet;

			/* per-subscriber join lines (untracked shown above) */
			frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
				packet = gm_packet_sg2state(item);

				if (packet->subscriber == sub_untracked)
					continue;
				vty_out(vty, "    %-58pPA  %-16s  %10.0pTVMs\n",
					&packet->subscriber->addr, "(JOIN)",
					&packet->received);
			}
			frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
				packet = gm_packet_sg2state(item);

				if (packet->subscriber == sub_untracked)
					continue;
				vty_out(vty, "    %-58pPA  %-16s  %10.0pTVMs\n",
					&packet->subscriber->addr, "(PRUNE)",
					&packet->received);
			}
			FMT_NSTD_END; /* %.0p */
			continue;
		}
		/* if (js_if) */

		/* group changed (container is group-ordered): new object */
		if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) {
			js_group = json_object_new_object();
			json_object_object_addf(js_if, js_group, "%pPA",
						&sg->sgaddr.grp);
			js_grpaddr = sg->sgaddr.grp;
		}

		js_src = json_object_new_object();
		json_object_object_addf(js_group, js_src, "%pPA",
					&sg->sgaddr.src);

		json_object_string_add(js_src, "state", gm_states[sg->state]);
		json_object_string_addf(js_src, "created", "%pTVMs",
					&sg->created);
		json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent);

		if (untracked)
			json_object_string_addf(js_src, "untrackedLastSeen",
						"%pTVMs", untracked);
		if (!detail)
			continue;

		json_object *js_subs;
		struct gm_packet_sg *item;
		struct gm_packet_state *packet;

		js_subs = json_object_new_object();
		json_object_object_add(js_src, "joinedBy", js_subs);
		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
			packet = gm_packet_sg2state(item);
			if (packet->subscriber == sub_untracked)
				continue;

			json_object *js_sub;

			js_sub = json_object_new_object();
			json_object_object_addf(js_subs, js_sub, "%pPA",
						&packet->subscriber->addr);
			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
						&packet->received);
		}

		js_subs = json_object_new_object();
		json_object_object_add(js_src, "prunedBy", js_subs);
		frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
			packet = gm_packet_sg2state(item);
			if (packet->subscriber == sub_untracked)
				continue;

			json_object *js_sub;

			js_sub = json_object_new_object();
			json_object_object_addf(js_subs, js_sub, "%pPA",
						&packet->subscriber->addr);
			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
						&packet->received);
		}
	}
}
2793
2794static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf,
2795 const char *ifname,
2796 const struct prefix_ipv6 *groups,
2797 const struct prefix_ipv6 *sources, bool detail,
2798 json_object *js)
2799{
2800 struct interface *ifp;
2801 json_object *js_vrf;
2802
2803 if (js) {
2804 js_vrf = json_object_new_object();
2805 json_object_object_add(js, vrf->name, js_vrf);
2806 }
2807
2808 FOR_ALL_INTERFACES (vrf, ifp) {
2809 struct pim_interface *pim_ifp;
2810 struct gm_if *gm_ifp;
2811 json_object *js_if = NULL;
2812
2813 if (ifname && strcmp(ifp->name, ifname))
2814 continue;
2815
2816 if (!ifp->info)
2817 continue;
2818 pim_ifp = ifp->info;
2819 if (!pim_ifp->mld)
2820 continue;
2821 gm_ifp = pim_ifp->mld;
2822
2823 if (js) {
2824 js_if = json_object_new_object();
2825 json_object_object_add(js_vrf, ifp->name, js_if);
2826 }
2827
2828 if (!js && !ifname)
2829 vty_out(vty, "\nOn interface %s:\n", ifp->name);
2830
2831 gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if);
2832 }
2833}
2834
/* CLI handler: "show ipv6 mld [vrf ...] joins [filters...] [json]" */
DEFPY(gm_show_interface_joins,
      gm_show_interface_joins_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      "MLD joined groups & sources\n"
      INTERFACE_STR
      "Interface name\n"
      "Limit output to group range\n"
      "Show groups covered by this prefix\n"
      "Limit output to source range\n"
      "Show sources covered by this prefix\n"
      "Show details, including tracked receivers\n"
      JSON_STR)
{
	int ret = CMD_SUCCESS;
	struct vrf *vrf;
	json_object *js = NULL;

	/* NULL vrf means "all"; a bad name sets ret and we bail out */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	if (json)
		js = json_object_new_object();
	else
		vty_out(vty, "%-30s  %-30s  %-16s  %10s  %10s  %10s\n", "Group",
			"Source", "State", "LastSeen", "NonTrkSeen", "Created");

	if (vrf)
		gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail,
				  js);
	else
		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
			gm_show_joins_vrf(vty, vrf, ifname, groups, sources,
					  !!detail, js);
	return vty_json(vty, js);
}
2875
cdc1b770
SG
/*
 * "show ipv6 mld groups" for one VRF: one row/array entry per S,G on
 * each MLD-enabled interface, plus VRF-wide group count and watermark.
 *
 * NOTE(review): pim = vrf->info is dereferenced without a NULL check —
 * assumes every VRF handed in has a pim_instance; confirm for the
 * "vrf all" iteration path.
 */
static void gm_show_groups(struct vty *vty, struct vrf *vrf, bool uj)
{
	struct interface *ifp;
	struct ttable *tt = NULL;
	char *table;
	json_object *json = NULL;
	json_object *json_iface = NULL;
	json_object *json_group = NULL;
	json_object *json_groups = NULL;
	struct pim_instance *pim = vrf->info;

	if (uj) {
		json = json_object_new_object();
		json_object_int_add(json, "totalGroups", pim->gm_group_count);
		json_object_int_add(json, "watermarkLimit",
				    pim->gm_watermark_limit);
	} else {
		/* Prepare table. */
		tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
		ttable_add_row(tt, "Interface|Group|Version|Uptime");
		tt->style.cell.rpad = 2;
		tt->style.corner = '+';
		ttable_restyle(tt);

		vty_out(vty, "Total MLD groups: %u\n", pim->gm_group_count);
		vty_out(vty, "Watermark warn limit(%s): %u\n",
			pim->gm_watermark_limit ? "Set" : "Not Set",
			pim->gm_watermark_limit);
	}

	/* scan interfaces */
	FOR_ALL_INTERFACES (vrf, ifp) {

		struct pim_interface *pim_ifp = ifp->info;
		struct gm_if *gm_ifp;
		struct gm_sg *sg;

		if (!pim_ifp)
			continue;

		gm_ifp = pim_ifp->mld;
		if (!gm_ifp)
			continue;

		/* scan mld groups */
		frr_each (gm_sgs, gm_ifp->sgs, sg) {

			if (uj) {
				/* lazily create the per-interface object on
				 * first S,G seen for that interface
				 */
				json_object_object_get_ex(json, ifp->name,
							  &json_iface);

				if (!json_iface) {
					json_iface = json_object_new_object();
					json_object_pim_ifp_add(json_iface,
								ifp);
					json_object_object_add(json, ifp->name,
							       json_iface);
					json_groups = json_object_new_array();
					json_object_object_add(json_iface,
							       "groups",
							       json_groups);
				}

				json_group = json_object_new_object();
				json_object_string_addf(json_group, "group",
							"%pPAs",
							&sg->sgaddr.grp);

				json_object_int_add(json_group, "version",
						    pim_ifp->mld_version);
				json_object_string_addf(json_group, "uptime",
							"%pTVMs", &sg->created);
				json_object_array_add(json_groups, json_group);
			} else {
				ttable_add_row(tt, "%s|%pPAs|%d|%pTVMs",
					       ifp->name, &sg->sgaddr.grp,
					       pim_ifp->mld_version,
					       &sg->created);
			}
		} /* scan gm groups */
	} /* scan interfaces */

	if (uj)
		vty_json(vty, json);
	else {
		/* Dump the generated table. */
		table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
		ttable_del(tt);
	}
}
2968
/* CLI handler: "show ipv6 mld [vrf ...] groups [json]" */
DEFPY(gm_show_mld_groups,
      gm_show_mld_groups_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] groups [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      MLD_GROUP_STR
      JSON_STR)
{
	int ret = CMD_SUCCESS;
	struct vrf *vrf;

	/* NULL vrf means "all"; a bad name sets ret and we bail out */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	/* gm_show_groups emits its own JSON per VRF (vty_json inside) */
	if (vrf)
		gm_show_groups(vty, vrf, !!json);
	else
		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
			gm_show_groups(vty, vrf, !!json);

	return CMD_SUCCESS;
}
2994
5e5034b0
DL
2995DEFPY(gm_debug_show,
2996 gm_debug_show_cmd,
2997 "debug show mld interface IFNAME",
2998 DEBUG_STR
2999 SHOW_STR
3000 "MLD"
3001 INTERFACE_STR
3002 "interface name")
3003{
3004 struct interface *ifp;
3005 struct pim_interface *pim_ifp;
3006 struct gm_if *gm_ifp;
3007
3008 ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
3009 if (!ifp) {
3010 vty_out(vty, "%% no such interface: %pSQq\n", ifname);
3011 return CMD_WARNING;
3012 }
3013
3014 pim_ifp = ifp->info;
3015 if (!pim_ifp) {
3016 vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname);
3017 return CMD_WARNING;
3018 }
3019
3020 gm_ifp = pim_ifp->mld;
3021 if (!gm_ifp) {
3022 vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname);
3023 return CMD_WARNING;
3024 }
3025
3026 vty_out(vty, "querier: %pPA\n", &gm_ifp->querier);
3027 vty_out(vty, "ll_lowest: %pPA\n\n", &pim_ifp->ll_lowest);
3028 vty_out(vty, "t_query: %pTHD\n", gm_ifp->t_query);
3029 vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier);
5e5034b0
DL
3030 vty_out(vty, "t_expire: %pTHD\n", gm_ifp->t_expire);
3031
3032 vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending);
3033 for (size_t i = 0; i < gm_ifp->n_pending; i++) {
3034 int64_t query, expiry;
3035
3036 query = monotime_since(&gm_ifp->pending[i].query, NULL);
3037 expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL);
3038
3039 vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n",
3040 i, query / 1000, expiry / 1000);
3041 }
3042
3043 struct gm_sg *sg;
3044 struct gm_packet_state *pkt;
3045 struct gm_packet_sg *item;
3046 struct gm_subscriber *subscriber;
3047
3048 vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs));
3049 frr_each (gm_sgs, gm_ifp->sgs, sg) {
3050 vty_out(vty, "\t%pSG t_expire=%pTHD\n", &sg->sgaddr,
3051 sg->t_sg_expire);
3052
3053 vty_out(vty, "\t @pos:%zu\n",
3054 gm_packet_sg_subs_count(sg->subs_positive));
3055 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
3056 pkt = gm_packet_sg2state(item);
3057
3058 vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n",
3059 item->is_src ? "S" : "",
3060 item->is_excl ? "E" : "",
3061 &pkt->subscriber->addr, pkt->subscriber, pkt,
3062 item->offset);
3063
3064 assert(item->sg == sg);
3065 }
3066 vty_out(vty, "\t @neg:%zu\n",
3067 gm_packet_sg_subs_count(sg->subs_negative));
3068 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
3069 pkt = gm_packet_sg2state(item);
3070
3071 vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n",
3072 item->is_src ? "S" : "",
3073 item->is_excl ? "E" : "",
3074 &pkt->subscriber->addr, pkt->subscriber, pkt,
3075 item->offset);
3076
3077 assert(item->sg == sg);
3078 }
3079 }
3080
3081 vty_out(vty, "\n%zu subscribers:\n",
3082 gm_subscribers_count(gm_ifp->subscribers));
3083 frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) {
3084 vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr,
3085 subscriber, gm_packets_count(subscriber->packets));
3086
3087 frr_each (gm_packets, subscriber->packets, pkt) {
3088 vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n",
3089 pkt,
3090 monotime_since(&pkt->received, NULL) *
3091 0.000001f,
3092 pkt->n_active, pkt->n_sg);
3093
3094 for (size_t i = 0; i < pkt->n_sg; i++) {
3095 item = pkt->items + i;
3096
3097 vty_out(vty, "\t\t[%zu]", i);
3098
3099 if (!item->sg) {
3100 vty_out(vty, " inactive\n");
3101 continue;
3102 }
3103
3104 vty_out(vty, " %s%s %pSG nE=%u\n",
3105 item->is_src ? "S" : "",
3106 item->is_excl ? "E" : "",
3107 &item->sg->sgaddr, item->n_exclude);
3108 }
3109 }
3110 }
3111
3112 return CMD_SUCCESS;
3113}
3114
/*
 * Debug-only interface command: directly override the running QRV and/or
 * max-response-time on the live MLD state (bypassing saved config), and
 * re-query if anything actually changed.
 */
DEFPY(gm_debug_iface_cfg,
      gm_debug_iface_cfg_cmd,
      "debug ipv6 mld {"
      "robustness (0-7)|"
      "query-max-response-time (1-8387584)"
      "}",
      DEBUG_STR
      IPV6_STR
      "Multicast Listener Discovery\n"
      "QRV\nQRV\n"
      "maxresp\nmaxresp\n")
{
	VTY_DECLVAR_CONTEXT(interface, ifp);
	struct pim_interface *pim_ifp;
	struct gm_if *gm_ifp;
	bool changed = false;

	pim_ifp = ifp->info;
	if (!pim_ifp) {
		vty_out(vty, "%% no PIM state for interface %pSQq\n",
			ifp->name);
		return CMD_WARNING;
	}
	gm_ifp = pim_ifp->mld;
	if (!gm_ifp) {
		vty_out(vty, "%% no MLD state for interface %pSQq\n",
			ifp->name);
		return CMD_WARNING;
	}

	/* *_str is non-NULL iff the option was given on the command line */
	if (robustness_str && gm_ifp->cur_qrv != robustness) {
		gm_ifp->cur_qrv = robustness;
		changed = true;
	}
	if (query_max_response_time_str &&
	    gm_ifp->cur_max_resp != (unsigned int)query_max_response_time) {
		gm_ifp->cur_max_resp = query_max_response_time;
		changed = true;
	}

	if (changed) {
		vty_out(vty, "%% MLD querier config changed, bumping\n");
		gm_bump_querier(gm_ifp);
	}
	return CMD_SUCCESS;
}
3161
/* prototype here (not in a header) since this is the only external caller
 * entry point for CLI registration
 */
void gm_cli_init(void);

/* Register all MLD show/debug commands with the CLI graph. */
void gm_cli_init(void)
{
	/* operational show commands */
	install_element(VIEW_NODE, &gm_show_interface_cmd);
	install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
	install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
	install_element(VIEW_NODE, &gm_show_mld_groups_cmd);

	/* developer debug commands */
	install_element(VIEW_NODE, &gm_debug_show_cmd);
	install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
}