/*
 * PIMv6 MLD querier
 * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * keep pim6_mld.h open when working on this code. Most data structures are
 * commented in the header.
 *
 * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
 * that this code will replace the old IGMP querier at some point.
 */

#include <zebra.h>
#include <netinet/ip6.h>

#include "lib/memory.h"
#include "lib/jhash.h"
#include "lib/prefix.h"
#include "lib/checksum.h"
#include "lib/thread.h"

#include "pimd/pim6_mld.h"
#include "pimd/pim6_mld_protocol.h"
#include "pimd/pim_memory.h"
#include "pimd/pim_instance.h"
#include "pimd/pim_iface.h"
#include "pimd/pim_util.h"
#include "pimd/pim_tib.h"
#include "pimd/pimd.h"

#ifndef IPV6_MULTICAST_ALL
#define IPV6_MULTICAST_ALL 29
#endif

DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface");
DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet");
DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber");
DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state");
DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)");
DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");

static void gm_t_query(struct thread *t);
static void gm_trigger_specific(struct gm_sg *sg);
static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
			      struct timeval expire_wait);

/* shorthand for log messages */
#define log_ifp(msg) \
	"[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
#define log_pkt_src(msg) \
	"[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
	&pkt_src->sin6_addr
#define log_sg(sg, msg) \
	"[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
	sg->iface->ifp->name, &sg->sgaddr

/* clang-format off */
#if PIM_IPV == 6
static const pim_addr gm_all_hosts = {
	.s6_addr = {
		0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
	},
};
static const pim_addr gm_all_routers = {
	.s6_addr = {
		0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
	},
};
/* MLDv1 does not allow subscriber tracking due to report suppression
 * hence, the source address is replaced with ffff:...:ffff
 */
static const pim_addr gm_dummy_untracked = {
	.s6_addr = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	},
};
#else
/* 224.0.0.1 */
static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), };
/* 224.0.0.22 */
static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), };
static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, };
#endif
/* clang-format on */

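/* Per RFC 4291, the scope of an IPv6 multicast address is the low nibble of
 * its second byte; 2 is link-local. Groups at link-local scope or narrower
 * must never be routed, so in6_multicast_nofwd() is used below to skip TIB
 * joins for them.
 */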
#define IPV6_MULTICAST_SCOPE_LINK 2

static inline uint8_t in6_multicast_scope(const pim_addr *addr)
{
	return addr->s6_addr[1] & 0xf;
}

static inline bool in6_multicast_nofwd(const pim_addr *addr)
{
	return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK;
}

/*
 * (S,G) -> subscriber,(S,G)
 */

static int gm_packet_sg_cmp(const struct gm_packet_sg *a,
			    const struct gm_packet_sg *b)
{
	const struct gm_packet_state *s_a, *s_b;

	s_a = gm_packet_sg2state(a);
	s_b = gm_packet_sg2state(b);
	return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr);
}

DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm,
		    gm_packet_sg_cmp);

static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg,
					      enum gm_sub_sense sense,
					      struct gm_subscriber *sub)
{
	struct {
		struct gm_packet_state hdr;
		struct gm_packet_sg item;
	} ref = {
		/* clang-format off */
		.hdr = {
			.subscriber = sub,
		},
		.item = {
			.offset = 0,
		},
		/* clang-format on */
	};

	return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item);
}

/*
 * interface -> (*,G),pending
 */

static int gm_grp_pending_cmp(const struct gm_grp_pending *a,
			      const struct gm_grp_pending *b)
{
	return IPV6_ADDR_CMP(&a->grp, &b->grp);
}

DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm,
		    gm_grp_pending_cmp);

/*
 * interface -> ([S1,S2,...],G),pending
 */

static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a,
			      const struct gm_gsq_pending *b)
{
	if (a->s_bit != b->s_bit)
		return numcmp(a->s_bit, b->s_bit);

	return IPV6_ADDR_CMP(&a->grp, &b->grp);
}

static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a)
{
	uint32_t seed = a->s_bit ? 0x68f0eb5e : 0x156b7f19;

	return jhash(&a->grp, sizeof(a->grp), seed);
}

DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp,
	     gm_gsq_pending_hash);

/*
 * interface -> (S,G)
 */

static int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b)
{
	return pim_sgaddr_cmp(a->sgaddr, b->sgaddr);
}

DECLARE_RBTREE_UNIQ(gm_sgs, struct gm_sg, itm, gm_sg_cmp);

static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
				pim_addr src)
{
	struct gm_sg ref = {};

	ref.sgaddr.grp = grp;
	ref.sgaddr.src = src;
	return gm_sgs_find(gm_ifp->sgs, &ref);
}

static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
				pim_addr src)
{
	struct gm_sg *ret, *prev;

	ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
	ret->sgaddr.grp = grp;
	ret->sgaddr.src = src;
	ret->iface = gm_ifp;
	prev = gm_sgs_add(gm_ifp->sgs, ret);

	if (prev) {
		XFREE(MTYPE_GM_SG, ret);
		ret = prev;
	} else {
		monotime(&ret->created);
		gm_packet_sg_subs_init(ret->subs_positive);
		gm_packet_sg_subs_init(ret->subs_negative);
	}
	return ret;
}

/*
 * interface -> packets, sorted by expiry (because add_tail insert order)
 */

DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm);

/*
 * subscriber -> packets
 */

DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm);

/*
 * interface -> subscriber
 */

static int gm_subscriber_cmp(const struct gm_subscriber *a,
			     const struct gm_subscriber *b)
{
	return IPV6_ADDR_CMP(&a->addr, &b->addr);
}

static uint32_t gm_subscriber_hash(const struct gm_subscriber *a)
{
	return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4);
}

DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp,
	     gm_subscriber_hash);

static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp,
						   pim_addr addr)
{
	struct gm_subscriber ref = {}, *ret;

	ref.addr = addr;
	ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
	if (ret)
		ret->refcount++;
	return ret;
}

static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp,
					       pim_addr addr)
{
	struct gm_subscriber ref = {}, *ret;

	ref.addr = addr;
	ret = gm_subscribers_find(gm_ifp->subscribers, &ref);

	if (!ret) {
		ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret));
		ret->iface = gm_ifp;
		ret->addr = addr;
		ret->refcount = 1;
		monotime(&ret->created);
		gm_packets_init(ret->packets);

		gm_subscribers_add(gm_ifp->subscribers, ret);
	}
	return ret;
}

static void gm_subscriber_drop(struct gm_subscriber **subp)
{
	struct gm_subscriber *sub = *subp;
	struct gm_if *gm_ifp;

	if (!sub)
		return;
	gm_ifp = sub->iface;

	*subp = NULL;
	sub->refcount--;

	if (sub->refcount)
		return;

	gm_subscribers_del(gm_ifp->subscribers, sub);
	XFREE(MTYPE_GM_SUBSCRIBER, sub);
}

/****************************************************************************/

/* bundle query timer values for combined v1/v2 handling */
struct gm_query_timers {
	unsigned int qrv;
	unsigned int max_resp_ms;
	unsigned int qqic_ms;

	struct timeval fuzz;
	struct timeval expire_wait;
};

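/* The wait before expiring state after a query works out to (robustness - 1)
 * further query intervals plus the maximum response time, converted from
 * milliseconds into a timeval; the configured timing fuzz is added on top to
 * absorb scheduling and propagation jitter.
 */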
static void gm_expiry_calc(struct gm_query_timers *timers)
{
	unsigned int expire =
		(timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms;
	ldiv_t exp_div = ldiv(expire, 1000);

	timers->expire_wait.tv_sec = exp_div.quot;
	timers->expire_wait.tv_usec = exp_div.rem * 1000;
	timeradd(&timers->expire_wait, &timers->fuzz, &timers->expire_wait);
}

static void gm_sg_free(struct gm_sg *sg)
{
	/* t_sg_expiry is handled before this is reached */
	THREAD_OFF(sg->t_sg_query);
	gm_packet_sg_subs_fini(sg->subs_negative);
	gm_packet_sg_subs_fini(sg->subs_positive);
	XFREE(MTYPE_GM_SG, sg);
}

/* clang-format off */
static const char *const gm_states[] = {
	[GM_SG_NOINFO]			= "NOINFO",
	[GM_SG_JOIN]			= "JOIN",
	[GM_SG_JOIN_EXPIRING]		= "JOIN_EXPIRING",
	[GM_SG_PRUNE]			= "PRUNE",
	[GM_SG_NOPRUNE]			= "NOPRUNE",
	[GM_SG_NOPRUNE_EXPIRING]	= "NOPRUNE_EXPIRING",
};
/* clang-format on */

CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported");
/* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
 * joined. Whether we actually want/need to support this is a separate
 * question - it is almost never used. In fact this is exactly what RFC5790
 * ("lightweight" MLDv2) does: it removes S,G EXCLUDE support.
 */

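/* Recompute the desired state for an S,G (or *,G) entry from its subscription
 * counts: any positive (INCLUDE/join) subscription forces JOIN; a source that
 * is blocked by every subscriber of the enclosing *,G becomes PRUNE; the
 * *_EXPIRING states bridge the window while queries for possibly-departed
 * members are still outstanding. The result is then synced into the TIB via
 * tib_sg_gm_join()/tib_sg_gm_prune().
 */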
static void gm_sg_update(struct gm_sg *sg, bool has_expired)
{
	struct gm_if *gm_ifp = sg->iface;
	enum gm_sg_state prev, desired;
	bool new_join;
	struct gm_sg *grp = NULL;

	if (!pim_addr_is_any(sg->sgaddr.src))
		grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY);
	else
		assert(sg->state != GM_SG_PRUNE);

	if (gm_packet_sg_subs_count(sg->subs_positive)) {
		desired = GM_SG_JOIN;
		assert(!sg->t_sg_expire);
	} else if ((sg->state == GM_SG_JOIN ||
		    sg->state == GM_SG_JOIN_EXPIRING) &&
		   !has_expired)
		desired = GM_SG_JOIN_EXPIRING;
	else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive))
		desired = GM_SG_NOINFO;
	else if (gm_packet_sg_subs_count(grp->subs_positive) ==
		 gm_packet_sg_subs_count(sg->subs_negative)) {
		if ((sg->state == GM_SG_NOPRUNE ||
		     sg->state == GM_SG_NOPRUNE_EXPIRING) &&
		    !has_expired)
			desired = GM_SG_NOPRUNE_EXPIRING;
		else
			desired = GM_SG_PRUNE;
	} else if (gm_packet_sg_subs_count(sg->subs_negative))
		desired = GM_SG_NOPRUNE;
	else
		desired = GM_SG_NOINFO;

	if (desired != sg->state && !gm_ifp->stopping) {
		if (PIM_DEBUG_IGMP_EVENTS)
			zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
				   gm_states[desired]);

		if (desired == GM_SG_JOIN_EXPIRING ||
		    desired == GM_SG_NOPRUNE_EXPIRING) {
			struct gm_query_timers timers;

			timers.qrv = gm_ifp->cur_qrv;
			timers.max_resp_ms = gm_ifp->cur_max_resp;
			timers.qqic_ms = gm_ifp->cur_query_intv_trig;
			timers.fuzz = gm_ifp->cfg_timing_fuzz;

			gm_expiry_calc(&timers);
			gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);

			THREAD_OFF(sg->t_sg_query);
			sg->n_query = gm_ifp->cur_qrv;
			sg->query_sbit = false;
			gm_trigger_specific(sg);
		}
	}
	prev = sg->state;
	sg->state = desired;

	if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping)
		new_join = false;
	else
		new_join = gm_sg_state_want_join(desired);

	if (new_join && !sg->tib_joined) {
		/* this will retry if join previously failed */
		sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
						gm_ifp->ifp, &sg->oil);
		if (!sg->tib_joined)
			zlog_warn(
				"MLD join for %pSG%%%s not propagated into TIB",
				&sg->sgaddr, gm_ifp->ifp->name);
		else
			zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr,
				  gm_ifp->ifp->name);

	} else if (sg->tib_joined && !new_join) {
		tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil);

		sg->oil = NULL;
		sg->tib_joined = false;
	}

	if (desired == GM_SG_NOINFO) {
		assertf((!sg->t_sg_expire &&
			 !gm_packet_sg_subs_count(sg->subs_positive) &&
			 !gm_packet_sg_subs_count(sg->subs_negative)),
			"%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
			&sg->sgaddr, gm_ifp->ifp->name, has_expired,
			sg->t_sg_expire, gm_states[prev], gm_states[desired],
			gm_packet_sg_subs_count(sg->subs_positive),
			gm_packet_sg_subs_count(sg->subs_negative), grp);

		if (PIM_DEBUG_IGMP_TRACE)
			zlog_debug(log_sg(sg, "dropping"));

		gm_sgs_del(gm_ifp->sgs, sg);
		gm_sg_free(sg);
	}
}

/****************************************************************************/

/* the following bunch of functions deals with transferring state from
 * received packets into gm_packet_state. As a reminder, the querier is
 * structured to keep all items received in one packet together, since they
 * will share expiry timers, which allows efficient handling.
 */

static void gm_packet_free(struct gm_packet_state *pkt)
{
	gm_packet_expires_del(pkt->iface->expires, pkt);
	gm_packets_del(pkt->subscriber->packets, pkt);
	gm_subscriber_drop(&pkt->subscriber);
	XFREE(MTYPE_GM_STATE, pkt);
}

static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt,
					       struct gm_sg *sg, bool is_excl,
					       bool is_src)
{
	struct gm_packet_sg *item;

	assert(pkt->n_active < pkt->n_sg);

	item = &pkt->items[pkt->n_active];
	item->sg = sg;
	item->is_excl = is_excl;
	item->is_src = is_src;
	item->offset = pkt->n_active;

	pkt->n_active++;
	return item;
}

static bool gm_packet_sg_drop(struct gm_packet_sg *item)
{
	struct gm_packet_state *pkt;
	size_t i;

	assert(item->sg);

	pkt = gm_packet_sg2state(item);
	if (item->sg->most_recent == item)
		item->sg->most_recent = NULL;

	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *excl_item;

		excl_item = item + 1 + i;
		if (!excl_item->sg)
			continue;

		gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item);
		excl_item->sg = NULL;
		pkt->n_active--;

		assert(pkt->n_active > 0);
	}

	if (item->is_excl && item->is_src)
		gm_packet_sg_subs_del(item->sg->subs_negative, item);
	else
		gm_packet_sg_subs_del(item->sg->subs_positive, item);
	item->sg = NULL;
	pkt->n_active--;

	if (!pkt->n_active) {
		gm_packet_free(pkt);
		return true;
	}
	return false;
}

static void gm_packet_drop(struct gm_packet_state *pkt, bool trace)
{
	for (size_t i = 0; i < pkt->n_sg; i++) {
		struct gm_sg *sg = pkt->items[i].sg;
		bool deleted;

		if (!sg)
			continue;

		if (trace && PIM_DEBUG_IGMP_TRACE)
			zlog_debug(log_sg(sg, "general-dropping from %pPA"),
				   &pkt->subscriber->addr);
		deleted = gm_packet_sg_drop(&pkt->items[i]);

		gm_sg_update(sg, true);
		if (deleted)
			break;
	}
}

static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp,
					struct gm_subscriber *subscriber,
					pim_addr grp, pim_addr *srcs,
					size_t n_src, enum gm_sub_sense sense)
{
	struct gm_sg *sg;
	struct gm_packet_sg *old_src;
	size_t i;

	for (i = 0; i < n_src; i++) {
		sg = gm_sg_find(gm_ifp, grp, srcs[i]);
		if (!sg)
			continue;

		old_src = gm_packet_sg_find(sg, sense, subscriber);
		if (!old_src)
			continue;

		gm_packet_sg_drop(old_src);
		gm_sg_update(sg, false);
	}
}

static void gm_sg_expiry_cancel(struct gm_sg *sg)
{
	if (sg->t_sg_expire && PIM_DEBUG_IGMP_TRACE)
		zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
	THREAD_OFF(sg->t_sg_expire);
	sg->query_sbit = true;
}

/* first pass: process all changes resulting in removal of state:
 * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
 * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE, removes S,G state
 * - BLOCK_OLD_SOURCES, if *,G in INCLUDE, removes S,G state
 * - {TO,IS}_EXCLUDE, if *,G in INCLUDE, removes S,G state
 * note *replacing* state is NOT considered *removing* state here
 *
 * everything else is thrown into pkt for creation of state in pass 2
 */
static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
			       struct mld_v2_rec_hdr *rechdr)
{
	/* NB: pkt->subscriber can be NULL here if the subscriber was not
	 * previously seen!
	 */
	struct gm_subscriber *subscriber = pkt->subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp = NULL;
	struct gm_packet_sg *item;
	size_t n_src = ntohs(rechdr->n_src);
	size_t j;
	bool is_excl = false;

	grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY);
	if (grp && subscriber)
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);

	assert(old_grp == NULL || old_grp->is_excl);

	switch (rechdr->type) {
	case MLD_RECTYPE_IS_EXCLUDE:
	case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
		/* this always replaces or creates state */
		is_excl = true;
		if (!grp)
			grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);

		item = gm_packet_sg_setup(pkt, grp, is_excl, false);
		item->n_exclude = n_src;

		/* [EXCL_INCL_SG_NOTE] referenced below
		 *
		 * in theory, we should drop any S,G that the host may have
		 * previously added in INCLUDE mode. In practice, this is both
		 * incredibly rare and entirely irrelevant. It only makes any
		 * difference if an S,G that the host previously had on the
		 * INCLUDE list is now on the blocked list for EXCLUDE, which
		 * we can cover in processing the S,G list in pass2_excl().
		 *
		 * Other S,G from the host are simply left to expire
		 * "naturally" through general expiry.
		 */
		break;

	case MLD_RECTYPE_IS_INCLUDE:
	case MLD_RECTYPE_CHANGE_TO_INCLUDE:
		if (old_grp) {
			/* INCLUDE has no *,G state, so old_grp here refers to
			 * previous EXCLUDE => delete it
			 */
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);
			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
		}
		break;

	case MLD_RECTYPE_ALLOW_NEW_SOURCES:
		if (old_grp) {
			/* remove S,Gs from EXCLUDE, and then we're done */
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_NEG);
			return;
		}
		/* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
		 * identical to IS_INCLUDE (because the list of sources in
		 * IS_INCLUDE is not exhaustive)
		 */
		break;

	case MLD_RECTYPE_BLOCK_OLD_SOURCES:
		if (old_grp) {
			/* this is intentionally not implemented because it
			 * would be complicated as hell. we only take the list
			 * of blocked sources from full group state records
			 */
			return;
		}

		if (subscriber)
			gm_packet_sg_remove_sources(pkt->iface, subscriber,
						    rechdr->grp, rechdr->srcs,
						    n_src, GM_SUB_POS);
		return;
	}

	for (j = 0; j < n_src; j++) {
		struct gm_sg *sg;

		sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
		if (!sg)
			sg = gm_sg_make(pkt->iface, rechdr->grp,
					rechdr->srcs[j]);

		gm_packet_sg_setup(pkt, sg, is_excl, true);
	}
}

/* second pass: creating/updating/refreshing state. All the items from the
 * received packet have already been thrown into gm_packet_state.
 */

static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i)
{
	struct gm_packet_sg *item = &pkt->items[i];
	struct gm_packet_sg *old = NULL;
	struct gm_sg *sg = item->sg;

	/* EXCLUDE state was already dropped in pass1 */
	assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber));

	old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber);
	if (old)
		gm_packet_sg_drop(old);

	pkt->n_active++;
	gm_packet_sg_subs_add(sg->subs_positive, item);

	sg->most_recent = item;
	gm_sg_expiry_cancel(sg);
	gm_sg_update(sg, false);
}

static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs)
{
	struct gm_packet_sg *item = &pkt->items[offs];
	struct gm_packet_sg *old_grp, *item_dup;
	struct gm_sg *sg_grp = item->sg;
	size_t i;

	old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber);
	if (old_grp) {
		for (i = 0; i < item->n_exclude; i++) {
			struct gm_packet_sg *item_src, *old_src;

			item_src = &pkt->items[offs + 1 + i];
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);

			/* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
			 * items left over if the host previously had INCLUDE
			 * mode going. Remove them here if we find any.
			 */
			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS,
						    pkt->subscriber);
			if (old_src)
				gm_packet_sg_drop(old_src);
		}

		/* the previous loop has removed the S,G entries which are
		 * still excluded after this update. So anything left on the
		 * old item was previously excluded but is now included
		 * => need to trigger update on S,G
		 */
		for (i = 0; i < old_grp->n_exclude; i++) {
			struct gm_packet_sg *old_src;
			struct gm_sg *old_sg_src;

			old_src = old_grp + 1 + i;
			old_sg_src = old_src->sg;
			if (!old_sg_src)
				continue;

			gm_packet_sg_drop(old_src);
			gm_sg_update(old_sg_src, false);
		}

		gm_packet_sg_drop(old_grp);
	}

	item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item);
	assert(!item_dup);
	pkt->n_active++;

	sg_grp->most_recent = item;
	gm_sg_expiry_cancel(sg_grp);

	for (i = 0; i < item->n_exclude; i++) {
		struct gm_packet_sg *item_src;

		item_src = &pkt->items[offs + 1 + i];
		item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative,
						 item_src);

		if (item_dup)
			item_src->sg = NULL;
		else {
			pkt->n_active++;
			gm_sg_update(item_src->sg, false);
		}
	}

	/* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
	 * to get lower PIM churn/flapping
	 */
	gm_sg_update(sg_grp, false);
}

CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state");
/* on receiving a query, we need to update our robustness/query interval to
 * match, so we correctly process group/source specific queries after the
 * last member leaves
 */

static void gm_handle_v2_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v2_report_hdr *hdr;
	size_t i, n_records, max_entries;
	struct gm_packet_state *pkt;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_IGMP_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv2 report (truncated header)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	/* errors from here on may still leave the packet partially processed,
	 * so count it as received first
	 */
	gm_ifp->stats.rx_new_report++;

	hdr = (struct mld_v2_report_hdr *)data;
	data += sizeof(*hdr);
	len -= sizeof(*hdr);

	/* can't have more *,G and S,G items than there is space for ipv6
	 * addresses, so just use this to allocate temporary buffer
	 */
	max_entries = len / sizeof(pim_addr);
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);

	n_records = ntohs(hdr->n_records);

	/* validate & remove state in v2_pass1() */
	for (i = 0; i < n_records; i++) {
		struct mld_v2_rec_hdr *rechdr;
		size_t n_src, record_size;

		if (len < sizeof(*rechdr)) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated record header)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		rechdr = (struct mld_v2_rec_hdr *)data;
		data += sizeof(*rechdr);
		len -= sizeof(*rechdr);

		n_src = ntohs(rechdr->n_src);
		record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4;

		if (len < record_size) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 report (truncated source list)"));
			gm_ifp->stats.rx_trunc_report++;
			break;
		}
		if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) {
			zlog_warn(
				log_pkt_src(
					"malformed MLDv2 report (invalid group %pI6)"),
				&rechdr->grp);
			gm_ifp->stats.rx_trunc_report++;
			break;
		}

		data += record_size;
		len -= record_size;

		gm_handle_v2_pass1(pkt, rechdr);
	}

	if (!pkt->n_active) {
		gm_subscriber_drop(&pkt->subscriber);
		XFREE(MTYPE_GM_STATE, pkt);
		return;
	}

	pkt = XREALLOC(MTYPE_GM_STATE, pkt,
		       offsetof(struct gm_packet_state, items[pkt->n_active]));
	pkt->n_sg = pkt->n_active;
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	for (i = 0; i < pkt->n_sg; i++)
		if (!pkt->items[i].is_excl)
			gm_handle_v2_pass2_incl(pkt, i);
		else {
			gm_handle_v2_pass2_excl(pkt, i);
			i += pkt->items[i].n_exclude;
		}

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}

static void gm_handle_v1_report(struct gm_if *gm_ifp,
				const struct sockaddr_in6 *pkt_src, char *data,
				size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_packet_state *pkt;
	struct gm_sg *grp;
	struct gm_packet_sg *item;
	size_t max_entries;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_IGMP_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 report (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_report++;

	hdr = (struct mld_v1_pkt *)data;

	max_entries = 1;
	pkt = XCALLOC(MTYPE_GM_STATE,
		      offsetof(struct gm_packet_state, items[max_entries]));
	pkt->n_sg = max_entries;
	pkt->iface = gm_ifp;
	pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);

	/* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */

	grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY);
	if (!grp)
		grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY);

	item = gm_packet_sg_setup(pkt, grp, true, false);
	item->n_exclude = 0;
	CPP_NOTICE("set v1-seen timer on grp here");

	/* } */

	/* pass2 will count n_active back up to 1. Also since a v1 report
	 * has exactly 1 group, we can skip the realloc() that v2 needs here.
	 */
	assert(pkt->n_active == 1);
	pkt->n_sg = pkt->n_active;
	pkt->n_active = 0;

	monotime(&pkt->received);
	if (!pkt->subscriber)
		pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked);
	gm_packets_add_tail(pkt->subscriber->packets, pkt);
	gm_packet_expires_add_tail(gm_ifp->expires, pkt);

	/* pass2 covers installing state & removing old state; all the v1
	 * compat is handled at this point.
	 *
	 * Note that "old state" may be v2; subscribers will switch from v2
	 * reports to v1 reports when the querier changes from v2 to v1. So,
	 * limiting this to v1 would be wrong.
	 */
	gm_handle_v2_pass2_excl(pkt, 0);

	if (pkt->n_active == 0)
		gm_packet_free(pkt);
}

static void gm_handle_v1_leave(struct gm_if *gm_ifp,
			       const struct sockaddr_in6 *pkt_src, char *data,
			       size_t len)
{
	struct mld_v1_pkt *hdr;
	struct gm_subscriber *subscriber;
	struct gm_sg *grp;
	struct gm_packet_sg *old_grp;

	if (len < sizeof(*hdr)) {
		if (PIM_DEBUG_IGMP_PACKETS)
			zlog_debug(log_pkt_src(
				"malformed MLDv1 leave (truncated)"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	gm_ifp->stats.rx_old_leave++;

	hdr = (struct mld_v1_pkt *)data;

	subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
	if (!subscriber)
		return;

	/* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */

	grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY);
	if (grp) {
		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
		if (old_grp) {
			gm_packet_sg_drop(old_grp);
			gm_sg_update(grp, false);
			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
		}
	}

	/* } */

	/* nothing more to do here, pass2 is no-op for leaves */
	gm_subscriber_drop(&subscriber);
}

/* for each general query received (or sent), a timer is started to expire
 * _everything_ at the appropriate time (including robustness multiplier).
 *
 * So when this timer hits, all packets - with all of their items - that were
 * received *before* the query are aged out, and state updated accordingly.
 * Note that when we receive a refresh/update, the previous/old packet is
 * already dropped and replaced with a new one, so in normal steady-state
 * operation, this timer won't be doing anything.
 *
 * Additionally, if a subscriber actively leaves a group, that goes through
 * its own path too and won't hit this. This is really only triggered when a
 * host straight up disappears.
 */
static void gm_t_expire(struct thread *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	struct gm_packet_state *pkt;

	zlog_info(log_ifp("general expiry timer"));

	while (gm_ifp->n_pending) {
		struct gm_general_pending *pend = gm_ifp->pending;
		struct timeval remain;
		int64_t remain_ms;

		remain_ms = monotime_until(&pend->expiry, &remain);
		if (remain_ms > 0) {
			if (PIM_DEBUG_IGMP_EVENTS)
				zlog_debug(
					log_ifp("next general expiry in %" PRId64 "ms"),
					remain_ms / 1000);

			thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
					    &remain, &gm_ifp->t_expire);
			return;
		}

		while ((pkt = gm_packet_expires_first(gm_ifp->expires))) {
			if (timercmp(&pkt->received, &pend->query, >=))
				break;

			if (PIM_DEBUG_IGMP_PACKETS)
				zlog_debug(log_ifp("expire packet %p"), pkt);
			gm_packet_drop(pkt, true);
		}

		gm_ifp->n_pending--;
		memmove(gm_ifp->pending, gm_ifp->pending + 1,
			gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
	}

	if (PIM_DEBUG_IGMP_EVENTS)
		zlog_debug(log_ifp("next general expiry waiting for query"));
}

/* NB: the receive handlers will also run when sending packets, since we
 * receive our own packets back in.
 */
static void gm_handle_q_general(struct gm_if *gm_ifp,
				struct gm_query_timers *timers)
{
	struct timeval now, expiry;
	struct gm_general_pending *pend;

	monotime(&now);
	timeradd(&now, &timers->expire_wait, &expiry);

	while (gm_ifp->n_pending) {
		pend = &gm_ifp->pending[gm_ifp->n_pending - 1];

		if (timercmp(&pend->expiry, &expiry, <))
			break;

		/* if we end up here, the last item in pending[] has an expiry
		 * later than the expiry for this query. But our query time
		 * (now) is later than that of the item (because, well, that's
		 * how time works.) This makes this query meaningless since
		 * it's "supersetted" within the preexisting query
		 */

		if (PIM_DEBUG_IGMP_TRACE_DETAIL)
			zlog_debug(
				log_ifp("zapping supersetted general timer %pTVMu"),
				&pend->expiry);

		gm_ifp->n_pending--;
		if (!gm_ifp->n_pending)
			THREAD_OFF(gm_ifp->t_expire);
	}

	/* people might be messing with their configs or something */
	if (gm_ifp->n_pending == array_size(gm_ifp->pending))
		return;

	pend = &gm_ifp->pending[gm_ifp->n_pending];
	pend->query = now;
	pend->expiry = expiry;

	if (!gm_ifp->n_pending++) {
		if (PIM_DEBUG_IGMP_TRACE)
			zlog_debug(
				log_ifp("starting general timer @ 0: %pTVMu"),
				&pend->expiry);
		thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
				    &timers->expire_wait, &gm_ifp->t_expire);
	} else if (PIM_DEBUG_IGMP_TRACE)
		zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
			   gm_ifp->n_pending, &pend->expiry);
}

static void gm_t_sg_expire(struct thread *t)
{
	struct gm_sg *sg = THREAD_ARG(t);
	struct gm_if *gm_ifp = sg->iface;
	struct gm_packet_sg *item;

	assertf(sg->state == GM_SG_JOIN_EXPIRING ||
			sg->state == GM_SG_NOPRUNE_EXPIRING,
		"%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t);

	frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
		/* this will also drop EXCLUDE mode S,G lists together with
		 * the *,G entry
		 */
		gm_packet_sg_drop(item);

	/* subs_negative items are only timed out together with the *,G entry
	 * since we won't get any reports for a group-and-source query
	 */
	gm_sg_update(sg, true);
}

static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg,
			       struct timeval ref)
{
	struct gm_packet_state *pkt;

	if (!sg->most_recent) {
		struct gm_packet_state *best_pkt = NULL;
		struct gm_packet_sg *item;

		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
			pkt = gm_packet_sg2state(item);

			if (!best_pkt ||
			    timercmp(&pkt->received, &best_pkt->received, >)) {
				best_pkt = pkt;
				sg->most_recent = item;
			}
		}
	}
	if (sg->most_recent) {
		struct timeval fuzz;

		pkt = gm_packet_sg2state(sg->most_recent);

		/* this shouldn't happen on plain old real ethernet segment,
		 * but on something like a VXLAN or VPLS it is very possible
		 * that we get a report before the query that triggered it.
		 * (imagine a triangle scenario with 3 datacenters, it's very
		 * possible A->B + B->C is faster than A->C due to odd routing)
		 *
		 * This makes a little tolerance allowance to handle that case.
		 */
		timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz);

		if (timercmp(&fuzz, &ref, >))
			return true;
	}
	return false;
}

static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
			      struct timeval expire_wait)
{
	struct timeval now;

	if (!sg)
		return;
	if (sg->state == GM_SG_PRUNE)
		return;

	monotime(&now);
	if (gm_sg_check_recent(gm_ifp, sg, now))
		return;

	if (PIM_DEBUG_IGMP_TRACE)
		zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait);

	if (sg->t_sg_expire) {
		struct timeval remain;

		remain = thread_timer_remain(sg->t_sg_expire);
		if (timercmp(&remain, &expire_wait, <=))
			return;

		THREAD_OFF(sg->t_sg_expire);
	}

	thread_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
			    &sg->t_sg_expire);
}

static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
				 struct gm_query_timers *timers, pim_addr grp,
				 const pim_addr *srcs, size_t n_src)
{
	struct gm_sg *sg;
	size_t i;

	for (i = 0; i < n_src; i++) {
		sg = gm_sg_find(gm_ifp, grp, srcs[i]);
		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
	}
}

static void gm_t_grp_expire(struct thread *t)
{
	/* if we're here, that means when we received the group-specific query
	 * there was one or more active S,G for this group. For *,G the timer
	 * in sg->t_sg_expire is running separately and gets cancelled when we
	 * receive a report, so that work is left to gm_t_sg_expire and we
	 * shouldn't worry about it here.
	 */
	struct gm_grp_pending *pend = THREAD_ARG(t);
	struct gm_if *gm_ifp = pend->iface;
	struct gm_sg *sg, *sg_start, sg_ref = {};

	if (PIM_DEBUG_IGMP_EVENTS)
		zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);

	/* gteq lookup - try to find *,G or S,G (S,G is > *,G)
	 * could technically be gt to skip a possible *,G
	 */
	sg_ref.sgaddr.grp = pend->grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
		struct gm_packet_sg *item;

		if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
			break;
		if (pim_addr_is_any(sg->sgaddr.src))
			/* handled by gm_t_sg_expire / sg->t_sg_expire */
			continue;
		if (gm_sg_check_recent(gm_ifp, sg, pend->query))
			continue;

		/* we may also have a group-source-specific query going on in
		 * parallel. But if we received nothing for the *,G query,
		 * the S,G query is kinda irrelevant.
		 */
		THREAD_OFF(sg->t_sg_expire);

		frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
			/* this will also drop the EXCLUDE S,G lists */
			gm_packet_sg_drop(item);

		gm_sg_update(sg, true);
	}

	gm_grp_pends_del(gm_ifp->grp_pends, pend);
	XFREE(MTYPE_GM_GRP_PENDING, pend);
}

static void gm_handle_q_group(struct gm_if *gm_ifp,
			      struct gm_query_timers *timers, pim_addr grp)
{
	struct gm_sg *sg, sg_ref = {};
	struct gm_grp_pending *pend, pend_ref = {};

	sg_ref.sgaddr.grp = grp;
	sg_ref.sgaddr.src = PIMADDR_ANY;
	/* gteq lookup - try to find *,G or S,G (S,G is > *,G) */
	sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);

	if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
		/* we have nothing at all for this group - don't waste RAM */
		return;

	if (pim_addr_is_any(sg->sgaddr.src)) {
		/* actually found *,G entry here */
		if (PIM_DEBUG_IGMP_TRACE)
			zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
				   &grp);
		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);

		sg = gm_sgs_next(gm_ifp->sgs, sg);
		if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
			/* no S,G for this group */
			return;
	}

	pend_ref.grp = grp;
	pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);

	if (pend) {
		struct timeval remain;

		remain = thread_timer_remain(pend->t_expire);
		if (timercmp(&remain, &timers->expire_wait, <=))
			return;

		THREAD_OFF(pend->t_expire);
	} else {
		pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
		pend->grp = grp;
		pend->iface = gm_ifp;
		gm_grp_pends_add(gm_ifp->grp_pends, pend);
	}

	monotime(&pend->query);
	thread_add_timer_tv(router->master, gm_t_grp_expire, pend,
			    &timers->expire_wait, &pend->t_expire);

	if (PIM_DEBUG_IGMP_TRACE)
		zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
			   pend->t_expire);
}

static void gm_bump_querier(struct gm_if *gm_ifp)
{
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;

	THREAD_OFF(gm_ifp->t_query);

	if (pim_addr_is_any(pim_ifp->ll_lowest))
		return;
	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
		return;

	gm_ifp->n_startup = gm_ifp->cur_qrv;

	thread_execute(router->master, gm_t_query, gm_ifp, 0);
}

static void gm_t_other_querier(struct thread *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;

	zlog_info(log_ifp("other querier timer expired"));

	gm_ifp->querier = pim_ifp->ll_lowest;
	gm_ifp->n_startup = gm_ifp->cur_qrv;

	thread_execute(router->master, gm_t_query, gm_ifp, 0);
}

static void gm_handle_query(struct gm_if *gm_ifp,
			    const struct sockaddr_in6 *pkt_src,
			    pim_addr *pkt_dst, char *data, size_t len)
{
	struct mld_v2_query_hdr *hdr;
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct gm_query_timers timers;
	bool general_query;

	if (len < sizeof(struct mld_v2_query_hdr) &&
	    len != sizeof(struct mld_v1_pkt)) {
		zlog_warn(log_pkt_src("invalid query size"));
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	hdr = (struct mld_v2_query_hdr *)data;
	general_query = pim_addr_is_any(hdr->grp);

	if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) {
		zlog_warn(log_pkt_src(
				  "malformed MLDv2 query (invalid group %pI6)"),
			  &hdr->grp);
		gm_ifp->stats.rx_drop_malformed++;
		return;
	}

	if (len >= sizeof(struct mld_v2_query_hdr)) {
		size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);

		if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 query (truncated source list)"));
			gm_ifp->stats.rx_drop_malformed++;
			return;
		}

		if (general_query && src_space) {
			zlog_warn(log_pkt_src(
				"malformed MLDv2 query (general query with non-empty source list)"));
			gm_ifp->stats.rx_drop_malformed++;
			return;
		}
	}

	/* accepting queries unicast to us (or addressed to a wrong group)
	 * can mess up querier election as well as cause us to terminate
	 * traffic (since after a unicast query no reports will be coming in)
	 */
	if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) {
		if (pim_addr_is_any(hdr->grp)) {
			zlog_warn(
				log_pkt_src(
					"wrong destination %pPA for general query"),
				pkt_dst);
			gm_ifp->stats.rx_drop_dstaddr++;
			return;
		}

		if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) {
			gm_ifp->stats.rx_drop_dstaddr++;
			zlog_warn(
				log_pkt_src(
					"wrong destination %pPA for group specific query"),
				pkt_dst);
			return;
		}
	}

	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
		if (PIM_DEBUG_IGMP_EVENTS)
			zlog_debug(
				log_pkt_src("replacing elected querier %pPA"),
				&gm_ifp->querier);

		gm_ifp->querier = pkt_src->sin6_addr;
	}

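	/* MLDv1 queries carry the maximum response delay directly in
	 * milliseconds and have no QRV/QQIC fields, so our local values are
	 * reused; MLDv2 queries put QRV in the low 3 flag bits and use the
	 * exponential Maximum Response Code / QQIC encodings, hence the
	 * decode helpers below.
	 */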
	if (len == sizeof(struct mld_v1_pkt)) {
		timers.qrv = gm_ifp->cur_qrv;
		timers.max_resp_ms = hdr->max_resp_code;
		timers.qqic_ms = gm_ifp->cur_query_intv;
	} else {
		timers.qrv = (hdr->flags & 0x7) ?: 8;
		timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code);
		timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000;
	}
	timers.fuzz = gm_ifp->cfg_timing_fuzz;

	gm_expiry_calc(&timers);

	if (PIM_DEBUG_IGMP_TRACE_DETAIL)
		zlog_debug(
			log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
			timers.qrv, timers.max_resp_ms, timers.qqic_ms,
			&timers.expire_wait);

	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
		unsigned int other_ms;

		THREAD_OFF(gm_ifp->t_query);
		THREAD_OFF(gm_ifp->t_other_querier);

		other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
		thread_add_timer_msec(router->master, gm_t_other_querier,
				      gm_ifp, other_ms,
				      &gm_ifp->t_other_querier);
	}

	if (len == sizeof(struct mld_v1_pkt)) {
		if (general_query) {
			gm_handle_q_general(gm_ifp, &timers);
			gm_ifp->stats.rx_query_old_general++;
		} else {
			gm_handle_q_group(gm_ifp, &timers, hdr->grp);
			gm_ifp->stats.rx_query_old_group++;
		}
		return;
	}

	/* v2 query - [S]uppress bit */
	if (hdr->flags & 0x8) {
		gm_ifp->stats.rx_query_new_sbit++;
		return;
	}

	if (general_query) {
		gm_handle_q_general(gm_ifp, &timers);
		gm_ifp->stats.rx_query_new_general++;
	} else if (!ntohs(hdr->n_src)) {
		gm_handle_q_group(gm_ifp, &timers, hdr->grp);
		gm_ifp->stats.rx_query_new_group++;
	} else {
		gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
				     ntohs(hdr->n_src));
		gm_ifp->stats.rx_query_new_groupsrc++;
	}
}

static void gm_rx_process(struct gm_if *gm_ifp,
			  const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst,
			  void *data, size_t pktlen)
{
	struct icmp6_plain_hdr *icmp6 = data;
	uint16_t pkt_csum, ref_csum;
	struct ipv6_ph ph6 = {
		.src = pkt_src->sin6_addr,
		.dst = *pkt_dst,
		.ulpl = htons(pktlen),
		.next_hdr = IPPROTO_ICMPV6,
	};

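	/* ICMPv6 checksums cover an IPv6 pseudo-header (source, destination,
	 * upper-layer length, next header), so rebuild that from the
	 * addresses the packet actually arrived with and verify before
	 * looking at any other field.
	 */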
	pkt_csum = icmp6->icmp6_cksum;
	icmp6->icmp6_cksum = 0;
	ref_csum = in_cksum_with_ph6(&ph6, data, pktlen);

	if (pkt_csum != ref_csum) {
		zlog_warn(
			log_pkt_src(
				"(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
			pkt_dst, pkt_csum, ref_csum);
		gm_ifp->stats.rx_drop_csum++;
		return;
	}

	data = (icmp6 + 1);
	pktlen -= sizeof(*icmp6);

	switch (icmp6->icmp6_type) {
	case ICMP6_MLD_QUERY:
		gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen);
		break;
	case ICMP6_MLD_V1_REPORT:
		gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V1_DONE:
		gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen);
		break;
	case ICMP6_MLD_V2_REPORT:
		gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen);
		break;
	}
}

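/* The Hop-by-Hop options header delivered via IPV6_HOPOPTS starts with a
 * next-header byte and a length byte counted in 8-octet units (excluding the
 * first 8), followed by TLV options; Pad1 is a lone zero byte, everything
 * else is type/len/value. The Router Alert option (RFC 2711) carries a
 * 2-byte value, 0 meaning "contains an MLD message".
 */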
static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
				 uint16_t alert_type)
{
	uint8_t *hopopt_end;

	if (hopopt_len < 8)
		return false;
	if (hopopt_len < (hopopts[1] + 1U) * 8U)
		return false;

	hopopt_end = hopopts + (hopopts[1] + 1) * 8;
	hopopts += 2;

	while (hopopts < hopopt_end) {
		if (hopopts[0] == IP6OPT_PAD1) {
			hopopts++;
			continue;
		}

		if (hopopts > hopopt_end - 2)
			break;
		if (hopopts > hopopt_end - 2 - hopopts[1])
			break;

		if (hopopts[0] == IP6OPT_ROUTER_ALERT && hopopts[1] == 2) {
			uint16_t have_type = (hopopts[2] << 8) | hopopts[3];

			if (have_type == alert_type)
				return true;
		}

		hopopts += 2 + hopopts[1];
	}
	return false;
}

static void gm_t_recv(struct thread *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	union {
		char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
			 CMSG_SPACE(256) /* hop options */ +
			 CMSG_SPACE(sizeof(int)) /* hopcount */];
		struct cmsghdr align;
	} cmsgbuf;
	struct cmsghdr *cmsg;
	struct in6_pktinfo *pktinfo = NULL;
	uint8_t *hopopts = NULL;
	size_t hopopt_len = 0;
	int *hoplimit = NULL;
	char rxbuf[2048];
	struct msghdr mh[1] = {};
	struct iovec iov[1];
	struct sockaddr_in6 pkt_src[1];
	ssize_t nread;
	size_t pktlen;

	thread_add_read(router->master, gm_t_recv, gm_ifp, gm_ifp->sock,
			&gm_ifp->t_recv);

	iov->iov_base = rxbuf;
	iov->iov_len = sizeof(rxbuf);

	mh->msg_name = pkt_src;
	mh->msg_namelen = sizeof(pkt_src);
	mh->msg_control = cmsgbuf.buf;
	mh->msg_controllen = sizeof(cmsgbuf.buf);
	mh->msg_iov = iov;
	mh->msg_iovlen = array_size(iov);
	mh->msg_flags = 0;

	nread = recvmsg(gm_ifp->sock, mh, MSG_PEEK | MSG_TRUNC);
	if (nread <= 0) {
		zlog_err(log_ifp("RX error: %m"));
		gm_ifp->stats.rx_drop_sys++;
		return;
	}

	if ((size_t)nread > sizeof(rxbuf)) {
		iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread);
		iov->iov_len = nread;
	}
	nread = recvmsg(gm_ifp->sock, mh, 0);
	if (nread <= 0) {
		zlog_err(log_ifp("RX error: %m"));
		gm_ifp->stats.rx_drop_sys++;
		goto out_free;
	}

	if ((int)pkt_src->sin6_scope_id != gm_ifp->ifp->ifindex)
		goto out_free;

	for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) {
		if (cmsg->cmsg_level != SOL_IPV6)
			continue;

		switch (cmsg->cmsg_type) {
		case IPV6_PKTINFO:
			pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			break;
		case IPV6_HOPOPTS:
			hopopts = CMSG_DATA(cmsg);
			hopopt_len = cmsg->cmsg_len - sizeof(*cmsg);
			break;
		case IPV6_HOPLIMIT:
			hoplimit = (int *)CMSG_DATA(cmsg);
			break;
		}
	}

	if (!pktinfo || !hoplimit) {
		zlog_err(log_ifp(
			"BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
		gm_ifp->stats.rx_drop_sys++;
		goto out_free;
	}

	if (*hoplimit != 1) {
		zlog_err(log_pkt_src("packet with hop limit != 1"));
		/* spoofing attempt => count on srcaddr counter */
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) {
		zlog_err(log_pkt_src(
			"packet without IPv6 Router Alert MLD option"));
		gm_ifp->stats.rx_drop_ra++;
		goto out_free;
	}

	if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr))
		/* reports from :: happen in normal operation for DAD, so
		 * don't spam log messages about this
		 */
		goto out_free;

	if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) {
		zlog_warn(log_pkt_src("packet from invalid source address"));
		gm_ifp->stats.rx_drop_srcaddr++;
		goto out_free;
	}

	pktlen = nread;
	if (pktlen < sizeof(struct icmp6_plain_hdr)) {
		zlog_warn(log_pkt_src("truncated packet"));
		gm_ifp->stats.rx_drop_malformed++;
		goto out_free;
	}

	gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base,
		      pktlen);

out_free:
	if (iov->iov_base != rxbuf)
		XFREE(MTYPE_GM_PACKET, iov->iov_base);
}

static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
			  const pim_addr *srcs, size_t n_srcs, bool s_bit)
{
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct sockaddr_in6 dstaddr = {
		.sin6_family = AF_INET6,
		.sin6_scope_id = gm_ifp->ifp->ifindex,
	};
	struct {
		struct icmp6_plain_hdr hdr;
		struct mld_v2_query_hdr v2_query;
	} query = {
		/* clang-format off */
		.hdr = {
			.icmp6_type = ICMP6_MLD_QUERY,
			.icmp6_code = 0,
		},
		.v2_query = {
			.grp = grp,
		},
		/* clang-format on */
	};
	struct ipv6_ph ph6 = {
		.src = pim_ifp->ll_lowest,
		.ulpl = htons(sizeof(query)),
		.next_hdr = IPPROTO_ICMPV6,
	};
	union {
		char buf[CMSG_SPACE(8)];
		struct cmsghdr align;
	} cmsg = {};
	struct cmsghdr *cmh;
	struct msghdr mh[1] = {};
	struct iovec iov[3];
	size_t iov_len;
	ssize_t ret, expect_ret;
	uint8_t *dp;

	if (if_is_loopback(gm_ifp->ifp)) {
		/* Linux is a bit odd with multicast on loopback */
		ph6.src = in6addr_loopback;
		dstaddr.sin6_addr = in6addr_loopback;
	} else if (pim_addr_is_any(grp))
		dstaddr.sin6_addr = gm_all_hosts;
	else
		dstaddr.sin6_addr = grp;

	query.v2_query.max_resp_code =
		mld_max_resp_encode(gm_ifp->cur_max_resp);
	query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? gm_ifp->cur_qrv : 0;
	if (s_bit)
		query.v2_query.flags |= 0x08;
	query.v2_query.qqic =
		igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000);
	query.v2_query.n_src = htons(n_srcs);

	ph6.dst = dstaddr.sin6_addr;

	/* ph6 not included in sendmsg */
	iov[0].iov_base = &ph6;
	iov[0].iov_len = sizeof(ph6);
	iov[1].iov_base = &query;
	if (gm_ifp->cur_version == GM_MLDV1) {
		iov_len = 2;
		iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt);
	} else if (!n_srcs) {
		iov_len = 2;
		iov[1].iov_len = sizeof(query);
	} else {
		iov[1].iov_len = sizeof(query);
		iov[2].iov_base = (void *)srcs;
		iov[2].iov_len = n_srcs * sizeof(srcs[0]);
		iov_len = 3;
	}

	query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);

	if (PIM_DEBUG_IGMP_PACKETS)
		zlog_debug(
			log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
			&pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);

	mh->msg_name = &dstaddr;
	mh->msg_namelen = sizeof(dstaddr);
	mh->msg_iov = iov + 1;
	mh->msg_iovlen = iov_len - 1;
	mh->msg_control = &cmsg;
	mh->msg_controllen = sizeof(cmsg.buf);
	cmh = CMSG_FIRSTHDR(mh);
	cmh->cmsg_level = IPPROTO_IPV6;
	cmh->cmsg_type = IPV6_HOPOPTS;
	cmh->cmsg_len = CMSG_LEN(8);
	dp = CMSG_DATA(cmh);
	*dp++ = 0;			/* next header */
	*dp++ = 0;			/* length (8-byte blocks, minus 1) */
	*dp++ = IP6OPT_ROUTER_ALERT;	/* router alert */
	*dp++ = 2;			/* length */
	*dp++ = 0;			/* value (2 bytes) */
	*dp++ = 0;			/* value (2 bytes) (0 = MLD) */
	*dp++ = 0;			/* pad0 */
	*dp++ = 0;			/* pad0 */

	expect_ret = iov[1].iov_len;
	if (iov_len == 3)
		expect_ret += iov[2].iov_len;

	frr_with_privs (&pimd_privs) {
		ret = sendmsg(gm_ifp->sock, mh, 0);
	}

	if (ret != expect_ret) {
		zlog_warn(log_ifp("failed to send query: %m"));
		gm_ifp->stats.tx_query_fail++;
	} else {
		if (gm_ifp->cur_version == GM_MLDV1) {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_old_general++;
			else
				gm_ifp->stats.tx_query_old_group++;
		} else {
			if (pim_addr_is_any(grp))
				gm_ifp->stats.tx_query_new_general++;
			else if (!n_srcs)
				gm_ifp->stats.tx_query_new_group++;
			else
				gm_ifp->stats.tx_query_new_groupsrc++;
		}
	}
}

static void gm_t_query(struct thread *t)
{
	struct gm_if *gm_ifp = THREAD_ARG(t);
	unsigned int timer_ms = gm_ifp->cur_query_intv;

	if (gm_ifp->n_startup) {
		timer_ms /= 4;
		gm_ifp->n_startup--;
	}

	thread_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
			      &gm_ifp->t_query);

	gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
}

static void gm_t_sg_query(struct thread *t)
{
	struct gm_sg *sg = THREAD_ARG(t);

	gm_trigger_specific(sg);
}

/* S,G specific queries (triggered by a member leaving) get a little slack
 * time so we can bundle queries for [S1,S2,S3,...],G into the same query
 */
static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
{
	struct gm_if *gm_ifp = pend_gsq->iface;

	gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
		      pend_gsq->s_bit);

	gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
	XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
}

static void gm_t_gsq_pend(struct thread *t)
{
	struct gm_gsq_pending *pend_gsq = THREAD_ARG(t);

	gm_send_specific(pend_gsq);
}

static void gm_trigger_specific(struct gm_sg *sg)
{
	struct gm_if *gm_ifp = sg->iface;
	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
	struct gm_gsq_pending *pend_gsq, ref = {};

	sg->n_query--;
	if (sg->n_query)
		thread_add_timer_msec(router->master, gm_t_sg_query, sg,
				      gm_ifp->cur_query_intv_trig,
				      &sg->t_sg_query);

	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
		return;
	if (gm_ifp->sock == -1)
		return;

	if (PIM_DEBUG_IGMP_TRACE)
		zlog_debug(log_sg(sg, "triggered query"));

	if (pim_addr_is_any(sg->sgaddr.src)) {
		gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
		return;
	}

	ref.grp = sg->sgaddr.grp;
	ref.s_bit = sg->query_sbit;

	pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
	if (!pend_gsq) {
		pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
		pend_gsq->grp = sg->sgaddr.grp;
		pend_gsq->s_bit = sg->query_sbit;
		pend_gsq->iface = gm_ifp;
		gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);

		thread_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
				    &gm_ifp->cfg_timing_fuzz,
				    &pend_gsq->t_send);
	}

	assert(pend_gsq->n_src < array_size(pend_gsq->srcs));

	pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
	pend_gsq->n_src++;

	if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
		THREAD_OFF(pend_gsq->t_send);
		gm_send_specific(pend_gsq);
		pend_gsq = NULL;
	}
}
1940
1941static void gm_start(struct interface *ifp)
1942{
1943 struct pim_interface *pim_ifp = ifp->info;
1944 struct gm_if *gm_ifp;
1945 int ret, intval;
1946 struct icmp6_filter filter[1];
1947
1948 assert(pim_ifp);
1949 assert(pim_ifp->pim);
1950 assert(pim_ifp->mroute_vif_index >= 0);
1951 assert(!pim_ifp->mld);
1952
1953 gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp));
1954 gm_ifp->ifp = ifp;
1955 pim_ifp->mld = gm_ifp;
1956 gm_ifp->pim = pim_ifp->pim;
1957 monotime(&gm_ifp->started);
1958
1959 zlog_info(log_ifp("starting MLD"));
1960
1961 if (pim_ifp->mld_version == 1)
1962 gm_ifp->cur_version = GM_MLDV1;
1963 else
1964 gm_ifp->cur_version = GM_MLDV2;
1965
1966 /* hardcoded for dev without CLI */
1967 gm_ifp->cur_qrv = 2;
1968 gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
1969 gm_ifp->cur_query_intv_trig = gm_ifp->cur_query_intv;
1970 gm_ifp->cur_max_resp = 250;
1971
1972 gm_ifp->cfg_timing_fuzz.tv_sec = 0;
1973 gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;
1974
1975 gm_sgs_init(gm_ifp->sgs);
1976 gm_subscribers_init(gm_ifp->subscribers);
1977 gm_packet_expires_init(gm_ifp->expires);
1978 gm_grp_pends_init(gm_ifp->grp_pends);
1979 gm_gsq_pends_init(gm_ifp->gsq_pends);
1980
1981 ICMP6_FILTER_SETBLOCKALL(filter);
1982 ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter);
1983 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter);
1984 ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter);
1985 ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter);
1986
1987 frr_with_privs (&pimd_privs) {
1988 gm_ifp->sock = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
1989 if (gm_ifp->sock < 0) {
1990 zlog_err("(%s) could not create MLD socket: %m",
1991 ifp->name);
1992 return;
1993 }
1994
1995 ret = setsockopt(gm_ifp->sock, SOL_ICMPV6, ICMP6_FILTER, filter,
1996 sizeof(filter));
1997 if (ret)
1998 zlog_err("(%s) failed to set ICMP6_FILTER: %m",
1999 ifp->name);
2000
2001 intval = 1;
2002 ret = setsockopt(gm_ifp->sock, SOL_IPV6, IPV6_RECVPKTINFO,
2003 &intval, sizeof(intval));
2004 if (ret)
2005 zlog_err("(%s) failed to set IPV6_RECVPKTINFO: %m",
2006 ifp->name);
2007
2008 intval = 1;
2009 ret = setsockopt(gm_ifp->sock, SOL_IPV6, IPV6_RECVHOPOPTS,
2010 &intval, sizeof(intval));
2011 if (ret)
2012 zlog_err("(%s) failed to set IPV6_RECVHOPOPTS: %m",
2013 ifp->name);
2014
2015 intval = 1;
2016 ret = setsockopt(gm_ifp->sock, SOL_IPV6, IPV6_RECVHOPLIMIT,
2017 &intval, sizeof(intval));
2018 if (ret)
2019 zlog_err("(%s) failed to set IPV6_RECVHOPLIMIT: %m",
2020 ifp->name);
2021
2022 intval = 0;
2023 ret = setsockopt(gm_ifp->sock, SOL_IPV6, IPV6_MULTICAST_LOOP,
2024 &intval, sizeof(intval));
2025 if (ret)
2026 zlog_err(
2027 "(%s) failed to disable IPV6_MULTICAST_LOOP: %m",
2028 ifp->name);
2029
2030 intval = 1;
2031 ret = setsockopt(gm_ifp->sock, SOL_IPV6, IPV6_MULTICAST_HOPS,
2032 &intval, sizeof(intval));
2033 if (ret)
2034 zlog_err("(%s) failed to set IPV6_MULTICAST_HOPS: %m",
2035 ifp->name);
2036
2037 /* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
2038 * RX filtering in Linux. It only means "receive all groups
2039 * that something on the system has joined". To actually
2040 * receive *all* MLD packets - which is what we need -
2041 * multicast routing must be enabled on the interface. And
2042 * this only works for MLD packets specifically.
2043 *
2044 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
2045 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
2046 *
2047 * Also note that the code there explicitly checks for the IPv6
2048 * router alert MLD option (which is required by the RFC to be
2049 * on MLD packets.) That implies trying to support hosts which
2050 * erroneously don't add that option is just not possible.
2051 */
2052 intval = 1;
2053 ret = setsockopt(gm_ifp->sock, SOL_IPV6, IPV6_MULTICAST_ALL,
2054 &intval, sizeof(intval));
2055 if (ret)
2056 zlog_info(
2057 "(%s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
2058 ifp->name);
2059
2060 struct ipv6_mreq mreq;
2061
2062 /* all-MLDv2 group */
2063 mreq.ipv6mr_multiaddr = gm_all_routers;
2064 mreq.ipv6mr_interface = ifp->ifindex;
2065 ret = setsockopt(gm_ifp->sock, SOL_IPV6, IPV6_JOIN_GROUP, &mreq,
2066 sizeof(mreq));
2067 if (ret)
2068 zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
2069 ifp->name);
2070 }
2071
2072 thread_add_read(router->master, gm_t_recv, gm_ifp, gm_ifp->sock,
2073 &gm_ifp->t_recv);
2074}
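/* Minimal standalone sketch of the socket setup above, for testing the
 * ICMP6_FILTER behaviour in isolation.  Not part of pim6d; the MLD type
 * names are the standard <netinet/icmp6.h> ones, with the MLDv2 report
 * type (143) given numerically since not all libcs define a name for it:
 *
 *	#include <netinet/icmp6.h>
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int open_mld_socket(void)
 *	{
 *		struct icmp6_filter flt;
 *		int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		// drop everything except the four MLD message types
 *		ICMP6_FILTER_SETBLOCKALL(&flt);
 *		ICMP6_FILTER_SETPASS(MLD_LISTENER_QUERY, &flt);     // 130
 *		ICMP6_FILTER_SETPASS(MLD_LISTENER_REPORT, &flt);    // 131
 *		ICMP6_FILTER_SETPASS(MLD_LISTENER_REDUCTION, &flt); // 132
 *		ICMP6_FILTER_SETPASS(143, &flt);                    // MLDv2 report
 *
 *		if (setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER, &flt,
 *			       sizeof(flt)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;
 *	}
 */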
2075
2076void gm_ifp_teardown(struct interface *ifp)
2077{
2078 struct pim_interface *pim_ifp = ifp->info;
2079 struct gm_if *gm_ifp;
2080 struct gm_packet_state *pkt;
2081 struct gm_grp_pending *pend_grp;
2082 struct gm_gsq_pending *pend_gsq;
2083 struct gm_subscriber *subscriber;
2084 struct gm_sg *sg;
2085
2086 if (!pim_ifp || !pim_ifp->mld)
2087 return;
2088
2089 gm_ifp = pim_ifp->mld;
2090 gm_ifp->stopping = true;
2091 if (PIM_DEBUG_IGMP_EVENTS)
2092 zlog_debug(log_ifp("MLD stop"));
2093
2094 THREAD_OFF(gm_ifp->t_query);
2095 THREAD_OFF(gm_ifp->t_other_querier);
2096 THREAD_OFF(gm_ifp->t_recv);
2097 THREAD_OFF(gm_ifp->t_expire);
2098
2099 if (gm_ifp->sock != -1) {
2100 close(gm_ifp->sock);
2101 gm_ifp->sock = -1;
2102 }
2103
2104 while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
2105 gm_packet_drop(pkt, false);
2106
2107 while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
2108 THREAD_OFF(pend_grp->t_expire);
2109 XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
2110 }
2111
2112 while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
2113 THREAD_OFF(pend_gsq->t_send);
2114 XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
2115 }
2116
2117 while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
2118 THREAD_OFF(sg->t_sg_expire);
2119 assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
2120 &sg->sgaddr);
2121 assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
2122 &sg->sgaddr);
2123
2124 gm_sg_free(sg);
2125 }
2126
2127 while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
2128 assertf(!gm_packets_count(subscriber->packets), "%pPA",
2129 &subscriber->addr);
2130 XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
2131 }
2132
2133 gm_grp_pends_fini(gm_ifp->grp_pends);
2134 gm_packet_expires_fini(gm_ifp->expires);
2135 gm_subscribers_fini(gm_ifp->subscribers);
2136 gm_sgs_fini(gm_ifp->sgs);
2137
2138 XFREE(MTYPE_GM_IFACE, gm_ifp);
2139 pim_ifp->mld = NULL;
2140}
2141
2142static void gm_update_ll(struct interface *ifp)
2143{
2144 struct pim_interface *pim_ifp = ifp->info;
2145 struct gm_if *gm_ifp = pim_ifp ? pim_ifp->mld : NULL;
2146 struct sockaddr_in6 sa = {.sin6_family = AF_INET6};
2147 int rc;
2148 bool was_querier;
2149
2150 was_querier =
2151 !IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
2152 !pim_addr_is_any(gm_ifp->querier);
2153
2154 gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
2155 if (was_querier)
2156 gm_ifp->querier = pim_ifp->ll_lowest;
2157 THREAD_OFF(gm_ifp->t_query);
2158
2159 if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
2160 if (was_querier)
2161 zlog_info(log_ifp(
2162 "lost link-local address, stopping querier"));
2163 return;
2164 }
2165
2166 if (was_querier)
2167 zlog_info(log_ifp("new link-local %pPA while querier"),
2168 &gm_ifp->cur_ll_lowest);
2169 else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
2170 pim_addr_is_any(gm_ifp->querier)) {
2171 zlog_info(log_ifp("new link-local %pPA, becoming querier"),
2172 &gm_ifp->cur_ll_lowest);
2173 gm_ifp->querier = gm_ifp->cur_ll_lowest;
2174 } else
2175 return;
2176
2177 /* we're querier */
2178 sa.sin6_addr = pim_ifp->ll_lowest;
2179 sa.sin6_scope_id = ifp->ifindex;
2180
2181 frr_with_privs (&pimd_privs) {
2182 rc = bind(gm_ifp->sock, (struct sockaddr *)&sa, sizeof(sa));
2183 }
2184 if (rc)
2185 zlog_err(log_ifp("bind to %pPA failed: %m"),
2186 &pim_ifp->ll_lowest);
2187
2188 gm_ifp->n_startup = gm_ifp->cur_qrv;
2189 thread_execute(router->master, gm_t_query, gm_ifp, 0);
2190}
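/* Example of the election rule above (addresses illustrative): if our
 * lowest link-local is fe80::1 and the known querier is fe80::2, the
 * IPV6_ADDR_CMP() result is negative and we take over as querier; if our
 * lowest is fe80::3 we stay quiet and leave querying to fe80::2.
 */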
2191
2192void gm_ifp_update(struct interface *ifp)
2193{
2194 struct pim_interface *pim_ifp = ifp->info;
2195 struct gm_if *gm_ifp;
2196 bool changed = false;
2197
2198 if (!pim_ifp)
2199 return;
2200 if (!if_is_operative(ifp) || !pim_ifp->pim ||
2201 pim_ifp->mroute_vif_index < 0) {
2202 gm_ifp_teardown(ifp);
2203 return;
2204 }
2205
2206 if (!pim_ifp->mld)
2207 gm_start(ifp);
2208
2209 gm_ifp = pim_ifp->mld;
2210 if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
2211 gm_update_ll(ifp);
2212
2213 unsigned int cfg_query_intv = pim_ifp->gm_default_query_interval * 1000;
2214
2215 if (gm_ifp->cur_query_intv != cfg_query_intv) {
2216 gm_ifp->cur_query_intv = cfg_query_intv;
2217 gm_ifp->cur_query_intv_trig = cfg_query_intv;
2218 changed = true;
2219 }
2220
2221 enum gm_version cfg_version;
2222
2223 if (pim_ifp->mld_version == 1)
2224 cfg_version = GM_MLDV1;
2225 else
2226 cfg_version = GM_MLDV2;
2227 if (gm_ifp->cur_version != cfg_version) {
2228 gm_ifp->cur_version = cfg_version;
2229 changed = true;
2230 }
2231
2232 if (changed) {
2233 if (PIM_DEBUG_IGMP_TRACE)
2234 zlog_debug(log_ifp(
2235 "MLD querier config changed, querying"));
2236 gm_bump_querier(gm_ifp);
2237 }
2238}
2239
2240/*
2241 * CLI (show commands only)
2242 */
2243
2244#include "lib/command.h"
2245
2246#ifndef VTYSH_EXTRACT_PL
2247#include "pimd/pim6_mld_clippy.c"
2248#endif
2249
2250#define MLD_STR "Multicast Listener Discovery\n"
2251
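/* Resolve the "[vrf <VRF|all>]" argument used by the show commands below.
 * Note the NULL return is overloaded: it means "iterate all VRFs" when
 * vrf_str is "all" (*err untouched), but also "lookup failed" (*err set
 * to CMD_WARNING), so callers check *err before using the result.
 */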
2252static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
2253 int *err)
2254{
2255 struct vrf *ret;
2256
2257 if (!vrf_str)
2258 return vrf_lookup_by_id(VRF_DEFAULT);
2259 if (!strcmp(vrf_str, "all"))
2260 return NULL;
2261 ret = vrf_lookup_by_name(vrf_str);
2262 if (ret)
2263 return ret;
2264
2265 vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str);
2266 *err = CMD_WARNING;
2267 return NULL;
2268}
2269
2270static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp)
2271{
2272 struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
2273 struct gm_if *gm_ifp;
2274 bool querier;
2275 size_t i;
2276
2277 if (!pim_ifp) {
2278 vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name);
2279 return;
2280 }
2281
2282 gm_ifp = pim_ifp->mld;
2283 if (!gm_ifp) {
2284 vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name);
2285 return;
2286 }
2287
2288 querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
2289
2290 vty_out(vty, "Interface %s: MLD running\n", ifp->name);
2291 vty_out(vty, " Uptime: %pTVMs\n", &gm_ifp->started);
2292 vty_out(vty, " MLD version: %d\n", gm_ifp->cur_version);
2293 vty_out(vty, " Querier: %pPA%s\n", &gm_ifp->querier,
2294 querier ? " (this system)" : "");
2295 vty_out(vty, " Query timer: %pTH\n", gm_ifp->t_query);
2296 vty_out(vty, " Other querier timer: %pTH\n",
2297 gm_ifp->t_other_querier);
2298 vty_out(vty, " Robustness value: %u\n", gm_ifp->cur_qrv);
2299 vty_out(vty, " Query interval: %ums\n",
2300 gm_ifp->cur_query_intv);
2301 vty_out(vty, " Query response timer: %ums\n", gm_ifp->cur_max_resp);
2302 vty_out(vty, " Last member query intv.: %ums\n",
2303 gm_ifp->cur_query_intv_trig);
2304 vty_out(vty, " %u expiry timers from general queries:\n",
2305 gm_ifp->n_pending);
2306 for (i = 0; i < gm_ifp->n_pending; i++) {
2307 struct gm_general_pending *p = &gm_ifp->pending[i];
2308
2309 vty_out(vty, " %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
2310 &p->query, &p->expiry);
2311 }
2312 vty_out(vty, " %zu expiry timers from *,G queries\n",
2313 gm_grp_pends_count(gm_ifp->grp_pends));
2314 vty_out(vty, " %zu expiry timers from S,G queries\n",
2315 gm_gsq_pends_count(gm_ifp->gsq_pends));
2316 vty_out(vty, " %zu total *,G/S,G from %zu hosts in %zu bundles\n",
2317 gm_sgs_count(gm_ifp->sgs),
2318 gm_subscribers_count(gm_ifp->subscribers),
2319 gm_packet_expires_count(gm_ifp->expires));
2320 vty_out(vty, "\n");
2321}
2322
2323static void gm_show_if_one(struct vty *vty, struct interface *ifp,
2324 json_object *js_if)
2325{
2326 struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
2327 struct gm_if *gm_ifp = pim_ifp->mld;
2328 bool querier;
2329
2330 if (!gm_ifp) {
2331 if (js_if)
2332 json_object_string_add(js_if, "state", "down");
2333 else
2334 vty_out(vty, "%-16s %5s\n", ifp->name, "down");
2335 return;
2336 }
2337
2338 querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
2339
2340 if (js_if) {
2341 json_object_string_add(js_if, "state", "up");
2342 json_object_string_addf(js_if, "version", "%d",
2343 gm_ifp->cur_version);
2344 json_object_string_addf(js_if, "upTime", "%pTVMs",
2345 &gm_ifp->started);
2346 json_object_boolean_add(js_if, "querier", querier);
2347 json_object_string_addf(js_if, "querierIp", "%pPA",
2348 &gm_ifp->querier);
2349 if (querier)
2350 json_object_string_addf(js_if, "queryTimer", "%pTH",
2351 gm_ifp->t_query);
2352 else
2353 json_object_string_addf(js_if, "otherQuerierTimer",
2354 "%pTH",
2355 gm_ifp->t_other_querier);
2356 } else {
2357 vty_out(vty, "%-16s %-5s %d %-25pPA %-5s %11pTH %pTVMs\n",
2358 ifp->name, "up", gm_ifp->cur_version, &gm_ifp->querier,
2359 querier ? "query" : "other",
2360 querier ? gm_ifp->t_query : gm_ifp->t_other_querier,
2361 &gm_ifp->started);
2362 }
2363}
2364
2365static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname,
2366 bool detail, json_object *js)
2367{
2368 struct interface *ifp;
2369 json_object *js_vrf;
2370
2371 if (js) {
2372 js_vrf = json_object_new_object();
2373 json_object_object_add(js, vrf->name, js_vrf);
2374 }
2375
2376 FOR_ALL_INTERFACES (vrf, ifp) {
2377 json_object *js_if = NULL;
2378
2379 if (ifname && strcmp(ifp->name, ifname))
2380 continue;
2381 if (detail && !js) {
2382 gm_show_if_one_detail(vty, ifp);
2383 continue;
2384 }
2385
2386 if (!ifp->info)
2387 continue;
2388 if (js) {
2389 js_if = json_object_new_object();
2390 json_object_object_add(js_vrf, ifp->name, js_if);
2391 }
2392
2393 gm_show_if_one(vty, ifp, js_if);
2394 }
2395}
2396
2397static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname,
2398 bool detail, json_object *js)
2399{
2400 if (!js && !detail)
2401 vty_out(vty, "%-16s %-5s V %-25s %-18s %s\n", "Interface",
2402 "State", "Querier", "Timer", "Uptime");
2403
2404 if (vrf)
2405 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2406 else
2407 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2408 gm_show_if_vrf(vty, vrf, ifname, detail, js);
2409}
2410
2411DEFPY(gm_show_interface,
2412 gm_show_interface_cmd,
2413 "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME] [detail$detail|json$json]",
2414 SHOW_STR
2415 IPV6_STR
2416 MLD_STR
2417 VRF_FULL_CMD_HELP_STR
2418 "MLD interface information\n"
2419 "Interface name\n"
2420 "Detailed output\n"
2421 JSON_STR)
2422{
2423 int ret = CMD_SUCCESS;
2424 struct vrf *vrf;
2425 json_object *js = NULL;
2426
2427 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2428 if (ret != CMD_SUCCESS)
2429 return ret;
2430
2431 if (json)
2432 js = json_object_new_object();
2433 gm_show_if(vty, vrf, ifname, !!detail, js);
2434 return vty_json(vty, js);
2435}
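/* Typical invocations of the command above (interface name is just a
 * placeholder):
 *	show ipv6 mld interface
 *	show ipv6 mld vrf all interface detail
 *	show ipv6 mld interface eth0 json
 */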
2436
2437static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp,
2438 json_object *js_if)
2439{
2440 struct gm_if_stats *stats = &gm_ifp->stats;
2441 /* clang-format off */
2442 struct {
2443 const char *text;
2444 const char *js_key;
2445 uint64_t *val;
2446 } *item, items[] = {
2447 { "v2 reports received", "rxV2Reports", &stats->rx_new_report },
2448 { "v1 reports received", "rxV1Reports", &stats->rx_old_report },
2449 { "v1 done received", "rxV1Done", &stats->rx_old_leave },
2450
2451 { "v2 *,* queries received", "rxV2QueryGeneral", &stats->rx_query_new_general },
2452 { "v2 *,G queries received", "rxV2QueryGroup", &stats->rx_query_new_group },
2453 { "v2 S,G queries received", "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc },
2454 { "v2 S-bit queries received", "rxV2QuerySBit", &stats->rx_query_new_sbit },
2455 { "v1 *,* queries received", "rxV1QueryGeneral", &stats->rx_query_old_general },
2456 { "v1 *,G queries received", "rxV1QueryGroup", &stats->rx_query_old_group },
2457
2458 { "v2 *,* queries sent", "txV2QueryGeneral", &stats->tx_query_new_general },
2459 { "v2 *,G queries sent", "txV2QueryGroup", &stats->tx_query_new_group },
2460 { "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc },
2461 { "v1 *,* queries sent", "txV1QueryGeneral", &stats->tx_query_old_general },
2462 { "v1 *,G queries sent", "txV1QueryGroup", &stats->tx_query_old_group },
2463 { "TX errors", "txErrors", &stats->tx_query_fail },
2464
2465 { "RX system errors", "rxErrorSys", &stats->rx_drop_sys },
2466 { "RX dropped (checksum error)", "rxDropChecksum", &stats->rx_drop_csum },
2467 { "RX dropped (invalid source)", "rxDropSrcAddr", &stats->rx_drop_srcaddr },
2468 { "RX dropped (invalid dest.)", "rxDropDstAddr", &stats->rx_drop_dstaddr },
2469 { "RX dropped (missing alert)", "rxDropRtrAlert", &stats->rx_drop_ra },
2470 { "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed },
2471 { "RX truncated reports", "rxTruncatedRep", &stats->rx_trunc_report },
2472 };
2473 /* clang-format on */
2474
2475 for (item = items; item < items + array_size(items); item++) {
2476 if (js_if)
2477 json_object_int_add(js_if, item->js_key, *item->val);
2478 else
2479 vty_out(vty, " %-30s %" PRIu64 "\n", item->text,
2480 *item->val);
2481 }
2482}
2483
2484static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf,
2485 const char *ifname, json_object *js)
2486{
2487 struct interface *ifp;
2488 json_object *js_vrf;
2489
2490 if (js) {
2491 js_vrf = json_object_new_object();
2492 json_object_object_add(js, vrf->name, js_vrf);
2493 }
2494
2495 FOR_ALL_INTERFACES (vrf, ifp) {
2496 struct pim_interface *pim_ifp;
2497 struct gm_if *gm_ifp;
2498 json_object *js_if = NULL;
2499
2500 if (ifname && strcmp(ifp->name, ifname))
2501 continue;
2502
2503 if (!ifp->info)
2504 continue;
2505 pim_ifp = ifp->info;
2506 if (!pim_ifp->mld)
2507 continue;
2508 gm_ifp = pim_ifp->mld;
2509
2510 if (js) {
2511 js_if = json_object_new_object();
2512 json_object_object_add(js_vrf, ifp->name, js_if);
2513 } else {
2514 vty_out(vty, "Interface: %s\n", ifp->name);
2515 }
2516 gm_show_stats_one(vty, gm_ifp, js_if);
2517 if (!js)
2518 vty_out(vty, "\n");
2519 }
2520}
2521
2522DEFPY(gm_show_interface_stats,
2523 gm_show_interface_stats_cmd,
2524 "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
2525 SHOW_STR
2526 IPV6_STR
2527 MLD_STR
2528 VRF_FULL_CMD_HELP_STR
2529 "MLD statistics\n"
2530 INTERFACE_STR
2531 "Interface name\n"
2532 JSON_STR)
2533{
2534 int ret = CMD_SUCCESS;
2535 struct vrf *vrf;
2536 json_object *js = NULL;
2537
2538 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2539 if (ret != CMD_SUCCESS)
2540 return ret;
2541
2542 if (json)
2543 js = json_object_new_object();
2544
2545 if (vrf)
2546 gm_show_stats_vrf(vty, vrf, ifname, js);
2547 else
2548 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2549 gm_show_stats_vrf(vty, vrf, ifname, js);
2550 return vty_json(vty, js);
2551}
2552
2553static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp,
2554 const struct prefix_ipv6 *groups,
2555 const struct prefix_ipv6 *sources, bool detail,
2556 json_object *js_if)
2557{
2558 struct gm_sg *sg, *sg_start;
2559 json_object *js_group = NULL;
2560 pim_addr js_grpaddr = PIMADDR_ANY;
2561 struct gm_subscriber sub_ref = {}, *sub_untracked;
2562
2563 if (groups) {
2564 struct gm_sg sg_ref = {};
2565
2566 sg_ref.sgaddr.grp = pim_addr_from_prefix(groups);
2567 sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
2568 } else
2569 sg_start = gm_sgs_first(gm_ifp->sgs);
2570
2571 sub_ref.addr = gm_dummy_untracked;
2572 sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref);
2573 /* NB: sub_untracked may be NULL if no untracked joins exist */
2574
2575 frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
2576 struct timeval *recent = NULL, *untracked = NULL;
2577 json_object *js_src;
2578
2579 if (groups) {
2580 struct prefix grp_p;
2581
2582 pim_addr_to_prefix(&grp_p, sg->sgaddr.grp);
2583 if (!prefix_match(groups, &grp_p))
2584 break;
2585 }
2586
2587 if (sources) {
2588 struct prefix src_p;
2589
2590 pim_addr_to_prefix(&src_p, sg->sgaddr.src);
2591 if (!prefix_match(sources, &src_p))
2592 continue;
2593 }
2594
2595 if (sg->most_recent) {
2596 struct gm_packet_state *packet;
2597
2598 packet = gm_packet_sg2state(sg->most_recent);
2599 recent = &packet->received;
2600 }
2601
2602 if (sub_untracked) {
2603 struct gm_packet_state *packet;
2604 struct gm_packet_sg *item;
2605
2606 item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked);
2607 if (item) {
2608 packet = gm_packet_sg2state(item);
2609 untracked = &packet->received;
2610 }
2611 }
2612
2613 if (!js_if) {
2614 FMT_NSTD_BEGIN; /* %.0p */
2615 vty_out(vty,
2616 "%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
2617 &sg->sgaddr.grp, &sg->sgaddr.src,
2618 gm_states[sg->state], recent, untracked,
2619 &sg->created);
2620
2621 if (!detail)
2622 continue;
2623
2624 struct gm_packet_sg *item;
2625 struct gm_packet_state *packet;
2626
2627 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2628 packet = gm_packet_sg2state(item);
2629
2630 if (packet->subscriber == sub_untracked)
2631 continue;
2632 vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
2633 &packet->subscriber->addr, "(JOIN)",
2634 &packet->received);
2635 }
2636 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2637 packet = gm_packet_sg2state(item);
2638
2639 if (packet->subscriber == sub_untracked)
2640 continue;
2641 vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
2642 &packet->subscriber->addr, "(PRUNE)",
2643 &packet->received);
2644 }
2645 FMT_NSTD_END; /* %.0p */
2646 continue;
2647 }
2648 /* if (js_if) */
2649
2650 if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) {
2651 js_group = json_object_new_object();
2652 json_object_object_addf(js_if, js_group, "%pPA",
2653 &sg->sgaddr.grp);
2654 js_grpaddr = sg->sgaddr.grp;
2655 }
2656
2657 js_src = json_object_new_object();
2658 json_object_object_addf(js_group, js_src, "%pPA",
2659 &sg->sgaddr.src);
2660
2661 json_object_string_add(js_src, "state", gm_states[sg->state]);
2662 json_object_string_addf(js_src, "created", "%pTVMs",
2663 &sg->created);
2664 json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent);
2665
2666 if (untracked)
2667 json_object_string_addf(js_src, "untrackedLastSeen",
2668 "%pTVMs", untracked);
2669 if (!detail)
2670 continue;
2671
2672 json_object *js_subs;
2673 struct gm_packet_sg *item;
2674 struct gm_packet_state *packet;
2675
2676 js_subs = json_object_new_object();
2677 json_object_object_add(js_src, "joinedBy", js_subs);
2678 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2679 packet = gm_packet_sg2state(item);
2680 if (packet->subscriber == sub_untracked)
2681 continue;
2682
2683 json_object *js_sub;
2684
2685 js_sub = json_object_new_object();
2686 json_object_object_addf(js_subs, js_sub, "%pPA",
2687 &packet->subscriber->addr);
2688 json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
2689 &packet->received);
2690 }
2691
2692 js_subs = json_object_new_object();
2693 json_object_object_add(js_src, "prunedBy", js_subs);
2694 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2695 packet = gm_packet_sg2state(item);
2696 if (packet->subscriber == sub_untracked)
2697 continue;
2698
2699 json_object *js_sub;
2700
2701 js_sub = json_object_new_object();
2702 json_object_object_addf(js_subs, js_sub, "%pPA",
2703 &packet->subscriber->addr);
2704 json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
2705 &packet->received);
2706 }
2707 }
2708}
2709
2710static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf,
2711 const char *ifname,
2712 const struct prefix_ipv6 *groups,
2713 const struct prefix_ipv6 *sources, bool detail,
2714 json_object *js)
2715{
2716 struct interface *ifp;
2717 json_object *js_vrf;
2718
2719 if (js) {
2720 js_vrf = json_object_new_object();
2721 json_object_object_add(js, vrf->name, js_vrf);
2722 }
2723
2724 FOR_ALL_INTERFACES (vrf, ifp) {
2725 struct pim_interface *pim_ifp;
2726 struct gm_if *gm_ifp;
2727 json_object *js_if = NULL;
2728
2729 if (ifname && strcmp(ifp->name, ifname))
2730 continue;
2731
2732 if (!ifp->info)
2733 continue;
2734 pim_ifp = ifp->info;
2735 if (!pim_ifp->mld)
2736 continue;
2737 gm_ifp = pim_ifp->mld;
2738
2739 if (js) {
2740 js_if = json_object_new_object();
2741 json_object_object_add(js_vrf, ifp->name, js_if);
2742 }
2743
2744 if (!js && !ifname)
2745 vty_out(vty, "\nOn interface %s:\n", ifp->name);
2746
2747 gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if);
2748 }
2749}
2750
2751DEFPY(gm_show_interface_joins,
2752 gm_show_interface_joins_cmd,
2753 "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
2754 SHOW_STR
2755 IPV6_STR
2756 MLD_STR
2757 VRF_FULL_CMD_HELP_STR
2758 "MLD joined groups & sources\n"
2759 INTERFACE_STR
2760 "Interface name\n"
2761 "Limit output to group range\n"
2762 "Show groups covered by this prefix\n"
2763 "Limit output to source range\n"
2764 "Show sources covered by this prefix\n"
2765 "Show details, including tracked receivers\n"
2766 JSON_STR)
2767{
2768 int ret = CMD_SUCCESS;
2769 struct vrf *vrf;
2770 json_object *js = NULL;
2771
2772 vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
2773 if (ret != CMD_SUCCESS)
2774 return ret;
2775
2776 if (json)
2777 js = json_object_new_object();
2778 else
2779 vty_out(vty, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
2780 "Source", "State", "LastSeen", "NonTrkSeen", "Created");
2781
2782 if (vrf)
2783 gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail,
2784 js);
2785 else
2786 RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
2787 gm_show_joins_vrf(vty, vrf, ifname, groups, sources,
2788 !!detail, js);
2789 return vty_json(vty, js);
2790}
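/* Typical invocations of the joins command above (interface name and
 * prefixes are placeholders):
 *	show ipv6 mld joins
 *	show ipv6 mld joins interface eth0 detail
 *	show ipv6 mld joins groups ff35::/16 sources 2001:db8::/32 json
 */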
2791
2792DEFPY(gm_debug_show,
2793 gm_debug_show_cmd,
2794 "debug show mld interface IFNAME",
2795 DEBUG_STR
2796 SHOW_STR
2797 "MLD"
2798 INTERFACE_STR
2799 "interface name")
2800{
2801 struct interface *ifp;
2802 struct pim_interface *pim_ifp;
2803 struct gm_if *gm_ifp;
2804
2805 ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
2806 if (!ifp) {
2807 vty_out(vty, "%% no such interface: %pSQq\n", ifname);
2808 return CMD_WARNING;
2809 }
2810
2811 pim_ifp = ifp->info;
2812 if (!pim_ifp) {
2813 vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname);
2814 return CMD_WARNING;
2815 }
2816
2817 gm_ifp = pim_ifp->mld;
2818 if (!gm_ifp) {
2819 vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname);
2820 return CMD_WARNING;
2821 }
2822
2823 vty_out(vty, "querier: %pPA\n", &gm_ifp->querier);
2824 vty_out(vty, "ll_lowest: %pPA\n\n", &pim_ifp->ll_lowest);
2825 vty_out(vty, "t_query: %pTHD\n", gm_ifp->t_query);
2826 vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier);
2827 vty_out(vty, "t_recv: %pTHD\n", gm_ifp->t_recv);
2828 vty_out(vty, "t_expire: %pTHD\n", gm_ifp->t_expire);
2829
2830 vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending);
2831 for (size_t i = 0; i < gm_ifp->n_pending; i++) {
2832 int64_t query, expiry;
2833
2834 query = monotime_since(&gm_ifp->pending[i].query, NULL);
2835 expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL);
2836
2837 vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n",
2838 i, query / 1000, expiry / 1000);
2839 }
2840
2841 struct gm_sg *sg;
2842 struct gm_packet_state *pkt;
2843 struct gm_packet_sg *item;
2844 struct gm_subscriber *subscriber;
2845
2846 vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs));
2847 frr_each (gm_sgs, gm_ifp->sgs, sg) {
2848 vty_out(vty, "\t%pSG t_expire=%pTHD\n", &sg->sgaddr,
2849 sg->t_sg_expire);
2850
2851 vty_out(vty, "\t @pos:%zu\n",
2852 gm_packet_sg_subs_count(sg->subs_positive));
2853 frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
2854 pkt = gm_packet_sg2state(item);
2855
2856 vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n",
2857 item->is_src ? "S" : "",
2858 item->is_excl ? "E" : "",
2859 &pkt->subscriber->addr, pkt->subscriber, pkt,
2860 item->offset);
2861
2862 assert(item->sg == sg);
2863 }
2864 vty_out(vty, "\t @neg:%zu\n",
2865 gm_packet_sg_subs_count(sg->subs_negative));
2866 frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
2867 pkt = gm_packet_sg2state(item);
2868
2869 vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n",
2870 item->is_src ? "S" : "",
2871 item->is_excl ? "E" : "",
2872 &pkt->subscriber->addr, pkt->subscriber, pkt,
2873 item->offset);
2874
2875 assert(item->sg == sg);
2876 }
2877 }
2878
2879 vty_out(vty, "\n%zu subscribers:\n",
2880 gm_subscribers_count(gm_ifp->subscribers));
2881 frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) {
2882 vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr,
2883 subscriber, gm_packets_count(subscriber->packets));
2884
2885 frr_each (gm_packets, subscriber->packets, pkt) {
2886 vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n",
2887 pkt,
2888 monotime_since(&pkt->received, NULL) *
2889 0.000001f,
2890 pkt->n_active, pkt->n_sg);
2891
2892 for (size_t i = 0; i < pkt->n_sg; i++) {
2893 item = pkt->items + i;
2894
2895 vty_out(vty, "\t\t[%zu]", i);
2896
2897 if (!item->sg) {
2898 vty_out(vty, " inactive\n");
2899 continue;
2900 }
2901
2902 vty_out(vty, " %s%s %pSG nE=%u\n",
2903 item->is_src ? "S" : "",
2904 item->is_excl ? "E" : "",
2905 &item->sg->sgaddr, item->n_exclude);
2906 }
2907 }
2908 }
2909
2910 return CMD_SUCCESS;
2911}
2912
2913DEFPY(gm_debug_iface_cfg,
2914 gm_debug_iface_cfg_cmd,
2915 "debug ipv6 mld {"
2916 "robustness (0-7)|"
2917 "query-max-response-time (1-8387584)"
2918 "}",
2919 DEBUG_STR
2920 IPV6_STR
2921 "Multicast Listener Discovery\n"
2922 "QRV\nQRV\n"
2923 "maxresp\nmaxresp\n")
2924{
2925 VTY_DECLVAR_CONTEXT(interface, ifp);
2926 struct pim_interface *pim_ifp;
2927 struct gm_if *gm_ifp;
2928 bool changed = false;
2929
2930 pim_ifp = ifp->info;
2931 if (!pim_ifp) {
2932 vty_out(vty, "%% no PIM state for interface %pSQq\n",
2933 ifp->name);
2934 return CMD_WARNING;
2935 }
2936 gm_ifp = pim_ifp->mld;
2937 if (!gm_ifp) {
2938 vty_out(vty, "%% no MLD state for interface %pSQq\n",
2939 ifp->name);
2940 return CMD_WARNING;
2941 }
2942
2943 if (robustness_str && gm_ifp->cur_qrv != robustness) {
2944 gm_ifp->cur_qrv = robustness;
2945 changed = true;
2946 }
2947 if (query_max_response_time_str &&
2948 gm_ifp->cur_max_resp != (unsigned int)query_max_response_time) {
2949 gm_ifp->cur_max_resp = query_max_response_time;
2950 changed = true;
2951 }
2952
2953 if (changed) {
2954 vty_out(vty, "%% MLD querier config changed, bumping\n");
2955 gm_bump_querier(gm_ifp);
2956 }
2957 return CMD_SUCCESS;
2958}
2959
2960void gm_cli_init(void);
2961
2962void gm_cli_init(void)
2963{
2964 install_element(VIEW_NODE, &gm_show_interface_cmd);
2965 install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
2966 install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
2967
2968 install_element(VIEW_NODE, &gm_debug_show_cmd);
2969 install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
2970}