#include "plist.h"
#include "lib_errors.h"
#include "network.h"
+#include "if.h"
#include "babel_main.h"
#include "babeld.h"
/* We failed to get a global EUI64 from the interfaces we were given.
Let's try to find an interface with a MAC address. */
for(i = 1; i < 256; i++) {
- char buf[IF_NAMESIZE], *ifname;
+ char buf[INTERFACE_NAMSIZ], *ifname;
unsigned char eui[8];
ifname = if_indextoname(i, buf);
if(ifname == NULL)
followed by a sequence of TLVs. TLVs of known types are also checked to meet
minimum length constraints defined for each. Return 0 for no errors. */
static int
-babel_packet_examin(const unsigned char *packet, int packetlen)
+babel_packet_examin(const unsigned char *packet, int packetlen, int *blength)
{
int i = 0, bodylen;
const unsigned char *message;
}
i += len + 2;
}
+
+ *blength = bodylen;
return 0;
}
return;
}
- if (babel_packet_examin (packet, packetlen)) {
+ if (babel_packet_examin (packet, packetlen, &bodylen)) {
flog_err(EC_BABEL_PACKET,
"Received malformed packet on %s from %s.",
ifp->name, format_address(from));
return;
}
- DO_NTOHS(bodylen, packet + 2);
-
i = 0;
while(i < bodylen) {
message = packet + 4 + i;
uint16_t mhop;
struct in6_addr peer;
struct in6_addr local;
- char ifname[MAXNAMELEN];
- char vrfname[MAXNAMELEN];
+ char ifname[INTERFACE_NAMSIZ];
+ char vrfname[VRF_NAMSIZ];
} __attribute__((packed));
struct bfd_session_stats {
struct peer_label *pl;
struct bfd_dplane_ctx *bdc;
- struct sockaddr_any local_address;
struct interface *ifp;
struct vrf *vrf;
return;
}
+ /* Ensure that existing good sessions are not overridden. */
+ if (!cp->discrs.remote_discr && bfd->ses_state != PTM_BFD_DOWN &&
+ bfd->ses_state != PTM_BFD_ADM_DOWN) {
+ cp_debug(is_mhop, &peer, &local, ifindex, vrfid,
+ "'remote discriminator' is zero, not overridden");
+ return;
+ }
+
/*
* Multi hop: validate packet TTL.
- * Single hop: set local address that received the packet.
*/
if (is_mhop) {
if (ttl < bfd->mh_ttl) {
bfd->mh_ttl, ttl);
return;
}
- } else if (bfd->local_address.sa_sin.sin_family == AF_UNSPEC) {
- bfd->local_address = local;
}
bfd->stats.rx_ctrl_pkt++;
bool ctlsockused = false;
int opt;
+ bglobal.bg_use_dplane = false;
+
/* Initialize system sockets. */
bg_init();
{
char addr_buf[INET6_ADDRSTRLEN];
- if (CHECK_FLAG(bs->flags, BFD_SESS_FLAG_MH)) {
- vty_out(vty, "%-10u", bs->discrs.my_discr);
- inet_ntop(bs->key.family, &bs->key.local, addr_buf, sizeof(addr_buf));
- vty_out(vty, " %-40s", addr_buf);
- inet_ntop(bs->key.family, &bs->key.peer, addr_buf, sizeof(addr_buf));
- vty_out(vty, " %-40s", addr_buf);
- vty_out(vty, "%-15s\n", state_list[bs->ses_state].str);
- } else {
- vty_out(vty, "%-10u", bs->discrs.my_discr);
- vty_out(vty, " %-40s", satostr(&bs->local_address));
- inet_ntop(bs->key.family, &bs->key.peer, addr_buf, sizeof(addr_buf));
- vty_out(vty, " %-40s", addr_buf);
-
- vty_out(vty, "%-15s\n", state_list[bs->ses_state].str);
- }
+ vty_out(vty, "%-10u", bs->discrs.my_discr);
+ inet_ntop(bs->key.family, &bs->key.local, addr_buf, sizeof(addr_buf));
+ vty_out(vty, " %-40s", addr_buf);
+ inet_ntop(bs->key.family, &bs->key.peer, addr_buf, sizeof(addr_buf));
+ vty_out(vty, " %-40s", addr_buf);
+ vty_out(vty, "%-15s\n", state_list[bs->ses_state].str);
}
static void _display_peer_brief_iter(struct hash_bucket *hb, void *arg)
#define BGP_ADJ_IN_DEL(N, A) BGP_PATH_INFO_DEL(N, A, adj_in)
/* Prototypes. */
-extern bool bgp_adj_out_lookup(struct peer *, struct bgp_dest *, uint32_t);
-extern void bgp_adj_in_set(struct bgp_dest *, struct peer *, struct attr *,
- uint32_t);
-extern bool bgp_adj_in_unset(struct bgp_dest *, struct peer *, uint32_t);
-extern void bgp_adj_in_remove(struct bgp_dest *, struct bgp_adj_in *);
-
-extern void bgp_sync_init(struct peer *);
-extern void bgp_sync_delete(struct peer *);
+extern bool bgp_adj_out_lookup(struct peer *peer, struct bgp_dest *dest,
+ uint32_t addpath_tx_id);
+extern void bgp_adj_in_set(struct bgp_dest *dest, struct peer *peer,
+ struct attr *attr, uint32_t addpath_id);
+extern bool bgp_adj_in_unset(struct bgp_dest *dest, struct peer *peer,
+ uint32_t addpath_id);
+extern void bgp_adj_in_remove(struct bgp_dest *dest, struct bgp_adj_in *bai);
+
+extern void bgp_sync_init(struct peer *peer);
+extern void bgp_sync_delete(struct peer *peer);
extern unsigned int baa_hash_key(const void *p);
extern bool baa_hash_cmp(const void *p1, const void *p2);
extern void bgp_advertise_add(struct bgp_advertise_attr *baa,
if (length % AS16_VALUE_SIZE)
return NULL;
- memset(&as, 0, sizeof(struct aspath));
+ memset(&as, 0, sizeof(as));
if (assegments_parse(s, length, &as.segments, use32bit) < 0)
return NULL;
/* If already same aspath exist then return it. */
find = hash_get(ashash, &as, aspath_hash_alloc);
- /* bug! should not happen, let the daemon crash below */
- assert(find);
-
/* if the aspath was already hashed free temporary memory. */
if (find->refcnt) {
assegment_free_all(as.segments);
/* Prototypes. */
extern void aspath_init(void);
extern void aspath_finish(void);
-extern struct aspath *aspath_parse(struct stream *, size_t, int);
-extern struct aspath *aspath_dup(struct aspath *);
-extern struct aspath *aspath_aggregate(struct aspath *, struct aspath *);
-extern struct aspath *aspath_prepend(struct aspath *, struct aspath *);
-extern struct aspath *aspath_filter_exclude(struct aspath *, struct aspath *);
-extern struct aspath *aspath_add_seq_n(struct aspath *, as_t, unsigned);
-extern struct aspath *aspath_add_seq(struct aspath *, as_t);
-extern struct aspath *aspath_add_confed_seq(struct aspath *, as_t);
+extern struct aspath *aspath_parse(struct stream *s, size_t length,
+ int use32bit);
+extern struct aspath *aspath_dup(struct aspath *aspath);
+extern struct aspath *aspath_aggregate(struct aspath *as1, struct aspath *as2);
+extern struct aspath *aspath_prepend(struct aspath *as1, struct aspath *as2);
+extern struct aspath *aspath_filter_exclude(struct aspath *source,
+ struct aspath *exclude_list);
+extern struct aspath *aspath_add_seq_n(struct aspath *aspath, as_t asno,
+ unsigned num);
+extern struct aspath *aspath_add_seq(struct aspath *aspath, as_t asno);
+extern struct aspath *aspath_add_confed_seq(struct aspath *aspath, as_t asno);
extern bool aspath_cmp(const void *as1, const void *as2);
-extern bool aspath_cmp_left(const struct aspath *, const struct aspath *);
+extern bool aspath_cmp_left(const struct aspath *aspath1,
+ const struct aspath *aspath2);
extern bool aspath_cmp_left_confed(const struct aspath *as1,
- const struct aspath *as2xs);
-extern struct aspath *aspath_delete_confed_seq(struct aspath *);
+ const struct aspath *as2);
+extern struct aspath *aspath_delete_confed_seq(struct aspath *aspath);
extern struct aspath *aspath_empty(void);
extern struct aspath *aspath_empty_get(void);
-extern struct aspath *aspath_str2aspath(const char *);
+extern struct aspath *aspath_str2aspath(const char *str);
extern void aspath_str_update(struct aspath *as, bool make_json);
-extern void aspath_free(struct aspath *);
-extern struct aspath *aspath_intern(struct aspath *);
-extern void aspath_unintern(struct aspath **);
-extern const char *aspath_print(struct aspath *);
-extern void aspath_print_vty(struct vty *, const char *, struct aspath *,
- const char *);
-extern void aspath_print_all_vty(struct vty *);
-extern unsigned int aspath_key_make(const void *);
-extern unsigned int aspath_get_first_as(struct aspath *);
-extern unsigned int aspath_get_last_as(struct aspath *);
-extern int aspath_loop_check(struct aspath *, as_t);
-extern bool aspath_private_as_check(struct aspath *);
-extern bool aspath_single_asn_check(struct aspath *, as_t asn);
+extern void aspath_free(struct aspath *aspath);
+extern struct aspath *aspath_intern(struct aspath *aspath);
+extern void aspath_unintern(struct aspath **aspath);
+extern const char *aspath_print(struct aspath *aspath);
+extern void aspath_print_vty(struct vty *vty, const char *format,
+ struct aspath *aspath, const char *suffix);
+extern void aspath_print_all_vty(struct vty *vty);
+extern unsigned int aspath_key_make(const void *p);
+extern unsigned int aspath_get_first_as(struct aspath *aspath);
+extern unsigned int aspath_get_last_as(struct aspath *aspath);
+extern int aspath_loop_check(struct aspath *aspath, as_t asno);
+extern bool aspath_private_as_check(struct aspath *aspath);
+extern bool aspath_single_asn_check(struct aspath *aspath, as_t asn);
extern struct aspath *aspath_replace_specific_asn(struct aspath *aspath,
as_t target_asn,
as_t our_asn);
as_t asn, as_t peer_asn);
extern struct aspath *aspath_remove_private_asns(struct aspath *aspath,
as_t peer_asn);
-extern bool aspath_firstas_check(struct aspath *, as_t);
-extern bool aspath_confed_check(struct aspath *);
-extern bool aspath_left_confed_check(struct aspath *);
+extern bool aspath_firstas_check(struct aspath *aspath, as_t asno);
+extern bool aspath_confed_check(struct aspath *aspath);
+extern bool aspath_left_confed_check(struct aspath *aspath);
extern unsigned long aspath_count(void);
-extern unsigned int aspath_count_hops(const struct aspath *);
+extern unsigned int aspath_count_hops(const struct aspath *aspath);
extern bool aspath_check_as_sets(struct aspath *aspath);
extern bool aspath_check_as_zero(struct aspath *aspath);
-extern unsigned int aspath_count_confeds(struct aspath *);
-extern unsigned int aspath_size(struct aspath *);
-extern as_t aspath_highest(struct aspath *);
-extern as_t aspath_leftmost(struct aspath *);
-extern size_t aspath_put(struct stream *, struct aspath *, int);
+extern unsigned int aspath_count_confeds(struct aspath *aspath);
+extern unsigned int aspath_size(struct aspath *aspath);
+extern as_t aspath_highest(struct aspath *aspath);
+extern as_t aspath_leftmost(struct aspath *aspath);
+extern size_t aspath_put(struct stream *s, struct aspath *aspath, int use32bit);
-extern struct aspath *aspath_reconcile_as4(struct aspath *, struct aspath *);
-extern bool aspath_has_as4(struct aspath *);
+extern struct aspath *aspath_reconcile_as4(struct aspath *aspath,
+ struct aspath *as4path);
+extern bool aspath_has_as4(struct aspath *aspath);
/* For SNMP BGP4PATHATTRASPATHSEGMENT, might be useful for debug */
-extern uint8_t *aspath_snmp_pathseg(struct aspath *, size_t *);
+extern uint8_t *aspath_snmp_pathseg(struct aspath *aspath, size_t *varlen);
extern void bgp_compute_aggregate_aspath(struct bgp_aggregate *aggregate,
struct aspath *aspath);
struct attr *new;
int ret;
- memset(&attr, 0, sizeof(struct attr));
+ memset(&attr, 0, sizeof(attr));
/* Origin attribute. */
attr.origin = origin;
struct attr attr_tmp = attr;
struct bgp_path_info rmap_path;
- memset(&rmap_path, 0, sizeof(struct bgp_path_info));
+ memset(&rmap_path, 0, sizeof(rmap_path));
rmap_path.peer = bgp->peer_self;
rmap_path.attr = &attr_tmp;
bool addpath_capable, uint32_t addpath_tx_id,
struct attr *attr)
{
- uint8_t wlabel[3] = {0x80, 0x00, 0x00};
+ uint8_t wlabel[4] = {0x80, 0x00, 0x00};
if (safi == SAFI_LABELED_UNICAST) {
label = (mpls_label_t *)wlabel;
/* Prototypes. */
extern void bgp_attr_init(void);
extern void bgp_attr_finish(void);
-extern enum bgp_attr_parse_ret bgp_attr_parse(struct peer *, struct attr *,
- bgp_size_t, struct bgp_nlri *,
- struct bgp_nlri *);
+extern enum bgp_attr_parse_ret
+bgp_attr_parse(struct peer *peer, struct attr *attr, bgp_size_t size,
+ struct bgp_nlri *mp_update, struct bgp_nlri *mp_withdraw);
extern struct attr *bgp_attr_intern(struct attr *attr);
-extern void bgp_attr_unintern_sub(struct attr *);
-extern void bgp_attr_unintern(struct attr **);
-extern void bgp_attr_flush(struct attr *);
-extern struct attr *bgp_attr_default_set(struct attr *attr, uint8_t);
+extern void bgp_attr_unintern_sub(struct attr *attr);
+extern void bgp_attr_unintern(struct attr **pattr);
+extern void bgp_attr_flush(struct attr *attr);
+extern struct attr *bgp_attr_default_set(struct attr *attr, uint8_t origin);
extern struct attr *bgp_attr_aggregate_intern(
struct bgp *bgp, uint8_t origin, struct aspath *aspath,
struct community *community, struct ecommunity *ecommunity,
extern void bgp_dump_routes_attr(struct stream *s, struct attr *attr,
const struct prefix *p);
extern bool attrhash_cmp(const void *arg1, const void *arg2);
-extern unsigned int attrhash_key_make(const void *);
-extern void attr_show_all(struct vty *);
+extern unsigned int attrhash_key_make(const void *p);
+extern void attr_show_all(struct vty *vty);
extern unsigned long int attr_count(void);
extern unsigned long int attr_unknown_count(void);
/* Cluster list prototypes. */
-extern bool cluster_loop_check(struct cluster_list *, struct in_addr);
+extern bool cluster_loop_check(struct cluster_list *cluster,
+ struct in_addr originator);
/* Below exported for unit-test purposes only */
struct bgp_attr_parser_args {
uint8_t *startp;
};
extern int bgp_mp_reach_parse(struct bgp_attr_parser_args *args,
- struct bgp_nlri *);
+ struct bgp_nlri *mp_update);
extern int bgp_mp_unreach_parse(struct bgp_attr_parser_args *args,
- struct bgp_nlri *);
+ struct bgp_nlri *mp_withdraw);
extern enum bgp_attr_parse_ret
bgp_attr_prefix_sid(struct bgp_attr_parser_args *args);
struct ecommunity_val routermac_ecom;
struct ecommunity *ecomm = bgp_attr_get_ecommunity(attr);
- memset(&routermac_ecom, 0, sizeof(struct ecommunity_val));
+ memset(&routermac_ecom, 0, sizeof(routermac_ecom));
routermac_ecom.val[0] = ECOMMUNITY_ENCODE_EVPN;
routermac_ecom.val[1] = ECOMMUNITY_EVPN_SUBTYPE_ROUTERMAC;
memcpy(&routermac_ecom.val[2], routermac->octet, ETH_ALEN);
if (adjin->peer == peer)
break;
}
- bmp_monitor(bmp, peer, BMP_PEER_FLAG_L, &bqe->p, prd,
+ bmp_monitor(bmp, peer, 0, &bqe->p, prd,
adjin ? adjin->attr : NULL, afi, safi,
adjin ? adjin->uptime : monotime(NULL));
written = true;
new->name_hash = bgp_clist_hash_key_community_list(new);
/* Save for later */
- hash_get(cm->hash, new, hash_alloc_intern);
+ (void)hash_get(cm->hash, new, hash_alloc_intern);
/* If name is made by all digit character. We treat it as
number. */
/* Prototypes. */
extern struct community_list_handler *community_list_init(void);
-extern void community_list_terminate(struct community_list_handler *);
+extern void community_list_terminate(struct community_list_handler *ch);
extern int community_list_set(struct community_list_handler *ch,
const char *name, const char *str,
const char *seq, int direct, int style);
extern struct community_list_master *
-community_list_master_lookup(struct community_list_handler *, int);
+community_list_master_lookup(struct community_list_handler *ch, int master);
extern struct community_list *
community_list_lookup(struct community_list_handler *c, const char *name,
uint32_t name_hash, int master);
-extern bool community_list_match(struct community *, struct community_list *);
-extern bool ecommunity_list_match(struct ecommunity *, struct community_list *);
-extern bool lcommunity_list_match(struct lcommunity *, struct community_list *);
-extern bool community_list_exact_match(struct community *,
- struct community_list *);
+extern bool community_list_match(struct community *com,
+ struct community_list *list);
+extern bool ecommunity_list_match(struct ecommunity *ecom,
+ struct community_list *list);
+extern bool lcommunity_list_match(struct lcommunity *lcom,
+ struct community_list *list);
+extern bool community_list_exact_match(struct community *com,
+ struct community_list *list);
extern bool lcommunity_list_exact_match(struct lcommunity *lcom,
struct community_list *list);
-extern struct community *community_list_match_delete(struct community *,
- struct community_list *);
+extern struct community *
+community_list_match_delete(struct community *com, struct community_list *list);
extern struct lcommunity *
lcommunity_list_match_delete(struct lcommunity *lcom,
struct community_list *list);
extern void community_init(void);
extern void community_finish(void);
extern void community_free(struct community **comm);
-extern struct community *community_uniq_sort(struct community *);
-extern struct community *community_parse(uint32_t *, unsigned short);
-extern struct community *community_intern(struct community *);
-extern void community_unintern(struct community **);
-extern char *community_str(struct community *, bool make_json,
+extern struct community *community_uniq_sort(struct community *com);
+extern struct community *community_parse(uint32_t *pnt, unsigned short length);
+extern struct community *community_intern(struct community *com);
+extern void community_unintern(struct community **com);
+extern char *community_str(struct community *com, bool make_json,
bool translate_alias);
-extern unsigned int community_hash_make(const struct community *);
-extern struct community *community_str2com(const char *);
-extern bool community_match(const struct community *, const struct community *);
+extern unsigned int community_hash_make(const struct community *com);
+extern struct community *community_str2com(const char *str);
+extern bool community_match(const struct community *com1,
+ const struct community *com2);
extern bool community_cmp(const struct community *c1,
const struct community *c2);
-extern struct community *community_merge(struct community *,
- struct community *);
-extern struct community *community_delete(struct community *,
- struct community *);
-extern struct community *community_dup(struct community *);
-extern bool community_include(struct community *, uint32_t);
+extern struct community *community_merge(struct community *com1,
+ struct community *com2);
+extern struct community *community_delete(struct community *com1,
+ struct community *com2);
+extern struct community *community_dup(struct community *com);
+extern bool community_include(struct community *com, uint32_t val);
extern void community_add_val(struct community *com, uint32_t val);
-extern void community_del_val(struct community *, uint32_t *);
+extern void community_del_val(struct community *com, uint32_t *val);
extern unsigned long community_count(void);
extern struct hash *community_hash(void);
extern uint32_t community_val_get(struct community *com, int i);
void bgp_ca_community_insert(struct community_alias *ca)
{
- hash_get(bgp_ca_community_hash, ca, bgp_community_alias_alloc);
+ (void)hash_get(bgp_ca_community_hash, ca, bgp_community_alias_alloc);
}
void bgp_ca_alias_insert(struct community_alias *ca)
{
- hash_get(bgp_ca_alias_hash, ca, bgp_community_alias_alloc);
+ (void)hash_get(bgp_ca_alias_hash, ca, bgp_community_alias_alloc);
}
void bgp_ca_community_delete(struct community_alias *ca)
#define REUSE_LIST_SIZE 256
#define REUSE_ARRAY_SIZE 1024
-extern int bgp_damp_enable(struct bgp *, afi_t, safi_t, time_t, unsigned int,
- unsigned int, time_t);
-extern int bgp_damp_disable(struct bgp *, afi_t, safi_t);
+extern int bgp_damp_enable(struct bgp *bgp, afi_t afi, safi_t safi, time_t half,
+ unsigned int reuse, unsigned int suppress,
+ time_t max);
+extern int bgp_damp_disable(struct bgp *bgp, afi_t afi, safi_t safi);
extern int bgp_damp_withdraw(struct bgp_path_info *path, struct bgp_dest *dest,
afi_t afi, safi_t safi, int attr_change);
extern int bgp_damp_update(struct bgp_path_info *path, struct bgp_dest *dest,
extern void bgp_damp_info_free(struct bgp_damp_info *path, int withdraw,
afi_t afi, safi_t safi);
extern void bgp_damp_info_clean(afi_t afi, safi_t safi);
-extern int bgp_damp_decay(time_t, int, struct bgp_damp_config *damp);
-extern void bgp_config_write_damp(struct vty *, afi_t afi, safi_t safi);
+extern int bgp_damp_decay(time_t tdiff, int penalty,
+ struct bgp_damp_config *damp);
+extern void bgp_config_write_damp(struct vty *vty, afi_t afi, safi_t safi);
extern void bgp_damp_info_vty(struct vty *vty, struct bgp_path_info *path,
afi_t afi, safi_t safi, json_object *json_path);
extern const char *bgp_damp_reuse_time_vty(struct vty *vty,
char *timebuf, size_t len, afi_t afi,
safi_t safi, bool use_json,
json_object *json);
-extern int bgp_show_dampening_parameters(struct vty *vty, afi_t, safi_t,
- uint16_t);
+extern int bgp_show_dampening_parameters(struct vty *vty, afi_t afi,
+ safi_t safi, uint16_t show_flags);
#endif /* _QUAGGA_BGP_DAMP_H */
#include "bgpd/bgp_evpn_vty.h"
#include "bgpd/bgp_vty.h"
#include "bgpd/bgp_flowspec.h"
+#include "bgpd/bgp_packet.h"
unsigned long conf_bgp_debug_as4;
unsigned long conf_bgp_debug_neighbor_events;
{BGP_NOTIFY_CEASE_COLLISION_RESOLUTION,
"/Connection Collision Resolution"},
{BGP_NOTIFY_CEASE_OUT_OF_RESOURCE, "/Out of Resources"},
+ {BGP_NOTIFY_CEASE_HARD_RESET, "/Hard Reset"},
{0}};
static const struct message bgp_notify_route_refresh_msg[] = {
/* dump notify packet */
void bgp_notify_print(struct peer *peer, struct bgp_notify *bgp_notify,
- const char *direct)
+ const char *direct, bool hard_reset)
{
const char *subcode_str;
const char *code_str;
if (msg_str) {
zlog_info(
- "%%NOTIFICATION: %s neighbor %s %d/%d (%s%s) \"%s\"",
+ "%%NOTIFICATION%s: %s neighbor %s %d/%d (%s%s) \"%s\"",
+ hard_reset ? "(Hard Reset)" : "",
strcmp(direct, "received") == 0
? "received from"
: "sent to",
} else {
msg_str = bgp_notify->data ? bgp_notify->data : "";
zlog_info(
- "%%NOTIFICATION: %s neighbor %s %d/%d (%s%s) %d bytes %s",
+ "%%NOTIFICATION%s: %s neighbor %s %d/%d (%s%s) %d bytes %s",
+ hard_reset ? "(Hard Reset)" : "",
strcmp(direct, "received") == 0
? "received from"
: "sent to",
int argc, struct prefix **argv_pp)
{
struct prefix *argv_p;
- struct ethaddr mac;
- struct ipaddr ip;
+ struct ethaddr mac = {};
+ struct ipaddr ip = {};
int evpn_type = 0;
int mac_idx = 0;
int ip_idx = 0;
return CMD_WARNING;
if (evpn_type == BGP_EVPN_MAC_IP_ROUTE) {
- memset(&ip, 0, sizeof(struct ipaddr));
+ memset(&ip, 0, sizeof(ip));
- argv_find(argv, argc, "mac", &mac_idx);
- (void)prefix_str2mac(argv[mac_idx + 1]->arg, &mac);
+ if (argv_find(argv, argc, "mac", &mac_idx))
+ if (!prefix_str2mac(argv[mac_idx + 1]->arg, &mac)) {
+ vty_out(vty, "%% Malformed MAC address\n");
+ return CMD_WARNING;
+ }
- argv_find(argv, argc, "ip", &ip_idx);
- str2ipaddr(argv[ip_idx + 1]->arg, &ip);
+ if (argv_find(argv, argc, "ip", &ip_idx))
+ if (str2ipaddr(argv[ip_idx + 1]->arg, &ip) != 0) {
+ vty_out(vty, "%% Malformed IP address\n");
+ return CMD_WARNING;
+ }
build_evpn_type2_prefix((struct prefix_evpn *)argv_p,
&mac, &ip);
} else if (evpn_type == BGP_EVPN_IMET_ROUTE) {
- memset(&ip, 0, sizeof(struct ipaddr));
+ memset(&ip, 0, sizeof(ip));
- argv_find(argv, argc, "ip", &ip_idx);
- str2ipaddr(argv[ip_idx + 1]->arg, &ip);
+ if (argv_find(argv, argc, "ip", &ip_idx))
+ if (str2ipaddr(argv[ip_idx + 1]->arg, &ip) != 0) {
+ vty_out(vty, "%% Malformed IP address\n");
+ return CMD_WARNING;
+ }
build_evpn_type3_prefix((struct prefix_evpn *)argv_p,
ip.ipaddr_v4);
} else if (evpn_type == BGP_EVPN_IP_PREFIX_ROUTE) {
struct prefix ip_prefix;
- memset(&ip_prefix, 0, sizeof(struct prefix));
+ memset(&ip_prefix, 0, sizeof(ip_prefix));
if (argv_find(argv, argc, "ip", &ip_idx)) {
(void)str2prefix(argv[ip_idx + 1]->arg, &ip_prefix);
apply_mask(&ip_prefix);
/* Return true if this peer is on the per_peer_list of peers to debug
* for BGP_DEBUG_TYPE
*/
-static int bgp_debug_per_peer(char *host, unsigned long term_bgp_debug_type,
- unsigned int BGP_DEBUG_TYPE,
- struct list *per_peer_list)
+static bool bgp_debug_per_peer(char *host, unsigned long term_bgp_debug_type,
+ unsigned int BGP_DEBUG_TYPE,
+ struct list *per_peer_list)
{
struct bgp_debug_filter *filter;
struct listnode *node, *nnode;
if (term_bgp_debug_type & BGP_DEBUG_TYPE) {
/* We are debugging all peers so return true */
if (!per_peer_list || list_isempty(per_peer_list))
- return 1;
+ return true;
else {
if (!host)
- return 0;
+ return false;
for (ALL_LIST_ELEMENTS(per_peer_list, node, nnode,
filter))
if (strcmp(filter->host, host) == 0)
- return 1;
+ return true;
- return 0;
+ return false;
}
}
- return 0;
+ return false;
}
-int bgp_debug_neighbor_events(struct peer *peer)
+bool bgp_debug_neighbor_events(const struct peer *peer)
{
char *host = NULL;
bgp_debug_neighbor_events_peers);
}
-int bgp_debug_keepalive(struct peer *peer)
+bool bgp_debug_keepalive(const struct peer *peer)
{
char *host = NULL;
bgp_debug_keepalive_peers);
}
-bool bgp_debug_update(struct peer *peer, const struct prefix *p,
+bool bgp_debug_update(const struct peer *peer, const struct prefix *p,
struct update_group *updgrp, unsigned int inbound)
{
char *host = NULL;
extern const char *const bgp_type_str[];
-extern bool bgp_dump_attr(struct attr *, char *, size_t);
+extern bool bgp_dump_attr(struct attr *attr, char *buf, size_t size);
extern bool bgp_debug_peer_updout_enabled(char *host);
-extern const char *bgp_notify_code_str(char);
-extern const char *bgp_notify_subcode_str(char, char);
-extern void bgp_notify_print(struct peer *, struct bgp_notify *, const char *);
+extern const char *bgp_notify_code_str(char code);
+extern const char *bgp_notify_subcode_str(char code, char subcode);
+extern void bgp_notify_print(struct peer *peer, struct bgp_notify *bgp_notify,
+ const char *direct, bool hard_reset);
extern const struct message bgp_status_msg[];
-extern int bgp_debug_neighbor_events(struct peer *peer);
-extern int bgp_debug_keepalive(struct peer *peer);
-extern bool bgp_debug_update(struct peer *peer, const struct prefix *p,
+extern bool bgp_debug_neighbor_events(const struct peer *peer);
+extern bool bgp_debug_keepalive(const struct peer *peer);
+extern bool bgp_debug_update(const struct peer *peer, const struct prefix *p,
struct update_group *updgrp, unsigned int inbound);
extern bool bgp_debug_bestpath(struct bgp_dest *dest);
extern bool bgp_debug_zebra(const struct prefix *p);
/* Initialize BGP packet dump functionality. */
void bgp_dump_init(void)
{
- memset(&bgp_dump_all, 0, sizeof(struct bgp_dump));
- memset(&bgp_dump_updates, 0, sizeof(struct bgp_dump));
- memset(&bgp_dump_routes, 0, sizeof(struct bgp_dump));
+ memset(&bgp_dump_all, 0, sizeof(bgp_dump_all));
+ memset(&bgp_dump_updates, 0, sizeof(bgp_dump_updates));
+ memset(&bgp_dump_routes, 0, sizeof(bgp_dump_routes));
bgp_dump_obuf =
stream_new((BGP_STANDARD_MESSAGE_MAX_PACKET_SIZE * 2)
extern void bgp_dump_init(void);
extern void bgp_dump_finish(void);
-extern int bgp_dump_state(struct peer *);
+extern int bgp_dump_state(struct peer *peer);
#endif /* _QUAGGA_BGP_DUMP_H */
.description = "BGP is in the process of building NLRI information for a peer and has discovered an inconsistent internal state",
.suggestion = "Gather log files and open an Issue, restart FRR",
},
+ {
+ .code = EC_BGP_SENDQ_STUCK_WARN,
+ .title = "BGP has been unable to send anything to a peer for an extended time",
+ .description = "The BGP peer does not seem to be receiving or processing any data received from us, causing updates to be delayed.",
+ .suggestion = "Check connectivity to the peer and that it is not overloaded",
+ },
{
.code = END_FERR,
}
.description = "BGP when using a v6 peer requires a v6 LL address to be configured on the outgoing interface as per RFC 4291 section 2.1",
.suggestion = "Add a v6 LL address to the outgoing interfaces as per RFC",
},
+ {
+ .code = EC_BGP_SENDQ_STUCK_PROPER,
+ .title = "BGP is shutting down a peer due to being unable to send anything for an extended time",
+ .description = "No BGP updates were successfully sent to the peer for more than twice the holdtime.",
+ .suggestion = "Check connectivity to the peer and that it is not overloaded",
+ },
{
.code = END_FERR,
}
EC_BGP_INVALID_BGP_INSTANCE,
EC_BGP_INVALID_ROUTE,
EC_BGP_NO_LL_ADDRESS_AVAILABLE,
+ EC_BGP_SENDQ_STUCK_WARN,
+ EC_BGP_SENDQ_STUCK_PROPER,
};
extern void bgp_error_init(void);
irt->vrfs = list_new();
/* Add to hash */
- if (!hash_get(bgp_evpn->vrf_import_rt_hash, irt, hash_alloc_intern)) {
- XFREE(MTYPE_BGP_EVPN_VRF_IMPORT_RT, irt);
- return NULL;
- }
+ (void)hash_get(bgp_evpn->vrf_import_rt_hash, irt, hash_alloc_intern);
return irt;
}
return NULL;
}
- memset(&tmp, 0, sizeof(struct vrf_irt_node));
+ memset(&tmp, 0, sizeof(tmp));
memcpy(&tmp.rt, rt, ECOMMUNITY_SIZE);
irt = hash_lookup(bgp_evpn->vrf_import_rt_hash, &tmp);
return irt;
{
struct irt_node *irt;
- if (!bgp)
- return NULL;
-
irt = XCALLOC(MTYPE_BGP_EVPN_IMPORT_RT, sizeof(struct irt_node));
irt->rt = *rt;
irt->vnis = list_new();
/* Add to hash */
- if (!hash_get(bgp->import_rt_hash, irt, hash_alloc_intern)) {
- XFREE(MTYPE_BGP_EVPN_IMPORT_RT, irt);
- return NULL;
- }
+ (void)hash_get(bgp->import_rt_hash, irt, hash_alloc_intern);
return irt;
}
struct irt_node *irt;
struct irt_node tmp;
- memset(&tmp, 0, sizeof(struct irt_node));
+ memset(&tmp, 0, sizeof(tmp));
memcpy(&tmp.rt, rt, ECOMMUNITY_SIZE);
irt = hash_lookup(bgp->import_rt_hash, &tmp);
return irt;
/* Already mapped. */
return;
- if (!irt) {
+ if (!irt)
irt = import_rt_new(bgp, &eval_tmp);
- assert(irt);
- }
/* Add VNI to the hash list for this RT. */
listnode_add(irt->vnis, vpn);
if (src_attr)
attr = *src_attr;
else {
- memset(&attr, 0, sizeof(struct attr));
+ memset(&attr, 0, sizeof(attr));
bgp_attr_default_set(&attr, BGP_ORIGIN_IGP);
}
int route_change;
bool old_is_sync = false;
- memset(&attr, 0, sizeof(struct attr));
+ memset(&attr, 0, sizeof(attr));
/* Build path-attribute for this route. */
bgp_attr_default_set(&attr, BGP_ORIGIN_IGP);
* Delete all type-2 (MACIP) local routes for this VNI - only from the
* global routing table. These are also scheduled for withdraw from peers.
*/
-static int delete_global_type2_routes(struct bgp *bgp, struct bgpevpn *vpn)
+static void delete_global_type2_routes(struct bgp *bgp, struct bgpevpn *vpn)
{
afi_t afi;
safi_t safi;
rddest = bgp_node_lookup(bgp->rib[afi][safi],
(struct prefix *)&vpn->prd);
- if (rddest && bgp_dest_has_bgp_path_info_data(rddest)) {
+ if (rddest) {
table = bgp_dest_get_bgp_table_info(rddest);
for (dest = bgp_table_top(table); dest;
dest = bgp_route_next(dest)) {
if (pi)
bgp_process(bgp, dest, afi, safi);
}
- }
- /* Unlock RD node. */
- if (rddest)
+ /* Unlock RD node. */
bgp_dest_unlock_node(rddest);
-
- return 0;
+ }
}
/*
es = bgp_evpn_es_find(&evp->prefix.ead_addr.esi);
bgp_evpn_mh_route_update(bgp, es, vpn, afi, safi,
- global_dest, attr, 1,
- &global_pi, &route_changed);
+ global_dest, attr, &global_pi,
+ &route_changed);
}
/* Schedule for processing and unlock node. */
*/
static int delete_withdraw_vni_routes(struct bgp *bgp, struct bgpevpn *vpn)
{
- int ret;
struct prefix_evpn p;
struct bgp_dest *global_dest;
struct bgp_path_info *pi;
/* Delete and withdraw locally learnt type-2 routes (MACIP)
* for this VNI - from the global table.
*/
- ret = delete_global_type2_routes(bgp, vpn);
- if (ret)
- return ret;
+ delete_global_type2_routes(bgp, vpn);
/* Remove type-3 route for this VNI from global table. */
build_evpn_type3_prefix(&p, vpn->originator_ip);
pfx += 8;
/* Make EVPN prefix. */
- memset(&p, 0, sizeof(struct prefix_evpn));
+ memset(&p, 0, sizeof(p));
p.family = AF_EVPN;
p.prefixlen = EVPN_ROUTE_PREFIXLEN;
p.prefix.route_type = BGP_EVPN_IMET_ROUTE;
pfx += 8;
/* Make EVPN prefix. */
- memset(&p, 0, sizeof(struct prefix_evpn));
+ memset(&p, 0, sizeof(p));
p.family = AF_EVPN;
p.prefixlen = EVPN_ROUTE_PREFIXLEN;
p.prefix.route_type = BGP_EVPN_IP_PREFIX_ROUTE;
char temp[16];
const struct evpn_addr *p_evpn_p;
- memset(&temp, 0, 16);
+ memset(&temp, 0, sizeof(temp));
if (p->family != AF_EVPN)
return;
p_evpn_p = &(p->u.prefix_evpn);
struct prefix ip_prefix;
/* form the default prefix 0.0.0.0/0 */
- memset(&ip_prefix, 0, sizeof(struct prefix));
+ memset(&ip_prefix, 0, sizeof(ip_prefix));
ip_prefix.family = afi2family(afi);
if (add) {
for (; pnt < lim; pnt += psize) {
/* Clear prefix structure. */
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
/* Deal with path-id if AddPath is supported. */
if (addpath_capable) {
struct bgpevpn *vpn;
struct bgpevpn tmp;
- memset(&tmp, 0, sizeof(struct bgpevpn));
+ memset(&tmp, 0, sizeof(tmp));
tmp.vni = vni;
vpn = hash_lookup(bgp->vnihash, &tmp);
return vpn;
{
struct bgpevpn *vpn;
- if (!bgp)
- return NULL;
-
vpn = XCALLOC(MTYPE_BGP_EVPN, sizeof(struct bgpevpn));
/* Set values - RD and RT set to defaults. */
vpn->route_table = bgp_table_init(bgp, AFI_L2VPN, SAFI_EVPN);
/* Add to hash */
- if (!hash_get(bgp->vnihash, vpn, hash_alloc_intern)) {
- XFREE(MTYPE_BGP_EVPN, vpn);
- return NULL;
- }
+ (void)hash_get(bgp->vnihash, vpn, hash_alloc_intern);
bgp_evpn_remote_ip_hash_init(vpn);
bgp_evpn_link_to_vni_svi_hash(bgp, vpn);
*/
if (is_vni_live(vpn))
update_routes_for_vni(bgp, vpn);
- }
-
- /* Create or update as appropriate. */
- if (!vpn) {
+ } else {
+ /* Create or update as appropriate. */
vpn = bgp_evpn_new(bgp, vni, originator_ip, tenant_vrf_id,
- mcast_grp, svi_ifindex);
- if (!vpn) {
- flog_err(
- EC_BGP_VNI,
- "%u: Failed to allocate VNI entry for VNI %u - at Add",
- bgp->vrf_id, vni);
- return -1;
- }
+ mcast_grp, svi_ifindex);
}
/* if the VNI is live already, there is nothing more to do */
}
ip = hash_get(vpn->remote_ip_hash, &tmp, bgp_evpn_remote_ip_hash_alloc);
- if (!ip)
- return;
-
(void)listnode_add(ip->macip_path_list, pi);
bgp_evpn_remote_ip_process_nexthops(vpn, &ip->addr, true);
struct bgpevpn *vpn;
struct bgpevpn tmp;
- memset(&tmp, 0, sizeof(struct bgpevpn));
+ memset(&tmp, 0, sizeof(tmp));
tmp.svi_ifindex = svi;
vpn = hash_lookup(bgp->vni_svi_hash, &tmp);
return vpn;
if (vpn->svi_ifindex == 0)
return;
- hash_get(bgp->vni_svi_hash, vpn, hash_alloc_intern);
+ (void)hash_get(bgp->vni_svi_hash, vpn, hash_alloc_intern);
}
static void bgp_evpn_unlink_from_vni_svi_hash(struct bgp *bgp,
* which stores all the remote IP addresses received via MAC/IP routes
* in this EVI
*/
- memset(&tmp, 0, sizeof(struct evpn_remote_ip));
+ memset(&tmp, 0, sizeof(tmp));
p = &bnc->prefix;
if (p->family == AF_INET) {
if (!vpn->bgp_vrf || vpn->svi_ifindex == 0)
return;
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
if (addr->ipa_type == IPADDR_V4) {
afi = AFI_IP;
*/
int bgp_evpn_mh_route_update(struct bgp *bgp, struct bgp_evpn_es *es,
struct bgpevpn *vpn, afi_t afi, safi_t safi,
- struct bgp_dest *dest, struct attr *attr, int add,
+ struct bgp_dest *dest, struct attr *attr,
struct bgp_path_info **ri, int *route_changed)
{
struct bgp_path_info *tmp_pi = NULL;
return -1;
}
- if (!local_pi && !add)
- return 0;
-
/* create or update the entry */
if (!local_pi) {
bgp_attr_set_ecommunity(attr, ecommunity_dup(&ecom_encap));
/* ES import RT */
- memset(&mac, 0, sizeof(struct ethaddr));
+ memset(&mac, 0, sizeof(mac));
memset(&ecom_es_rt, 0, sizeof(ecom_es_rt));
es_get_system_mac(&es->esi, &mac);
encode_es_rt_extcomm(&eval_es_rt, &mac);
struct bgp_dest *dest = NULL;
struct bgp_path_info *pi = NULL;
- memset(&attr, 0, sizeof(struct attr));
+ memset(&attr, 0, sizeof(attr));
/* Build path-attribute for this route. */
bgp_attr_default_set(&attr, BGP_ORIGIN_IGP);
dest = bgp_node_get(es->route_table, (struct prefix *)p);
/* Create or update route entry. */
- ret = bgp_evpn_mh_route_update(bgp, es, NULL, afi, safi, dest, &attr, 1,
+ ret = bgp_evpn_mh_route_update(bgp, es, NULL, afi, safi, dest, &attr,
&pi, &route_changed);
if (ret != 0)
flog_err(
dest = bgp_global_evpn_node_get(bgp->rib[afi][safi], afi, safi,
p, &es->es_base_frag->prd);
bgp_evpn_mh_route_update(bgp, es, NULL, afi, safi, dest,
- attr_new, 1, &global_pi,
- &route_changed);
+ attr_new, &global_pi, &route_changed);
/* Schedule for processing and unlock node. */
bgp_process(bgp, dest, afi, safi);
int route_changed = 0;
struct prefix_rd *global_rd;
- memset(&attr, 0, sizeof(struct attr));
+ memset(&attr, 0, sizeof(attr));
/* Build path-attribute for this route. */
bgp_attr_default_set(&attr, BGP_ORIGIN_IGP);
/* Create or update route entry. */
ret = bgp_evpn_mh_route_update(bgp, es, vpn, afi, safi, dest,
- &attr, 1, &pi, &route_changed);
+ &attr, &pi, &route_changed);
if (ret != 0)
flog_err(
EC_BGP_ES_INVALID,
/* Create or update route entry. */
ret = bgp_evpn_mh_route_update(bgp, es, vpn, afi, safi, dest,
- &attr, 1, &pi, &route_changed);
+ &attr, &pi, &route_changed);
if (ret != 0) {
flog_err(
EC_BGP_ES_INVALID,
- "%u ERROR: Failed to updated EAD-EVI route ESI: %s VTEP %pI4",
+ "%u ERROR: Failed to updated EAD-ES route ESI: %s VTEP %pI4",
bgp->vrf_id, es->esi_str, &es->originator_ip);
}
global_rd = &es_frag->prd;
dest = bgp_global_evpn_node_get(bgp->rib[afi][safi], afi, safi,
p, global_rd);
bgp_evpn_mh_route_update(bgp, es, vpn, afi, safi, dest,
- attr_new, 1, &global_pi,
- &route_changed);
+ attr_new, &global_pi, &route_changed);
/* Schedule for processing and unlock node. */
bgp_process(bgp, dest, afi, safi);
struct bgp_evpn_nh tmp_n;
struct bgp_evpn_nh *n = NULL;
- memset(&tmp_n, 0, sizeof(struct bgp_evpn_nh));
+ memset(&tmp_n, 0, sizeof(tmp_n));
memcpy(&tmp_n.ip, ip, sizeof(struct ipaddr));
n = hash_get(bgp_vrf->evpn_nh_table, &tmp_n, bgp_evpn_nh_alloc);
ipaddr2str(ip, n->nh_str, sizeof(n->nh_str));
extern int bgp_evpn_mh_route_update(struct bgp *bgp, struct bgp_evpn_es *es,
struct bgpevpn *vpn, afi_t afi, safi_t safi,
struct bgp_dest *dest, struct attr *attr,
- int add, struct bgp_path_info **ri,
+ struct bgp_path_info **ri,
int *route_changed);
int bgp_evpn_type1_route_process(struct peer *peer, afi_t afi, safi_t safi,
struct attr *attr, uint8_t *pfx, int psize,
p->prefix.route_type = BGP_EVPN_MAC_IP_ROUTE;
memcpy(&p->prefix.macip_addr.mac.octet, mac->octet, ETH_ALEN);
p->prefix.macip_addr.ip.ipa_type = IPADDR_NONE;
- if (ip)
- memcpy(&p->prefix.macip_addr.ip, ip, sizeof(*ip));
+ memcpy(&p->prefix.macip_addr.ip, ip, sizeof(*ip));
}
static inline void
struct prefix_rd prd;
int rd_all = 0;
- argv_find(argv, argc, "all", &rd_all);
- if (rd_all)
+ if (argv_find(argv, argc, "all", &rd_all))
return bgp_show_ethernet_vpn(vty, NULL, bgp_show_type_normal,
NULL, SHOW_DISPLAY_STANDARD,
use_json(argc, argv));
struct prefix_rd prd;
int rd_all = 0;
- argv_find(argv, argc, "all", &rd_all);
- if (rd_all)
+ if (argv_find(argv, argc, "all", &rd_all))
return bgp_show_ethernet_vpn(vty, NULL, bgp_show_type_normal,
NULL, SHOW_DISPLAY_TAGS, 0);
int ret;
struct peer *peer;
char *peerstr = NULL;
- struct prefix_rd prd;
+ struct prefix_rd prd = {};
bool uj = use_json(argc, argv);
afi_t afi = AFI_L2VPN;
safi_t safi = SAFI_EVPN;
return CMD_WARNING;
}
- argv_find(argv, argc, "all", &rd_all);
- if (!rd_all) {
+ if (argv_find(argv, argc, "all", &rd_all)) {
argv_find(argv, argc, "ASN:NN_OR_IP-ADDRESS:NN",
&idx_ext_community);
ret = str2prefix_rd(argv[idx_ext_community]->arg, &prd);
return CMD_WARNING;
}
- argv_find(argv, argc, "all", &rd_all);
- if (rd_all)
+ if (argv_find(argv, argc, "all", &rd_all))
return show_adj_route_vpn(vty, peer, NULL, AFI_L2VPN, SAFI_EVPN,
uj);
else {
struct prefix_rd prd;
int rd_all = 0;
- argv_find(argv, argc, "all", &rd_all);
- if (rd_all)
+ if (argv_find(argv, argc, "all", &rd_all))
return bgp_show_ethernet_vpn(vty, NULL, bgp_show_type_normal,
NULL, SHOW_DISPLAY_OVERLAY,
use_json(argc, argv));
struct bgpevpn *vpn;
struct in_addr mcast_grp = {INADDR_ANY};
- if (!bgp->vnihash)
- return NULL;
-
vpn = bgp_evpn_lookup_vni(bgp, vni);
if (!vpn) {
/* Check if this L2VNI is already configured as L3VNI */
* zebra
*/
vpn = bgp_evpn_new(bgp, vni, bgp->router_id, 0, mcast_grp, 0);
- if (!vpn) {
- flog_err(
- EC_BGP_VNI,
- "%u: Failed to allocate VNI entry for VNI %u - at Config",
- bgp->vrf_id, vni);
- return NULL;
- }
}
/* Mark as configured. */
*/
static void evpn_delete_vni(struct bgp *bgp, struct bgpevpn *vpn)
{
- assert(bgp->vnihash);
-
if (!is_vni_live(vpn)) {
bgp_evpn_free(bgp, vpn);
return;
num_vnis = hashcount(bgp->vnihash);
if (!num_vnis)
return;
- memset(&wctx, 0, sizeof(struct vni_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.bgp = bgp;
wctx.vty = vty;
wctx.vtep_ip = vtep_ip;
JSON_STR)
{
struct bgp *bgp;
- int ret;
+ int ret = 0;
struct prefix_rd prd;
int type = 0;
bool uj = false;
if (uj)
json = json_object_new_object();
- argv_find(argv, argc, "all", &rd_all);
- if (!rd_all) {
+ if (argv_find(argv, argc, "all", &rd_all)) {
/* get the RD */
- argv_find(argv, argc, "ASN:NN_OR_IP-ADDRESS:NN",
- &idx_ext_community);
- ret = str2prefix_rd(argv[idx_ext_community]->arg, &prd);
- if (!ret) {
- vty_out(vty, "%% Malformed Route Distinguisher\n");
- return CMD_WARNING;
+ if (argv_find(argv, argc, "ASN:NN_OR_IP-ADDRESS:NN",
+ &idx_ext_community)) {
+ ret = str2prefix_rd(argv[idx_ext_community]->arg, &prd);
+ if (!ret) {
+ vty_out(vty,
+ "%% Malformed Route Distinguisher\n");
+ return CMD_WARNING;
+ }
}
}
JSON_STR)
{
struct bgp *bgp;
- int ret;
+ int ret = 0;
struct prefix_rd prd;
struct ethaddr mac;
struct ipaddr ip;
json = json_object_new_object();
/* get the prd */
- argv_find(argv, argc, "all", &rd_all);
- if (!rd_all) {
- argv_find(argv, argc, "ASN:NN_OR_IP-ADDRESS:NN",
- &idx_ext_community);
- ret = str2prefix_rd(argv[idx_ext_community]->arg, &prd);
- if (!ret) {
- vty_out(vty, "%% Malformed Route Distinguisher\n");
- return CMD_WARNING;
+ if (argv_find(argv, argc, "all", &rd_all)) {
+ if (argv_find(argv, argc, "ASN:NN_OR_IP-ADDRESS:NN",
+ &idx_ext_community)) {
+ ret = str2prefix_rd(argv[idx_ext_community]->arg, &prd);
+ if (!ret) {
+ vty_out(vty,
+ "%% Malformed Route Distinguisher\n");
+ return CMD_WARNING;
+ }
}
}
if (bgp->advertise_all_vni)
vty_out(vty, " advertise-all-vni\n");
- if (bgp->vnihash) {
+ if (hashcount(bgp->vnihash)) {
struct list *vnilist = hash_to_list(bgp->vnihash);
struct listnode *ln;
struct bgpevpn *data;
for (; pnt < lim; pnt += psize) {
/* Clear prefix structure. */
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
/* All FlowSpec NLRI begin with length. */
if (pnt + 1 > lim)
switch (type) {
case FLOWSPEC_DEST_PREFIX:
case FLOWSPEC_SRC_PREFIX:
- memset(&compare, 0, sizeof(struct prefix));
+ memset(&compare, 0, sizeof(compare));
ret = bgp_flowspec_ip_address(
BGP_FLOWSPEC_CONVERT_TO_NON_OPAQUE,
nlri_content+offset,
uint8_t prefix_offset = 0;
*error = 0;
- memset(&prefix_local, 0, sizeof(struct prefix));
+ memset(&prefix_local, 0, sizeof(prefix_local));
/* read the prefix length */
prefix_local.prefixlen = nlri_ptr[offset];
psize = PSIZE(prefix_local.prefixlen);
struct bgp_dest *dest = pi->net;
struct bgp_pbr_entry_action *api_action;
- memset(&api, 0, sizeof(struct bgp_pbr_entry_main));
+ memset(&api, 0, sizeof(api));
if (bgp_pbr_build_and_validate_entry(bgp_dest_get_prefix(dest), pi,
&api)
< 0)
"Begin read-only mode - update-delay timer %d seconds",
peer->bgp->v_update_delay);
}
- if (CHECK_FLAG(peer->cap, PEER_CAP_RESTART_BIT_RCV))
+ if (CHECK_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_R_BIT_RCV))
bgp_update_restarted_peers(peer);
}
if (peer->ostatus == Established
return -1;
}
+ /*
+	 * If we are doing nht for a peer that is v6 LL based
+ * massage the event system to make things happy
+ */
+ bgp_nht_interface_events(peer);
+
bgp_reads_on(peer);
if (bgp_debug_neighbor_events(peer)) {
return -1;
}
+ /*
+	 * If we are doing nht for a peer that is v6 LL based
+ * massage the event system to make things happy
+ */
+ bgp_nht_interface_events(peer);
+
bgp_reads_on(peer);
if (bgp_debug_neighbor_events(peer)) {
if (bgp_debug_neighbor_events(peer))
zlog_debug("%s [FSM] Hold timer expire", peer->host);
+ /* RFC8538 updates RFC 4724 by defining an extension that permits
+ * the Graceful Restart procedures to be performed when the BGP
+ * speaker receives a BGP NOTIFICATION message or the Hold Time expires.
+ */
+ if (peer_established(peer) &&
+ bgp_has_graceful_restart_notification(peer))
+ if (CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE))
+ SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
+
return bgp_stop_with_notify(peer, BGP_NOTIFY_HOLD_ERR, 0);
}
} else {
/* Peer sends R-bit. In this case, we need to send
* ZEBRA_CLIENT_ROUTE_UPDATE_COMPLETE to Zebra. */
- if (CHECK_FLAG(peer->cap, PEER_CAP_RESTART_BIT_RCV)) {
+ if (CHECK_FLAG(peer->cap,
+ PEER_CAP_GRACEFUL_RESTART_R_BIT_RCV)) {
FOREACH_AFI_SAFI (afi, safi)
/* Send route processing complete
message to RIB */
* so the hash_release is the same for either.
*/
hash_release(peer->bgp->peerhash, peer);
- hash_get(peer->bgp->peerhash, peer, hash_alloc_intern);
+ (void)hash_get(peer->bgp->peerhash, peer, hash_alloc_intern);
/* Start BFD peer if not already running. */
if (peer->bfd_config)
&& CHECK_FLAG(peer->cap, PEER_CAP_RESTART_RCV))
#define BGP_PEER_RESTARTING_MODE(peer) \
- (CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART) \
- && CHECK_FLAG(peer->cap, PEER_CAP_RESTART_BIT_ADV) \
- && !CHECK_FLAG(peer->cap, PEER_CAP_RESTART_BIT_RCV))
+ (CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART) && \
+ CHECK_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_R_BIT_ADV) && \
+ !CHECK_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_R_BIT_RCV))
#define BGP_PEER_HELPER_MODE(peer) \
- (CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART_HELPER) \
- && CHECK_FLAG(peer->cap, PEER_CAP_RESTART_BIT_RCV) \
- && !CHECK_FLAG(peer->cap, PEER_CAP_RESTART_BIT_ADV))
+ (CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART_HELPER) && \
+ CHECK_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_R_BIT_RCV) && \
+ !CHECK_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_R_BIT_ADV))
/* Prototypes. */
unsigned int iovsz;
unsigned int strmsz;
unsigned int total_written;
+ time_t now;
wpkt_quanta_old = atomic_load_explicit(&peer->bgp->wpkt_quanta,
memory_order_relaxed);
}
done : {
+ now = bgp_clock();
/*
* Update last_update if UPDATEs were written.
* Note: that these are only updated at end,
* not per message (i.e., per loop)
*/
if (uo)
- atomic_store_explicit(&peer->last_update, bgp_clock(),
+ atomic_store_explicit(&peer->last_update, now,
memory_order_relaxed);
/* If we TXed any flavor of packet */
- if (update_last_write)
- atomic_store_explicit(&peer->last_write, bgp_clock(),
+ if (update_last_write) {
+ atomic_store_explicit(&peer->last_write, now,
memory_order_relaxed);
+ peer->last_sendq_ok = now;
+ }
}
return status;
bgp_keepalive_send(pkat->peer);
monotime(&pkat->last);
- memset(&elapsed, 0x00, sizeof(struct timeval));
+ memset(&elapsed, 0, sizeof(elapsed));
diff = ka;
}
hash_iterate(peerhash, peer_process, &next_update);
if (next_update.tv_sec == -1)
- memset(&next_update, 0x00, sizeof(next_update));
+ memset(&next_update, 0, sizeof(next_update));
monotime_since(&currtime, &aftertime);
holder.peer = peer;
if (!hash_lookup(peerhash, &holder)) {
struct pkat *pkat = pkat_new(peer);
- hash_get(peerhash, pkat, hash_alloc_intern);
+ (void)hash_get(peerhash, pkat, hash_alloc_intern);
peer_lock(peer);
}
SET_FLAG(peer->thread_flags, PEER_THREAD_KEEPALIVES_ON);
s = zclient->ibuf;
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
p.family = stream_getw(s);
p.prefixlen = stream_getc(s);
stream_get(p.u.val, s, PSIZE(p.prefixlen));
for (; pnt < lim; pnt += psize) {
/* Clear prefix structure. */
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
if (addpath_capable) {
DEFINE_MTYPE(BGPD, BGP_SRV6_SID, "BGP srv6 segment-id");
DEFINE_MTYPE(BGPD, BGP_SRV6_FUNCTION, "BGP srv6 function");
DEFINE_MTYPE(BGPD, EVPN_REMOTE_IP, "BGP EVPN Remote IP hash entry");
+
+DEFINE_MTYPE(BGPD, BGP_NOTIFICATION, "BGP Notification Message");
DECLARE_MTYPE(EVPN_REMOTE_IP);
+DECLARE_MTYPE(BGP_NOTIFICATION);
+
#endif /* _QUAGGA_BGP_MEMORY_H */
* Record maximum-paths configuration for BGP instance
*/
int bgp_maximum_paths_set(struct bgp *bgp, afi_t afi, safi_t safi, int peertype,
- uint16_t maxpaths, uint16_t options)
+ uint16_t maxpaths, bool same_clusterlen)
{
if (!bgp || (afi >= AFI_MAX) || (safi >= SAFI_MAX))
return -1;
switch (peertype) {
case BGP_PEER_IBGP:
bgp->maxpaths[afi][safi].maxpaths_ibgp = maxpaths;
- bgp->maxpaths[afi][safi].ibgp_flags |= options;
+ bgp->maxpaths[afi][safi].same_clusterlen = same_clusterlen;
break;
case BGP_PEER_EBGP:
bgp->maxpaths[afi][safi].maxpaths_ebgp = maxpaths;
switch (peertype) {
case BGP_PEER_IBGP:
bgp->maxpaths[afi][safi].maxpaths_ibgp = multipath_num;
- bgp->maxpaths[afi][safi].ibgp_flags = 0;
+ bgp->maxpaths[afi][safi].same_clusterlen = false;
break;
case BGP_PEER_EBGP:
bgp->maxpaths[afi][safi].maxpaths_ebgp = multipath_num;
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef _QUAGGA_BGP_MPATH_H
-#define _QUAGGA_BGP_MPATH_H
+#ifndef _FRR_BGP_MPATH_H
+#define _FRR_BGP_MPATH_H
/* Supplemental information linked to bgp_path_info for keeping track of
* multipath selections, lazily allocated to save memory
};
/* Functions to support maximum-paths configuration */
-extern int bgp_maximum_paths_set(struct bgp *, afi_t, safi_t, int, uint16_t,
- uint16_t);
-extern int bgp_maximum_paths_unset(struct bgp *, afi_t, safi_t, int);
+extern int bgp_maximum_paths_set(struct bgp *bgp, afi_t afi, safi_t safi,
+ int peertype, uint16_t maxpaths,
+ bool clusterlen);
+extern int bgp_maximum_paths_unset(struct bgp *bgp, afi_t afi, safi_t safi,
+ int peertype);
/* Functions used by bgp_best_selection to record current
* multipath selections
*/
extern int bgp_path_info_nexthop_cmp(struct bgp_path_info *bpi1,
struct bgp_path_info *bpi2);
-extern void bgp_mp_list_init(struct list *);
-extern void bgp_mp_list_clear(struct list *);
+extern void bgp_mp_list_init(struct list *mp_list);
+extern void bgp_mp_list_clear(struct list *mp_list);
extern void bgp_mp_list_add(struct list *mp_list, struct bgp_path_info *mpinfo);
extern void bgp_mp_dmed_deselect(struct bgp_path_info *dmed_best);
extern void bgp_path_info_mpath_update(struct bgp *bgp, struct bgp_dest *dest,
struct bgp_path_info *path);
extern uint64_t bgp_path_info_mpath_cumbw(struct bgp_path_info *path);
-#endif /* _QUAGGA_BGP_MPATH_H */
+#endif /* _FRR_BGP_MPATH_H */
#define VPN_PREFIXLEN_MIN_BYTES (3 + 8) /* label + RD */
while (STREAM_READABLE(data) > 0) {
/* Clear prefix structure. */
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
if (addpath_capable) {
STREAM_GET(&addpath_id, data, BGP_ADDPATH_ID_LEN);
peer = peer_create(&su, peer1->conf_if, peer1->bgp, peer1->local_as,
peer1->as, peer1->as_type, NULL);
hash_release(peer->bgp->peerhash, peer);
- hash_get(peer->bgp->peerhash, peer, hash_alloc_intern);
+ (void)hash_get(peer->bgp->peerhash, peer, hash_alloc_intern);
peer_xfer_config(peer, peer1);
bgp_peer_gr_flags_update(peer);
#ifdef IPTOS_PREC_INTERNETCONTROL
frr_with_privs(&bgpd_privs) {
if (sockunion_family(&peer->su) == AF_INET)
- setsockopt_ipv4_tos(peer->fd,
- IPTOS_PREC_INTERNETCONTROL);
+ setsockopt_ipv4_tos(peer->fd, bm->tcp_dscp);
else if (sockunion_family(&peer->su) == AF_INET6)
- setsockopt_ipv6_tclass(peer->fd,
- IPTOS_PREC_INTERNETCONTROL);
+ setsockopt_ipv6_tclass(peer->fd, bm->tcp_dscp);
}
#endif
#ifdef IPTOS_PREC_INTERNETCONTROL
if (sa->sa_family == AF_INET)
- setsockopt_ipv4_tos(sock, IPTOS_PREC_INTERNETCONTROL);
+ setsockopt_ipv4_tos(sock, bm->tcp_dscp);
else if (sa->sa_family == AF_INET6)
- setsockopt_ipv6_tclass(sock,
- IPTOS_PREC_INTERNETCONTROL);
+ setsockopt_ipv6_tclass(sock, bm->tcp_dscp);
#endif
sockopt_v6only(sa->sa_family, sock);
tmp.addr = *tip;
addr = hash_get(bgp->tip_hash, &tmp, bgp_tip_hash_alloc);
- if (!addr)
- return;
-
addr->refcnt++;
}
bgp_address_add(bgp, ifc, addr);
- dest = bgp_node_get(bgp->connected_table[AFI_IP],
- (struct prefix *)&p);
+ dest = bgp_node_get(bgp->connected_table[AFI_IP], &p);
bc = bgp_dest_get_bgp_connected_ref_info(dest);
if (bc)
bc->refcnt++;
bgp_address_add(bgp, ifc, addr);
- dest = bgp_node_get(bgp->connected_table[AFI_IP6],
- (struct prefix *)&p);
+ dest = bgp_node_get(bgp->connected_table[AFI_IP6], &p);
bc = bgp_dest_get_bgp_connected_ref_info(dest);
if (bc)
return true;
if (new_afi == AF_INET && hashcount(bgp->tip_hash)) {
- memset(&tmp_tip, 0, sizeof(struct tip_addr));
+ memset(&tmp_tip, 0, sizeof(tmp_tip));
tmp_tip.addr = attr->nexthop;
if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) {
SET_FLAG(peer->cap, PEER_CAP_RESTART_RCV);
restart_flag_time = stream_getw(s);
- if (CHECK_FLAG(restart_flag_time, RESTART_R_BIT))
- SET_FLAG(peer->cap, PEER_CAP_RESTART_BIT_RCV);
+
+ /* The most significant bit is defined in [RFC4724] as
+ * the Restart State ("R") bit.
+ */
+ if (CHECK_FLAG(restart_flag_time, GRACEFUL_RESTART_R_BIT))
+ SET_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_R_BIT_RCV);
+ else
+ UNSET_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_R_BIT_RCV);
+
+ /* The second most significant bit is defined in this
+ * document as the Graceful Notification ("N") bit.
+ */
+ if (CHECK_FLAG(restart_flag_time, GRACEFUL_RESTART_N_BIT))
+ SET_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_N_BIT_RCV);
else
- UNSET_FLAG(peer->cap, PEER_CAP_RESTART_BIT_RCV);
+ UNSET_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_N_BIT_RCV);
UNSET_FLAG(restart_flag_time, 0xF000);
peer->v_gr_restart = restart_flag_time;
if (bgp_debug_neighbor_events(peer)) {
- zlog_debug("%s Peer has%srestarted. Restart Time : %d",
- peer->host,
- CHECK_FLAG(peer->cap, PEER_CAP_RESTART_BIT_RCV)
- ? " "
- : " not ",
- peer->v_gr_restart);
+ zlog_debug(
+ "%s Peer has%srestarted. Restart Time: %d, N-bit set: %s",
+ peer->host,
+ CHECK_FLAG(peer->cap,
+ PEER_CAP_GRACEFUL_RESTART_R_BIT_RCV)
+ ? " "
+ : " not ",
+ peer->v_gr_restart,
+ CHECK_FLAG(peer->cap,
+ PEER_CAP_GRACEFUL_RESTART_N_BIT_RCV)
+ ? "yes"
+ : "no");
}
while (stream_get_getp(s) + 4 <= end) {
SET_FLAG(peer->af_cap[afi][safi],
PEER_CAP_RESTART_AF_RCV);
- if (CHECK_FLAG(flag, RESTART_F_BIT))
+ if (CHECK_FLAG(flag, GRACEFUL_RESTART_F_BIT))
SET_FLAG(peer->af_cap[afi][safi],
PEER_CAP_RESTART_AF_PRESERVE_RCV);
}
stream_putc(s, 0);
restart_time = peer->bgp->restart_time;
if (peer->bgp->t_startup) {
- SET_FLAG(restart_time, RESTART_R_BIT);
- SET_FLAG(peer->cap, PEER_CAP_RESTART_BIT_ADV);
+ SET_FLAG(restart_time, GRACEFUL_RESTART_R_BIT);
+ SET_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_R_BIT_ADV);
+ if (BGP_DEBUG(graceful_restart, GRACEFUL_RESTART))
+ zlog_debug("[BGP_GR] Sending R-Bit for peer: %s",
+ peer->host);
+ }
+ if (CHECK_FLAG(peer->bgp->flags, BGP_FLAG_GRACEFUL_NOTIFICATION)) {
+ SET_FLAG(restart_time, GRACEFUL_RESTART_N_BIT);
+ SET_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_N_BIT_ADV);
if (BGP_DEBUG(graceful_restart, GRACEFUL_RESTART))
- zlog_debug("[BGP_GR] Sending R-Bit for Peer :%s :",
+ zlog_debug("[BGP_GR] Sending N-Bit for peer: %s",
peer->host);
}
stream_putc(s, pkt_safi);
if (CHECK_FLAG(peer->bgp->flags,
BGP_FLAG_GR_PRESERVE_FWD))
- stream_putc(s, RESTART_F_BIT);
+ stream_putc(s, GRACEFUL_RESTART_F_BIT);
else
stream_putc(s, 0);
}
#define CAPABILITY_ACTION_UNSET 1
/* Graceful Restart */
-#define RESTART_R_BIT 0x8000
-#define RESTART_F_BIT 0x80
+#define GRACEFUL_RESTART_R_BIT 0x8000
+#define GRACEFUL_RESTART_N_BIT 0x4000
+#define GRACEFUL_RESTART_F_BIT 0x80
/* Long-lived Graceful Restart */
#define LLGR_F_BIT 0x80
*/
static void bgp_packet_add(struct peer *peer, struct stream *s)
{
+	intmax_t delta;    /* seconds since the SendQ last made progress */
+	uint32_t holdtime; /* negotiated holdtime, read atomically below */
+
	frr_with_mutex(&peer->io_mtx) {
+		/* if the queue is empty, reset the "last OK" timestamp to
+		 * now, otherwise if we write another packet immediately
+		 * after it'll get confused
+		 */
+		if (!stream_fifo_count_safe(peer->obuf))
+			peer->last_sendq_ok = bgp_clock();
+
		stream_fifo_push(peer->obuf, s);
+
+		delta = bgp_clock() - peer->last_sendq_ok;
+		holdtime = atomic_load_explicit(&peer->holdtime,
+						memory_order_relaxed);
+
+		/* Note that when we're here, we're adding some packet to the
+		 * OutQ.  That includes keepalives when there is nothing to
+		 * do, so there's a guarantee we pass by here once in a while.
+		 *
+		 * That implies there is no need to go set up another separate
+		 * timer that ticks down SendHoldTime, as we'll be here sooner
+		 * or later anyway and will see the checks below failing.
+		 */
+		if (delta > 2 * (intmax_t)holdtime) {
+			/* Stuck for two full holdtimes: kill the session via
+			 * the FSM rather than letting it hang forever.
+			 */
+			flog_err(
+				EC_BGP_SENDQ_STUCK_PROPER,
+				"%s has not made any SendQ progress for 2 holdtimes, terminating session",
+				peer->host);
+			BGP_EVENT_ADD(peer, TCP_fatal_error);
+		} else if (delta > (intmax_t)holdtime &&
+			   bgp_clock() - peer->last_sendq_warn > 5) {
+			/* One holdtime stuck: warn, but rate-limit the log
+			 * message to at most once every 5 seconds.
+			 */
+			flog_warn(
+				EC_BGP_SENDQ_STUCK_WARN,
+				"%s has not made any SendQ progress for 1 holdtime, peer overloaded?",
+				peer->host);
+			peer->last_sendq_warn = bgp_clock();
+		}
	}
}
stream_free(s);
}
+/*
+ * Encapsulate an original BGP CEASE Notification into Hard Reset
+ *
+ * Builds the RFC 8538 Hard Reset data portion: a heap buffer
+ * (MTYPE_BGP_NOTIFICATION) of datalen + 2 bytes laid out as
+ * [code][subcode][data...].  The caller owns the result and must
+ * XFREE() it after writing it to the output stream.
+ */
+static uint8_t *bgp_notify_encapsulate_hard_reset(uint8_t code, uint8_t subcode,
+						  uint8_t *data, size_t datalen)
+{
+	uint8_t *message = XCALLOC(MTYPE_BGP_NOTIFICATION, datalen + 2);
+
+	/* ErrCode */
+	message[0] = code;
+	/* Subcode */
+	message[1] = subcode;
+	/* Data (optional; data may be NULL when datalen == 0) */
+	if (datalen)
+		memcpy(message + 2, data, datalen);
+
+	return message;
+}
+
+
+/*
+ * Decapsulate an original BGP CEASE Notification from Hard Reset
+ *
+ * Inverse of the encapsulation above: the first two bytes of
+ * notify->raw_data carry the inner code/subcode, the remainder is the
+ * inner data.  The returned bn.raw_data is a fresh MTYPE_BGP_NOTIFICATION
+ * allocation; the caller is responsible for freeing it.
+ *
+ * NOTE(review): assumes notify->length >= 2 and notify->raw_data != NULL;
+ * callers must check the outer length before decapsulating — confirm at
+ * each call site.
+ */
+struct bgp_notify bgp_notify_decapsulate_hard_reset(struct bgp_notify *notify)
+{
+	struct bgp_notify bn = {};
+
+	bn.code = notify->raw_data[0];
+	bn.subcode = notify->raw_data[1];
+	bn.length = notify->length - 2;
+
+	bn.raw_data = XCALLOC(MTYPE_BGP_NOTIFICATION, bn.length);
+	memcpy(bn.raw_data, notify->raw_data + 2, bn.length);
+
+	return bn;
+}
+
+/* Check if Graceful-Restart N-bit is exchanged.
+ *
+ * True only when BOTH sides signalled the Notification ("N") bit:
+ * we advertised it AND the peer sent it — i.e. the RFC 8538
+ * procedures apply to this session.
+ */
+bool bgp_has_graceful_restart_notification(struct peer *peer)
+{
+	return CHECK_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_N_BIT_RCV) &&
+	       CHECK_FLAG(peer->cap, PEER_CAP_GRACEFUL_RESTART_N_BIT_ADV);
+}
+
+
+/*
+ * Check if to send BGP CEASE Notification/Hard Reset?
+ *
+ * Decides whether an outgoing NOTIFICATION with the given code/subcode
+ * must be sent as an RFC 8538 Hard Reset.  Only meaningful when the
+ * N-bit has been exchanged; otherwise always false.
+ */
+bool bgp_notify_send_hard_reset(struct peer *peer, uint8_t code,
+				uint8_t subcode)
+{
+	/* When the "N" bit has been exchanged, a Hard Reset message is used to
+	 * indicate to the peer that the session is to be fully terminated.
+	 */
+	if (!bgp_has_graceful_restart_notification(peer))
+		return false;
+
+	/*
+	 * Subcodes that MUST trigger a Hard Reset per
+	 * https://datatracker.ietf.org/doc/html/rfc8538#section-5.1
+	 */
+	if (code == BGP_NOTIFY_CEASE) {
+		switch (subcode) {
+		case BGP_NOTIFY_CEASE_MAX_PREFIX:
+		case BGP_NOTIFY_CEASE_ADMIN_SHUTDOWN:
+		case BGP_NOTIFY_CEASE_PEER_UNCONFIG:
+		case BGP_NOTIFY_CEASE_HARD_RESET:
+			return true;
+		case BGP_NOTIFY_CEASE_ADMIN_RESET:
+			/* Provide user control:
+			 * `bgp hard-administrative-reset`
+			 */
+			if (CHECK_FLAG(peer->bgp->flags,
+				       BGP_FLAG_HARD_ADMIN_RESET))
+				return true;
+			else
+				return false;
+		default:
+			break;
+		}
+	}
+
+	return false;
+}
+
+
+/*
+ * Check if received BGP CEASE Notification/Hard Reset?
+ *
+ * Returns true when an incoming NOTIFICATION with the given
+ * code/subcode is an RFC 8538 Hard Reset — i.e. the N-bit was
+ * exchanged and the message is CEASE/Hard Reset.
+ */
+bool bgp_notify_received_hard_reset(struct peer *peer, uint8_t code,
+				    uint8_t subcode)
+{
+	/* When the "N" bit has been exchanged, a Hard Reset message is used to
+	 * indicate to the peer that the session is to be fully terminated.
+	 */
+	if (!bgp_has_graceful_restart_notification(peer))
+		return false;
+
+	if (code == BGP_NOTIFY_CEASE && subcode == BGP_NOTIFY_CEASE_HARD_RESET)
+		return true;
+
+	return false;
+}
+
/*
* Creates a BGP Notify and appends it to the peer's output queue.
*
uint8_t sub_code, uint8_t *data, size_t datalen)
{
struct stream *s;
+ bool hard_reset = bgp_notify_send_hard_reset(peer, code, sub_code);
/* Lock I/O mutex to prevent other threads from pushing packets */
frr_mutex_lock_autounlock(&peer->io_mtx);
/* Make notify packet. */
bgp_packet_set_marker(s, BGP_MSG_NOTIFY);
- /* Set notify packet values. */
- stream_putc(s, code); /* BGP notify code */
- stream_putc(s, sub_code); /* BGP notify sub_code */
+ /* Check if we should send Hard Reset Notification or not */
+ if (hard_reset) {
+ uint8_t *hard_reset_message = bgp_notify_encapsulate_hard_reset(
+ code, sub_code, data, datalen);
- /* If notify data is present. */
- if (data)
- stream_write(s, data, datalen);
+ /* Hard Reset encapsulates another NOTIFICATION message
+ * in its data portion.
+ */
+ stream_putc(s, BGP_NOTIFY_CEASE);
+ stream_putc(s, BGP_NOTIFY_CEASE_HARD_RESET);
+ stream_write(s, hard_reset_message, datalen + 2);
+
+ XFREE(MTYPE_BGP_NOTIFICATION, hard_reset_message);
+ } else {
+ stream_putc(s, code);
+ stream_putc(s, sub_code);
+ if (data)
+ stream_write(s, data, datalen);
+ }
/* Set BGP packet length. */
bgp_packet_set_size(s);
bgp_notify.length);
}
}
- bgp_notify_print(peer, &bgp_notify, "sending");
+ bgp_notify_print(peer, &bgp_notify, "sending", hard_reset);
if (bgp_notify.data) {
XFREE(MTYPE_TMP, bgp_notify.data);
}
/* Set initial values. */
- memset(&attr, 0, sizeof(struct attr));
+ memset(&attr, 0, sizeof(attr));
attr.label_index = BGP_INVALID_LABEL_INDEX;
attr.label = MPLS_INVALID_LABEL;
memset(&nlris, 0, sizeof(nlris));
*/
static int bgp_notify_receive(struct peer *peer, bgp_size_t size)
{
- struct bgp_notify bgp_notify;
+ struct bgp_notify outer = {};
+ struct bgp_notify inner = {};
+ bool hard_reset = false;
if (peer->notify.data) {
- XFREE(MTYPE_TMP, peer->notify.data);
+ XFREE(MTYPE_BGP_NOTIFICATION, peer->notify.data);
peer->notify.length = 0;
+ peer->notify.hard_reset = false;
}
- bgp_notify.code = stream_getc(peer->curr);
- bgp_notify.subcode = stream_getc(peer->curr);
- bgp_notify.length = size - 2;
- bgp_notify.data = NULL;
- bgp_notify.raw_data = NULL;
+ outer.code = stream_getc(peer->curr);
+ outer.subcode = stream_getc(peer->curr);
+ outer.length = size - 2;
+ outer.data = NULL;
+ outer.raw_data = NULL;
+ if (outer.length) {
+ outer.raw_data = XMALLOC(MTYPE_BGP_NOTIFICATION, outer.length);
+ memcpy(outer.raw_data, stream_pnt(peer->curr), outer.length);
+ }
+
+ hard_reset =
+ bgp_notify_received_hard_reset(peer, outer.code, outer.subcode);
+ if (hard_reset && outer.length) {
+ inner = bgp_notify_decapsulate_hard_reset(&outer);
+ peer->notify.hard_reset = true;
+ } else {
+ inner = outer;
+ }
	/* Preserve notify code and sub code. */
- peer->notify.code = bgp_notify.code;
- peer->notify.subcode = bgp_notify.subcode;
+ peer->notify.code = inner.code;
+ peer->notify.subcode = inner.subcode;
/* For further diagnostic record returned Data. */
- if (bgp_notify.length) {
- peer->notify.length = size - 2;
- peer->notify.data = XMALLOC(MTYPE_TMP, size - 2);
- memcpy(peer->notify.data, stream_pnt(peer->curr), size - 2);
+ if (inner.length) {
+ peer->notify.length = inner.length;
+ peer->notify.data =
+ XMALLOC(MTYPE_BGP_NOTIFICATION, inner.length);
+ memcpy(peer->notify.data, inner.raw_data, inner.length);
}
/* For debug */
int first = 0;
char c[4];
- if (bgp_notify.length) {
- bgp_notify.data =
- XMALLOC(MTYPE_TMP, bgp_notify.length * 3);
- for (i = 0; i < bgp_notify.length; i++)
+ if (inner.length) {
+ inner.data = XMALLOC(MTYPE_BGP_NOTIFICATION,
+ inner.length * 3);
+ for (i = 0; i < inner.length; i++)
if (first) {
snprintf(c, sizeof(c), " %02x",
stream_getc(peer->curr));
- strlcat(bgp_notify.data, c,
- bgp_notify.length * 3);
+ strlcat(inner.data, c,
+ inner.length * 3);
} else {
first = 1;
snprintf(c, sizeof(c), "%02x",
stream_getc(peer->curr));
- strlcpy(bgp_notify.data, c,
- bgp_notify.length * 3);
+ strlcpy(inner.data, c,
+ inner.length * 3);
}
- bgp_notify.raw_data = (uint8_t *)peer->notify.data;
}
- bgp_notify_print(peer, &bgp_notify, "received");
- if (bgp_notify.data) {
- XFREE(MTYPE_TMP, bgp_notify.data);
- bgp_notify.length = 0;
+ bgp_notify_print(peer, &inner, "received", hard_reset);
+ if (inner.length) {
+ XFREE(MTYPE_BGP_NOTIFICATION, inner.data);
+ inner.length = 0;
+ }
+ if (outer.length) {
+ XFREE(MTYPE_BGP_NOTIFICATION, outer.data);
+ XFREE(MTYPE_BGP_NOTIFICATION, outer.raw_data);
+ outer.length = 0;
}
}
in that case we fallback to open without the capability option.
But this done in bgp_stop. We just mark it here to avoid changing
the fsm tables. */
- if (bgp_notify.code == BGP_NOTIFY_OPEN_ERR
- && bgp_notify.subcode == BGP_NOTIFY_OPEN_UNSUP_PARAM)
+ if (inner.code == BGP_NOTIFY_OPEN_ERR &&
+ inner.subcode == BGP_NOTIFY_OPEN_UNSUP_PARAM)
UNSET_FLAG(peer->sflags, PEER_STATUS_CAPABILITY_OPEN);
+ /* If Graceful-Restart N-bit (Notification) is exchanged,
+ * and it's not a Hard Reset, let's retain the routes.
+ */
+ if (bgp_has_graceful_restart_notification(peer) && !hard_reset &&
+ CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE))
+ SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
+
bgp_peer_gr_flags_update(peer);
BGP_GR_ROUTER_DETECT_AND_SEND_CAPABILITY_TO_ZEBRA(peer->bgp,
peer->bgp->peer);
* to maximise debug information.
*/
int ok;
- memset(&orfp, 0,
- sizeof(struct orf_prefix));
+ memset(&orfp, 0, sizeof(orfp));
common = *p_pnt++;
/* after ++: p_pnt <= p_end */
if (common
/* Task callback to handle socket error encountered in the io pthread */
void bgp_packet_process_error(struct thread *thread);
+extern struct bgp_notify
+bgp_notify_decapsulate_hard_reset(struct bgp_notify *notify);
+extern bool bgp_has_graceful_restart_notification(struct peer *peer);
+extern bool bgp_notify_send_hard_reset(struct peer *peer, uint8_t code,
+ uint8_t subcode);
+extern bool bgp_notify_received_hard_reset(struct peer *peer, uint8_t code,
+ uint8_t subcode);
#endif /* _QUAGGA_BGP_PACKET_H */
pbr_rule.action = bpa;
bpr = hash_get(bgp->pbr_rule_hash, &pbr_rule,
bgp_pbr_rule_alloc_intern);
- if (bpr && bpr->unique == 0) {
+ if (bpr->unique == 0) {
bpr->unique = ++bgp_pbr_action_counter_unique;
bpr->installed = false;
bpr->install_in_progress = false;
} else
bpr_found = true;
/* already installed */
- if (bpr_found && bpr) {
+ if (bpr_found) {
struct bgp_path_info_extra *extra =
bgp_path_info_extra_get(path);
bgp_pbr_bpa_add(bpa);
/* ip rule add */
- if (bpr && !bpr->installed)
+ if (!bpr->installed)
bgp_send_pbr_rule_action(bpa, bpr, true);
/* A previous entry may already exist
struct bgp_pbr_val_mask bpvm;
memset(&range, 0, sizeof(range));
- memset(&nh, 0, sizeof(struct nexthop));
- memset(&bpf, 0, sizeof(struct bgp_pbr_filter));
- memset(&bpof, 0, sizeof(struct bgp_pbr_or_filter));
+ memset(&nh, 0, sizeof(nh));
+ memset(&bpf, 0, sizeof(bpf));
+ memset(&bpof, 0, sizeof(bpof));
if (api->match_bitmask & PREFIX_SRC_PRESENT ||
(api->type == BGP_PBR_IPRULE &&
api->match_bitmask_iprule & PREFIX_SRC_PRESENT))
dst = &api->dst_prefix;
if (api->type == BGP_PBR_IPRULE)
bpf.type = api->type;
- memset(&nh, 0, sizeof(struct nexthop));
+ memset(&nh, 0, sizeof(nh));
nh.vrf_id = VRF_UNKNOWN;
if (api->match_protocol_num) {
proto = (uint8_t)api->protocol[0].value;
int str2prefix_rd(const char *str, struct prefix_rd *prd)
{
- int ret; /* ret of called functions */
- int lret; /* local ret, of this func */
+ int ret = 0;
char *p;
char *p2;
struct stream *s = NULL;
char *half = NULL;
struct in_addr addr;
- s = stream_new(8);
-
prd->family = AF_UNSPEC;
prd->prefixlen = 64;
- lret = 0;
p = strchr(str, ':');
if (!p)
goto out;
if (!all_digit(p + 1))
goto out;
+ s = stream_new(RD_BYTES);
+
half = XMALLOC(MTYPE_TMP, (p - str) + 1);
memcpy(half, str, (p - str));
half[p - str] = '\0';
stream_putl(s, atol(p + 1));
}
} else {
- ret = inet_aton(half, &addr);
- if (!ret)
+ if (!inet_aton(half, &addr))
goto out;
stream_putw(s, RD_TYPE_IP);
stream_putw(s, atol(p + 1));
}
memcpy(prd->val, s->data, 8);
- lret = 1;
+ ret = 1;
out:
if (s)
stream_free(s);
XFREE(MTYPE_TMP, half);
- return lret;
+ return ret;
}
char *prefix_rd2str(const struct prefix_rd *prd, char *buf, size_t size)
pair (newm, existm) with the cluster list length. Prefer the
path with smaller cluster list length. */
if (newm == existm) {
- if (peer_sort_lookup(new->peer) == BGP_PEER_IBGP
- && peer_sort_lookup(exist->peer) == BGP_PEER_IBGP
- && (mpath_cfg == NULL
- || CHECK_FLAG(
- mpath_cfg->ibgp_flags,
- BGP_FLAG_IBGP_MULTIPATH_SAME_CLUSTERLEN))) {
+ if (peer_sort_lookup(new->peer) == BGP_PEER_IBGP &&
+ peer_sort_lookup(exist->peer) == BGP_PEER_IBGP &&
+ (mpath_cfg == NULL || mpath_cfg->same_clusterlen)) {
newm = BGP_CLUSTER_LIST_LENGTH(new->attr);
existm = BGP_CLUSTER_LIST_LENGTH(exist->attr);
/* Route map apply. */
if (rmap) {
- memset(&rmap_path, 0, sizeof(struct bgp_path_info));
+ memset(&rmap_path, 0, sizeof(rmap_path));
/* Duplicate current value to new structure for modification. */
rmap_path.peer = peer;
rmap_path.attr = attr;
if (rmap == NULL)
return RMAP_DENY;
- memset(&rmap_path, 0, sizeof(struct bgp_path_info));
+ memset(&rmap_path, 0, sizeof(rmap_path));
/* Route map apply. */
/* Duplicate current value to new structure for modification. */
rmap_path.peer = peer;
PEER_STATUS_ORF_WAIT_REFRESH))
return;
- memset(&attr, 0, sizeof(struct attr));
+ memset(&attr, 0, sizeof(attr));
/* It's initialized in bgp_announce_check() */
/* Announcement to the subgroup. If the route is filtered withdraw it.
if (orig_safi == SAFI_LABELED_UNICAST)
safi = SAFI_UNICAST;
- memset(&new_attr, 0, sizeof(struct attr));
+ memset(&new_attr, 0, sizeof(new_attr));
new_attr.label_index = BGP_INVALID_LABEL_INDEX;
new_attr.label = MPLS_INVALID_LABEL;
then the Error Subcode is set to Invalid Network Field. */
for (; pnt < lim; pnt += psize) {
/* Clear prefix structure. */
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
if (addpath_capable) {
}
/* Defensive coding, double-check the psize fits in a struct
- * prefix */
- if (psize > (ssize_t)sizeof(p.u)) {
+ * prefix for the v4 and v6 afi's and unicast/multicast */
+ if (psize > (ssize_t)sizeof(p.u.val)) {
flog_err(
EC_BGP_UPDATE_RCV,
"%s [Error] Update packet error (prefix length %d too large for prefix storage %zu)",
- peer->host, p.prefixlen, sizeof(p.u));
+ peer->host, p.prefixlen, sizeof(p.u.val));
return BGP_NLRI_PARSE_ERROR_PACKET_LENGTH;
}
if (bgp_static->rmap.name) {
struct attr attr_tmp = attr;
- memset(&rmap_path, 0, sizeof(struct bgp_path_info));
+ memset(&rmap_path, 0, sizeof(rmap_path));
rmap_path.peer = bgp->peer_self;
rmap_path.attr = &attr_tmp;
memcpy(&attr.esi, bgp_static->eth_s_id, sizeof(esi_t));
if (bgp_static->encap_tunneltype == BGP_ENCAP_TYPE_VXLAN) {
struct bgp_encap_type_vxlan bet;
- memset(&bet, 0, sizeof(struct bgp_encap_type_vxlan));
+ memset(&bet, 0, sizeof(bet));
bet.vnid = p->u.prefix_evpn.prefix_addr.eth_tag;
bgp_encap_type_vxlan_to_tlv(&bet, &attr);
}
return CMD_WARNING_CONFIG_FAILED;
}
if (gwip) {
- memset(&gw_ip, 0, sizeof(struct prefix));
+ memset(&gw_ip, 0, sizeof(gw_ip));
ret = str2prefix(gwip, &gw_ip);
if (!ret) {
vty_out(vty, "%% Malformed GatewayIp\n");
/* Apply route-map. */
if (red->rmap.name) {
- memset(&rmap_path, 0, sizeof(struct bgp_path_info));
+ memset(&rmap_path, 0, sizeof(rmap_path));
rmap_path.peer = bgp->peer_self;
rmap_path.attr = &attr_new;
RPKI_NOT_BEING_USED, use_json(argc, argv));
}
-static void show_adj_route_header(struct vty *vty, struct bgp *bgp,
+static void show_adj_route_header(struct vty *vty, struct peer *peer,
struct bgp_table *table, int *header1,
int *header2, json_object *json,
json_object *json_scode,
if (json) {
json_object_int_add(json, "bgpTableVersion", version);
json_object_string_addf(json, "bgpLocalRouterId",
- "%pI4", &bgp->router_id);
+ "%pI4", &peer->bgp->router_id);
json_object_int_add(json, "defaultLocPrf",
- bgp->default_local_pref);
- json_object_int_add(json, "localAS", bgp->as);
+ peer->bgp->default_local_pref);
+ json_object_int_add(json, "localAS",
+ peer->change_local_as
+ ? peer->change_local_as
+ : peer->local_as);
json_object_object_add(json, "bgpStatusCodes",
json_scode);
json_object_object_add(json, "bgpOriginCodes",
vty_out(vty,
"BGP table version is %" PRIu64
", local router ID is %pI4, vrf id ",
- version, &bgp->router_id);
- if (bgp->vrf_id == VRF_UNKNOWN)
+ version, &peer->bgp->router_id);
+ if (peer->bgp->vrf_id == VRF_UNKNOWN)
vty_out(vty, "%s", VRFID_NONE_STR);
else
- vty_out(vty, "%u", bgp->vrf_id);
+ vty_out(vty, "%u", peer->bgp->vrf_id);
vty_out(vty, "\n");
vty_out(vty, "Default local pref %u, ",
- bgp->default_local_pref);
- vty_out(vty, "local AS %u\n", bgp->as);
+ peer->bgp->default_local_pref);
+ vty_out(vty, "local AS %u\n",
+ peer->change_local_as ? peer->change_local_as
+ : peer->local_as);
vty_out(vty, BGP_SHOW_SCODE_HEADER);
vty_out(vty, BGP_SHOW_NCODE_HEADER);
vty_out(vty, BGP_SHOW_OCODE_HEADER);
"%pI4", &bgp->router_id);
json_object_int_add(json, "defaultLocPrf",
bgp->default_local_pref);
- json_object_int_add(json, "localAS", bgp->as);
+ json_object_int_add(json, "localAS",
+ peer->change_local_as
+ ? peer->change_local_as
+ : peer->local_as);
json_object_object_add(json, "bgpStatusCodes",
json_scode);
json_object_object_add(json, "bgpOriginCodes",
vty_out(vty, "\n");
vty_out(vty, "Default local pref %u, ",
bgp->default_local_pref);
- vty_out(vty, "local AS %u\n", bgp->as);
+ vty_out(vty, "local AS %u\n",
+ peer->change_local_as ? peer->change_local_as
+ : peer->local_as);
vty_out(vty, BGP_SHOW_SCODE_HEADER);
vty_out(vty, BGP_SHOW_NCODE_HEADER);
vty_out(vty, BGP_SHOW_OCODE_HEADER);
if (ain->peer != peer)
continue;
- show_adj_route_header(vty, bgp, table, header1,
+ show_adj_route_header(vty, peer, table, header1,
header2, json, json_scode,
json_ocode, wide);
if (paf->peer != peer || !adj->attr)
continue;
- show_adj_route_header(vty, bgp, table,
+ show_adj_route_header(vty, peer, table,
header1, header2,
json, json_scode,
json_ocode, wide);
} else if (type == bgp_show_adj_route_bestpath) {
struct bgp_path_info *pi;
- show_adj_route_header(vty, bgp, table, header1, header2,
- json, json_scode, json_ocode,
- wide);
+ show_adj_route_header(vty, peer, table, header1,
+ header2, json, json_scode,
+ json_ocode, wide);
for (pi = bgp_dest_get_bgp_path_info(dest); pi;
pi = pi->next) {
char buf[PREFIX_STRLEN * 2];
char buf2[SU_ADDRSTRLEN];
char rdbuf[RD_ADDRSTRLEN];
- char esi_buf[ESI_BYTES];
+ char esi_buf[ESI_STR_LEN];
/* Network configuration. */
for (pdest = bgp_table_top(bgp->route[afi][safi]); pdest;
#include "bgpd/bgp_rpki_clippy.c"
#endif
-static struct thread *t_rpki;
-static struct thread *t_rpki_start;
-
DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_CACHE, "BGP RPKI Cache server");
DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_CACHE_GROUP, "BGP RPKI Cache server group");
DEFINE_MTYPE_STATIC(BGPD, BGP_RPKI_RTRLIB, "BGP RPKI RTRLib");
#define RETRY_INTERVAL_DEFAULT 600
#define BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT 3
+static struct thread *t_rpki_sync;
+
#define RPKI_DEBUG(...) \
if (rpki_debug) { \
zlog_debug("RPKI: " __VA_ARGS__); \
static int add_tcp_cache(const char *host, const char *port,
const uint8_t preference, const char *bindaddr);
static void print_record(const struct pfx_record *record, struct vty *vty);
-static int is_synchronized(void);
-static int is_running(void);
-static int is_stopping(void);
+static bool is_synchronized(void);
+static bool is_running(void);
+static bool is_stopping(void);
static void route_match_free(void *rule);
static enum route_map_cmd_result_t route_match(void *rule,
const struct prefix *prefix,
static struct rtr_mgr_config *rtr_config;
static struct list *cache_list;
-static int rtr_is_running;
-static int rtr_is_stopping;
+static bool rtr_is_running;
+static bool rtr_is_stopping;
+static bool rtr_is_synced;
static _Atomic int rtr_update_overflow;
-static int rpki_debug;
+static bool rpki_debug;
static unsigned int polling_period;
static unsigned int expire_interval;
static unsigned int retry_interval;
return rtr_mgr_groups;
}
-inline int is_synchronized(void)
+inline bool is_synchronized(void)
{
- return is_running() && rtr_mgr_conf_in_sync(rtr_config);
+ return rtr_is_synced;
}
-inline int is_running(void)
+inline bool is_running(void)
{
return rtr_is_running;
}
-inline int is_stopping(void)
+inline bool is_stopping(void)
{
return rtr_is_stopping;
}
struct listnode *node;
struct prefix *prefix;
struct pfx_record rec;
- int retval;
- int socket = THREAD_FD(thread);
- thread_add_read(bm->master, bgpd_sync_callback, NULL, socket, &t_rpki);
+ thread_add_read(bm->master, bgpd_sync_callback, NULL,
+ rpki_sync_socket_bgpd, NULL);
if (atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst)) {
- while (read(socket, &rec, sizeof(struct pfx_record)) != -1)
+ while (read(rpki_sync_socket_bgpd, &rec,
+ sizeof(struct pfx_record)) != -1)
;
atomic_store_explicit(&rtr_update_overflow, 0,
return;
}
- retval = read(socket, &rec, sizeof(struct pfx_record));
+ int retval =
+ read(rpki_sync_socket_bgpd, &rec, sizeof(struct pfx_record));
if (retval != sizeof(struct pfx_record)) {
- RPKI_DEBUG("Could not read from socket");
- return;
- }
-
- /* RTR-Server crashed/terminated, let's handle and switch
- * to the second available RTR-Server according to preference.
- */
- if (rec.socket && rec.socket->state == RTR_ERROR_FATAL) {
- reset(true);
+ RPKI_DEBUG("Could not read from rpki_sync_socket_bgpd");
return;
}
-
prefix = pfx_record_to_prefix(&rec);
afi_t afi = (rec.prefix.ver == LRTR_IPV4) ? AFI_IP : AFI_IP6;
{
struct bgp *bgp;
struct listnode *node;
- afi_t afi;
- safi_t safi;
for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) {
struct peer *peer;
struct listnode *peer_listnode;
for (ALL_LIST_ELEMENTS_RO(bgp->peer, peer_listnode, peer)) {
- FOREACH_AFI_SAFI (afi, safi) {
- if (!peer->afc_nego[afi][safi])
- continue;
- if (!peer->bgp->rib[afi][safi])
- continue;
+ for (size_t i = 0; i < 2; i++) {
+ safi_t safi;
+ afi_t afi = (i == 0) ? AFI_IP : AFI_IP6;
+
+ for (safi = SAFI_UNICAST; safi < SAFI_MAX;
+ safi++) {
+ if (!peer->bgp->rib[afi][safi])
+ continue;
- bgp_soft_reconfig_in(peer, afi, safi);
+ bgp_soft_reconfig_in(peer, afi, safi);
+ }
}
}
}
}
-static void rpki_connection_status_cb(const struct rtr_mgr_group *group
- __attribute__((unused)),
- enum rtr_mgr_status status,
- const struct rtr_socket *socket
- __attribute__((unused)),
- void *data __attribute__((unused)))
-{
- struct pfx_record rec = {0};
- int retval;
-
- if (is_stopping() ||
- atomic_load_explicit(&rtr_update_overflow, memory_order_seq_cst))
- return;
-
- if (status == RTR_MGR_ERROR)
- rec.socket = socket;
-
- retval = write(rpki_sync_socket_rtr, &rec, sizeof(rec));
- if (retval == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
- atomic_store_explicit(&rtr_update_overflow, 1,
- memory_order_seq_cst);
-
- else if (retval != sizeof(rec))
- RPKI_DEBUG("Could not write to rpki_sync_socket_rtr");
-}
-
static void rpki_update_cb_sync_rtr(struct pfx_table *p __attribute__((unused)),
const struct pfx_record rec,
const bool added __attribute__((unused)))
goto err;
}
+
thread_add_read(bm->master, bgpd_sync_callback, NULL,
- rpki_sync_socket_bgpd, &t_rpki);
+ rpki_sync_socket_bgpd, NULL);
return;
static int bgp_rpki_init(struct thread_master *master)
{
- rpki_debug = 0;
- rtr_is_running = 0;
- rtr_is_stopping = 0;
+ rpki_debug = false;
+ rtr_is_running = false;
+ rtr_is_stopping = false;
+ rtr_is_synced = false;
cache_list = list_new();
cache_list->del = (void (*)(void *)) & free_cache;
return 0;
}
-static void start_expired(struct thread *thread)
+/* Timer callback: re-arms itself until rtr_mgr reports that the initial
+ * cache synchronization has completed, then marks the RPKI session as
+ * synced (rtr_is_synced).
+ */
+static void sync_expired(struct thread *thread)
{
	if (!rtr_mgr_conf_in_sync(rtr_config)) {
-		thread_add_timer(bm->master, start_expired, NULL,
+		RPKI_DEBUG("rtr_mgr is not synced, retrying.");
+		/* Not synced yet: check again after the retry timeout. */
+		thread_add_timer(bm->master, sync_expired, NULL,
			BGP_RPKI_CACHE_SERVER_SYNC_RETRY_TIMEOUT,
-			&t_rpki_start);
+			&t_rpki_sync);
		return;
	}
-	rtr_is_running = 1;
+	RPKI_DEBUG("rtr_mgr sync is done.");
+
+	rtr_is_synced = true;
}
static int start(void)
{
int ret;
- rtr_is_stopping = 0;
+ rtr_is_stopping = false;
+ rtr_is_synced = false;
rtr_update_overflow = 0;
if (list_isempty(cache_list)) {
RPKI_DEBUG("Polling period: %d", polling_period);
ret = rtr_mgr_init(&rtr_config, groups, groups_len, polling_period,
expire_interval, retry_interval,
- rpki_update_cb_sync_rtr, NULL,
- rpki_connection_status_cb, NULL);
+ rpki_update_cb_sync_rtr, NULL, NULL, NULL);
if (ret == RTR_ERROR) {
RPKI_DEBUG("Init rtr_mgr failed.");
return ERROR;
return ERROR;
}
- thread_add_timer(bm->master, start_expired, NULL, 0, &t_rpki_start);
+ thread_add_timer(bm->master, sync_expired, NULL, 0, &t_rpki_sync);
XFREE(MTYPE_BGP_RPKI_CACHE_GROUP, groups);
+ rtr_is_running = true;
+
return SUCCESS;
}
static void stop(void)
{
- rtr_is_stopping = 1;
+ rtr_is_stopping = true;
if (is_running()) {
- THREAD_OFF(t_rpki_start);
+ THREAD_OFF(t_rpki_sync);
rtr_mgr_stop(rtr_config);
rtr_mgr_free(rtr_config);
- rtr_is_running = 0;
+ rtr_is_running = false;
}
}
if (is_running() && !force)
return SUCCESS;
- if (thread_is_scheduled(t_rpki_start))
- return SUCCESS;
-
RPKI_DEBUG("Resetting RPKI Session");
stop();
return start();
DEFUN (no_rpki_polling_period,
no_rpki_polling_period_cmd,
- "no rpki polling_period",
+ "no rpki polling_period [(1-86400)]",
NO_STR
RPKI_OUTPUT_STRING
- "Set polling period back to default\n")
+ "Set polling period back to default\n"
+ "Polling period value\n")
{
polling_period = POLLING_PERIOD_DEFAULT;
return CMD_SUCCESS;
DEFUN (no_rpki_expire_interval,
no_rpki_expire_interval_cmd,
- "no rpki expire_interval",
+ "no rpki expire_interval [(600-172800)]",
NO_STR
RPKI_OUTPUT_STRING
- "Set expire interval back to default\n")
+ "Set expire interval back to default\n"
+ "Expire interval value\n")
{
expire_interval = polling_period * 2;
return CMD_SUCCESS;
DEFUN (no_rpki_retry_interval,
no_rpki_retry_interval_cmd,
- "no rpki retry_interval",
+ "no rpki retry_interval [(1-7200)]",
NO_STR
RPKI_OUTPUT_STRING
- "Set retry interval back to default\n")
+ "Set retry interval back to default\n"
+ "retry interval value\n")
{
retry_interval = RETRY_INTERVAL_DEFAULT;
return CMD_SUCCESS;
}
vty_out(vty, "Connected to group %d\n", group->preference);
for (ALL_LIST_ELEMENTS_RO(cache_list, cache_node, cache)) {
- if (cache->preference == group->preference) {
- struct tr_tcp_config *tcp_config;
+ struct tr_tcp_config *tcp_config;
#if defined(FOUND_SSH)
- struct tr_ssh_config *ssh_config;
+ struct tr_ssh_config *ssh_config;
#endif
-
- switch (cache->type) {
- case TCP:
- tcp_config = cache->tr_config.tcp_config;
- vty_out(vty, "rpki tcp cache %s %s pref %hhu\n",
- tcp_config->host, tcp_config->port,
- cache->preference);
- break;
-
+ switch (cache->type) {
+ case TCP:
+ tcp_config = cache->tr_config.tcp_config;
+ vty_out(vty, "rpki tcp cache %s %s pref %hhu%s\n",
+ tcp_config->host, tcp_config->port,
+ cache->preference,
+ cache->rtr_socket->state == RTR_ESTABLISHED
+ ? " (connected)"
+ : "");
+ break;
#if defined(FOUND_SSH)
- case SSH:
- ssh_config = cache->tr_config.ssh_config;
- vty_out(vty, "rpki ssh cache %s %u pref %hhu\n",
- ssh_config->host, ssh_config->port,
- cache->preference);
- break;
+ case SSH:
+ ssh_config = cache->tr_config.ssh_config;
+ vty_out(vty, "rpki ssh cache %s %u pref %hhu%s\n",
+ ssh_config->host, ssh_config->port,
+ cache->preference,
+ cache->rtr_socket->state == RTR_ESTABLISHED
+ ? " (connected)"
+ : "");
+ break;
#endif
-
- default:
- break;
- }
+ default:
+ break;
}
}
DEBUG_STR
"Enable debugging for rpki\n")
{
- rpki_debug = 1;
+ rpki_debug = true;
return CMD_SUCCESS;
}
DEBUG_STR
"Disable debugging for rpki\n")
{
- rpki_debug = 0;
+ rpki_debug = false;
return CMD_SUCCESS;
}
install_element(ENABLE_NODE, &bgp_rpki_stop_cmd);
/* Install rpki reset command */
+ install_element(ENABLE_NODE, &rpki_reset_cmd);
install_element(RPKI_NODE, &rpki_reset_cmd);
/* Install rpki polling period commands */
intval = *(long *)var_val;
- memset(&addr, 0, sizeof(struct in_addr));
+ memset(&addr, 0, sizeof(addr));
peer = bgpPeerTable_lookup(NULL, name, &length, &addr, 1);
if (!peer)
if (smux_header_table(v, name, length, exact, var_len, write_method)
== MATCH_FAILED)
return NULL;
- memset(&addr, 0, sizeof(struct in_addr));
+ memset(&addr, 0, sizeof(addr));
peer = bgpPeerTable_lookup(v, name, length, &addr, exact);
if (!peer)
if (smux_header_table(v, name, length, exact, var_len, write_method)
== MATCH_FAILED)
return NULL;
- memset(&addr, 0, sizeof(struct prefix_ipv4));
+ memset(&addr, 0, sizeof(addr));
path = bgp4PathAttrLookup(v, name, length, bgp, &addr, exact);
if (!path)
key = jhash_1word(
(peer->shared_network && peer_afi_active_nego(peer, AFI_IP6)),
key);
-
/*
* There are certain peers that must get their own update-group:
* - lonesoul peers
key = jhash_1word(jhash(peer->host, strlen(peer->host), SEED2),
key);
+ if (bgp_debug_neighbor_events(peer)) {
+ zlog_debug(
+ "%pBP Update Group Hash: sort: %d UpdGrpFlags: %u UpdGrpAFFlags: %u",
+ peer, peer->sort, peer->flags & PEER_UPDGRP_FLAGS,
+ flags & PEER_UPDGRP_AF_FLAGS);
+ zlog_debug(
+ "%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u",
+ peer, (uint32_t)peer->addpath_type[afi][safi],
+ peer->cap & PEER_UPDGRP_CAP_FLAGS,
+ peer->af_cap[afi][safi] & PEER_UPDGRP_AF_CAP_FLAGS,
+ peer->v_routeadv, peer->change_local_as);
+ zlog_debug(
+ "%pBP Update Group Hash: max packet size: %u pmax_out: %u Peer Group: %s rmap out: %s",
+ peer, peer->max_packet_size, peer->pmax_out[afi][safi],
+ peer->group ? peer->group->name : "(NONE)",
+ ROUTE_MAP_OUT_NAME(filter) ? ROUTE_MAP_OUT_NAME(filter)
+ : "(NONE)");
+ zlog_debug(
+ "%pBP Update Group Hash: dlist out: %s plist out: %s aslist out: %s usmap out: %s advmap: %s",
+ peer,
+ DISTRIBUTE_OUT_NAME(filter)
+ ? DISTRIBUTE_OUT_NAME(filter)
+ : "(NONE)",
+ PREFIX_LIST_OUT_NAME(filter)
+ ? PREFIX_LIST_OUT_NAME(filter)
+ : "(NONE)",
+ FILTER_LIST_OUT_NAME(filter)
+ ? FILTER_LIST_OUT_NAME(filter)
+ : "(NONE)",
+ UNSUPPRESS_MAP_NAME(filter)
+ ? UNSUPPRESS_MAP_NAME(filter)
+ : "(NONE)",
+ ADVERTISE_MAP_NAME(filter) ? ADVERTISE_MAP_NAME(filter)
+ : "(NONE)");
+ zlog_debug(
+ "%pBP Update Group Hash: default rmap: %s shared network and afi active network: %d",
+ peer,
+ peer->default_rmap[afi][safi].name
+ ? peer->default_rmap[afi][safi].name
+ : "(NONE)",
+ peer->shared_network &&
+ peer_afi_active_nego(peer, AFI_IP6));
+ zlog_debug(
+ "%pBP Update Group Hash: Lonesoul: %u ORF prefix: %u ORF old: %u max prefix out: %u",
+ peer, CHECK_FLAG(peer->flags, PEER_FLAG_LONESOUL),
+ CHECK_FLAG(peer->af_cap[afi][safi],
+ PEER_CAP_ORF_PREFIX_SM_RCV),
+ CHECK_FLAG(peer->af_cap[afi][safi],
+ PEER_CAP_ORF_PREFIX_SM_OLD_RCV),
+ CHECK_FLAG(peer->af_flags[afi][safi],
+ PEER_FLAG_MAX_PREFIX_OUT));
+ zlog_debug("%pBP Update Group Hash key: %u", peer, key);
+ }
return key;
}
updgrp = hash_get(paf->peer->bgp->update_groups[paf->afid], &tmp,
updgrp_hash_alloc);
- if (!updgrp)
- return NULL;
update_group_checkin(updgrp);
if (BGP_DEBUG(update_groups, UPDATE_GROUPS))
{ .val_bool = false, .match_version = "< 7.6", },
{ .val_bool = true },
);
+FRR_CFG_DEFAULT_BOOL(BGP_GRACEFUL_NOTIFICATION,
+ { .val_bool = false, .match_version = "< 8.3", },
+ { .val_bool = true },
+);
+FRR_CFG_DEFAULT_BOOL(BGP_HARD_ADMIN_RESET,
+ { .val_bool = false, .match_version = "< 8.3", },
+ { .val_bool = true },
+);
DEFINE_HOOK(bgp_inst_config_write,
(struct bgp *bgp, struct vty *vty),
SET_FLAG((*bgp)->flags, BGP_FLAG_EBGP_REQUIRES_POLICY);
if (DFLT_BGP_SUPPRESS_DUPLICATES)
SET_FLAG((*bgp)->flags, BGP_FLAG_SUPPRESS_DUPLICATES);
+ if (DFLT_BGP_GRACEFUL_NOTIFICATION)
+ SET_FLAG((*bgp)->flags, BGP_FLAG_GRACEFUL_NOTIFICATION);
+ if (DFLT_BGP_HARD_ADMIN_RESET)
+ SET_FLAG((*bgp)->flags, BGP_FLAG_HARD_ADMIN_RESET);
ret = BGP_SUCCESS;
}
BGP_L2VPN_EVPN_ADV_IPV6_UNICAST) ||
CHECK_FLAG(tmp_bgp->af_flags[AFI_L2VPN][SAFI_EVPN],
BGP_L2VPN_EVPN_ADV_IPV6_UNICAST_GW_IP))) ||
- (tmp_bgp->vnihash && hashcount(tmp_bgp->vnihash))) {
+ (hashcount(tmp_bgp->vnihash))) {
vty_out(vty,
"%% Cannot delete default BGP instance. Dependent VRF instances exist\n");
return CMD_WARNING_CONFIG_FAILED;
return CMD_SUCCESS;
}
+/* bgp session-dscp */
+
+DEFPY (bgp_session_dscp,
+       bgp_session_dscp_cmd,
+       "bgp session-dscp (0-63)$dscp",
+       BGP_STR
+       "Override default (C6) bgp TCP session DSCP value\n"
+       "Manually configured dscp parameter\n")
+{
+	/* DSCP occupies the upper six bits of the IP TOS byte, so shift
+	 * the configured 6-bit value into position before storing it.
+	 */
+	bm->tcp_dscp = dscp << 2;
+
+	return CMD_SUCCESS;
+}
+
+DEFPY (no_bgp_session_dscp,
+       no_bgp_session_dscp_cmd,
+       "no bgp session-dscp [(0-63)]",
+       NO_STR
+       BGP_STR
+       "Override default (C6) bgp TCP session DSCP value\n"
+       "Manually configured dscp parameter\n")
+{
+	/* Restore the default TOS value (IP precedence 6, i.e. DSCP CS6,
+	 * the conventional marking for routing-protocol traffic).
+	 */
+	bm->tcp_dscp = IPTOS_PREC_INTERNETCONTROL;
+
+	return CMD_SUCCESS;
+}
/* BGP router-id. */
VTY_DECLVAR_CONTEXT(bgp, bgp);
int idx = 0;
- argv_find(argv, argc, "(5-86400)", &idx);
- bgp->v_maxmed_onstartup = strtoul(argv[idx]->arg, NULL, 10);
+ if (argv_find(argv, argc, "(5-86400)", &idx))
+ bgp->v_maxmed_onstartup = strtoul(argv[idx]->arg, NULL, 10);
if (argv_find(argv, argc, "(0-4294967295)", &idx))
bgp->maxmed_onstartup_value = strtoul(argv[idx]->arg, NULL, 10);
else
VTY_DECLVAR_CONTEXT(bgp, bgp);
int idx = 0;
- argv_find(argv, argc, "(0-4294967295)", &idx);
+
bgp->heuristic_coalesce = false;
- bgp->coalesce_time = strtoul(argv[idx]->arg, NULL, 10);
+
+ if (argv_find(argv, argc, "(0-4294967295)", &idx))
+ bgp->coalesce_time = strtoul(argv[idx]->arg, NULL, 10);
+
return CMD_SUCCESS;
}
"Match the cluster length\n")
{
int idx_number = 2;
- return bgp_maxpaths_config_vty(
- vty, BGP_PEER_IBGP, argv[idx_number]->arg,
- BGP_FLAG_IBGP_MULTIPATH_SAME_CLUSTERLEN, 1);
+ return bgp_maxpaths_config_vty(vty, BGP_PEER_IBGP,
+ argv[idx_number]->arg, true, 1);
}
ALIAS_HIDDEN(bgp_maxpaths_ibgp_cluster, bgp_maxpaths_ibgp_cluster_hidden_cmd,
if (bgp->maxpaths[afi][safi].maxpaths_ibgp != multipath_num) {
vty_out(vty, " maximum-paths ibgp %d",
bgp->maxpaths[afi][safi].maxpaths_ibgp);
- if (CHECK_FLAG(bgp->maxpaths[afi][safi].ibgp_flags,
- BGP_FLAG_IBGP_MULTIPATH_SAME_CLUSTERLEN))
+ if (bgp->maxpaths[afi][safi].same_clusterlen)
vty_out(vty, " equal-cluster-length");
vty_out(vty, "\n");
}
return CMD_SUCCESS;
}
+/* Toggle Graceful-Restart support for BGP NOTIFICATION messages (the GR
+ * capability N-bit) on the current BGP instance.
+ */
+DEFPY (bgp_graceful_restart_notification,
+       bgp_graceful_restart_notification_cmd,
+       "[no$no] bgp graceful-restart notification",
+       NO_STR
+       BGP_STR
+       "Graceful restart capability parameters\n"
+       "Indicate Graceful Restart support for BGP NOTIFICATION messages\n")
+{
+	VTY_DECLVAR_CONTEXT(bgp, bgp);
+
+	if (no)
+		UNSET_FLAG(bgp->flags, BGP_FLAG_GRACEFUL_NOTIFICATION);
+	else
+		SET_FLAG(bgp->flags, BGP_FLAG_GRACEFUL_NOTIFICATION);
+
+	return CMD_SUCCESS;
+}
+
+/* Toggle whether an 'Administrative Reset' is sent as a Hard Reset CEASE
+ * NOTIFICATION on the current BGP instance.
+ */
+DEFPY (bgp_administrative_reset,
+       bgp_administrative_reset_cmd,
+       "[no$no] bgp hard-administrative-reset",
+       NO_STR
+       BGP_STR
+       "Send Hard Reset CEASE Notification for 'Administrative Reset'\n")
+{
+	VTY_DECLVAR_CONTEXT(bgp, bgp);
+
+	if (no)
+		UNSET_FLAG(bgp->flags, BGP_FLAG_HARD_ADMIN_RESET);
+	else
+		SET_FLAG(bgp->flags, BGP_FLAG_HARD_ADMIN_RESET);
+
+	return CMD_SUCCESS;
+}
+
DEFUN (bgp_graceful_restart_disable,
bgp_graceful_restart_disable_cmd,
"bgp graceful-restart-disable",
"Advertise extended next-hop capability to the peer\n")
{
int idx_peer = 1;
+ struct peer *peer;
+
+ peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg);
+ if (peer && peer->conf_if)
+ return CMD_SUCCESS;
+
return peer_flag_set_vty(vty, argv[idx_peer]->arg,
PEER_FLAG_CAPABILITY_ENHE);
}
"Advertise extended next-hop capability to the peer\n")
{
int idx_peer = 2;
+ struct peer *peer;
+
+ peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg);
+ if (peer && peer->conf_if) {
+ vty_out(vty,
+ "Peer %s cannot have capability extended-nexthop turned off\n",
+ argv[idx_peer]->arg);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
return peer_flag_unset_vty(vty, argv[idx_peer]->arg,
PEER_FLAG_CAPABILITY_ENHE);
}
json_object_string_add(json_peer,
"lastNotificationReason",
errorcodesubcode_str);
+ json_object_boolean_add(json_peer,
+ "lastNotificationHardReset",
+ peer->notify.hard_reset);
if (peer->last_reset == PEER_DOWN_NOTIFY_RECEIVED
&& peer->notify.code == BGP_NOTIFY_CEASE
&& (peer->notify.subcode
subcode_str =
bgp_notify_subcode_str(peer->notify.code,
peer->notify.subcode);
- vty_out(vty, " Notification %s (%s%s)\n",
+ vty_out(vty, " Notification %s (%s%s%s)\n",
peer->last_reset == PEER_DOWN_NOTIFY_SEND
- ? "sent"
- : "received",
- code_str, subcode_str);
+ ? "sent"
+ : "received",
+ code_str, subcode_str,
+ peer->notify.hard_reset
+ ? bgp_notify_subcode_str(
+ BGP_NOTIFY_CEASE,
+ BGP_NOTIFY_CEASE_HARD_RESET)
+ : "");
} else {
vty_out(vty, " %s\n",
peer_down_str[(int)peer->last_reset]);
}
}
-static void bgp_show_neighnor_graceful_restart_rbit(struct vty *vty,
- struct peer *p,
- bool use_json,
- json_object *json)
+static void bgp_show_neighnor_graceful_restart_flags(struct vty *vty,
+ struct peer *p,
+ bool use_json,
+ json_object *json)
{
- bool rbit_status = false;
-
- if (!use_json)
- vty_out(vty, "\n R bit: ");
+ bool rbit = false;
+ bool nbit = false;
if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_ADV)
&& (CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV))
&& (peer_established(p))) {
-
- if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_BIT_RCV))
- rbit_status = true;
- else
- rbit_status = false;
+ rbit = CHECK_FLAG(p->cap, PEER_CAP_GRACEFUL_RESTART_R_BIT_RCV);
+ nbit = CHECK_FLAG(p->cap, PEER_CAP_GRACEFUL_RESTART_N_BIT_RCV);
}
- if (rbit_status) {
- if (use_json)
- json_object_boolean_true_add(json, "rBit");
- else
- vty_out(vty, "True\n");
+ if (use_json) {
+ json_object_boolean_add(json, "rBit", rbit);
+ json_object_boolean_add(json, "nBit", nbit);
} else {
- if (use_json)
- json_object_boolean_false_add(json, "rBit");
- else
- vty_out(vty, "False\n");
+ vty_out(vty, "\n R bit: %s", rbit ? "True" : "False");
+ vty_out(vty, "\n N bit: %s\n", nbit ? "True" : "False");
}
}
/* capability extended-nexthop */
if (peergroup_flag_check(peer, PEER_FLAG_CAPABILITY_ENHE)) {
- if (CHECK_FLAG(peer->flags_invert, PEER_FLAG_CAPABILITY_ENHE))
+ if (CHECK_FLAG(peer->flags_invert, PEER_FLAG_CAPABILITY_ENHE) &&
+ !peer->conf_if)
vty_out(vty,
" no neighbor %s capability extended-nexthop\n",
addr);
if (CHECK_FLAG(bm->flags, BM_FLAG_SEND_EXTRA_DATA_TO_ZEBRA))
vty_out(vty, "bgp send-extra-data zebra\n");
+ /* BGP session DSCP value */
+ if (bm->tcp_dscp != IPTOS_PREC_INTERNETCONTROL)
+ vty_out(vty, "bgp session-dscp %u\n", bm->tcp_dscp >> 2);
+
/* BGP configuration. */
for (ALL_LIST_ELEMENTS(bm->bgp, mnode, mnnode, bgp)) {
? ""
: "no ");
+ /* Send Hard Reset CEASE Notification for 'Administrative Reset'
+ */
+ if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_HARD_ADMIN_RESET) !=
+ SAVE_BGP_HARD_ADMIN_RESET)
+ vty_out(vty, " %sbgp hard-administrative-reset\n",
+ CHECK_FLAG(bgp->flags,
+ BGP_FLAG_HARD_ADMIN_RESET)
+ ? ""
+ : "no ");
+
/* BGP default <afi>-<safi> */
FOREACH_AFI_SAFI (afi, safi) {
if (afi == AFI_IP && safi == SAFI_UNICAST) {
vty_out(vty, " bgp graceful-restart restart-time %u\n",
bgp->restart_time);
+ if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_GRACEFUL_NOTIFICATION) !=
+ SAVE_BGP_GRACEFUL_NOTIFICATION)
+ vty_out(vty, " %sbgp graceful-restart notification\n",
+ CHECK_FLAG(bgp->flags,
+ BGP_FLAG_GRACEFUL_NOTIFICATION)
+ ? ""
+ : "no ");
+
if (bgp->select_defer_time != BGP_DEFAULT_SELECT_DEFERRAL_TIME)
vty_out(vty,
" bgp graceful-restart select-defer-time %u\n",
/* "no router bgp" commands. */
install_element(CONFIG_NODE, &no_router_bgp_cmd);
+ /* "bgp session-dscp command */
+ install_element(CONFIG_NODE, &bgp_session_dscp_cmd);
+ install_element(CONFIG_NODE, &no_bgp_session_dscp_cmd);
+
/* "bgp router-id" commands. */
install_element(BGP_NODE, &bgp_router_id_cmd);
install_element(BGP_NODE, &no_bgp_router_id_cmd);
&no_bgp_graceful_restart_select_defer_time_cmd);
install_element(BGP_NODE, &bgp_graceful_restart_preserve_fw_cmd);
install_element(BGP_NODE, &no_bgp_graceful_restart_preserve_fw_cmd);
+ install_element(BGP_NODE, &bgp_graceful_restart_notification_cmd);
install_element(BGP_NODE, &bgp_graceful_restart_disable_eor_cmd);
install_element(BGP_NODE, &no_bgp_graceful_restart_disable_eor_cmd);
install_element(BGP_NODE, &bgp_graceful_shutdown_cmd);
install_element(BGP_NODE, &no_bgp_graceful_shutdown_cmd);
+ /* "bgp hard-administrative-reset" commands */
+ install_element(BGP_NODE, &bgp_administrative_reset_cmd);
+
/* "bgp long-lived-graceful-restart" commands */
install_element(BGP_NODE, &bgp_llgr_stalepath_time_cmd);
install_element(BGP_NODE, &no_bgp_llgr_stalepath_time_cmd);
int style = COMMUNITY_LIST_STANDARD;
int idx = 0;
- argv_find(argv, argc, "(0-4294967295)", &idx);
- if (idx)
+ if (argv_find(argv, argc, "(0-4294967295)", &idx))
seq = argv[idx]->arg;
idx = 0;
char *seq = NULL;
int idx = 0;
- argv_find(argv, argc, "(0-4294967295)", &idx);
- if (idx)
+ if (argv_find(argv, argc, "(0-4294967295)", &idx))
seq = argv[idx]->arg;
idx = 0;
int style = COMMUNITY_LIST_EXPANDED;
int idx = 0;
- argv_find(argv, argc, "(0-4294967295)", &idx);
- if (idx)
+ if (argv_find(argv, argc, "(0-4294967295)", &idx))
seq = argv[idx]->arg;
idx = 0;
int style = COMMUNITY_LIST_EXPANDED;
int idx = 0;
- argv_find(argv, argc, "(0-4294967295)", &idx);
- if (idx)
+ if (argv_find(argv, argc, "(0-4294967295)", &idx))
seq = argv[idx]->arg;
idx = 0;
"V AS LocalAS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc\n"
#define BGP_SHOW_SUMMARY_HEADER_FAILED "EstdCnt DropCnt ResetTime Reason\n"
-#define BGP_SHOW_PEER_GR_CAPABILITY( \
- vty, p, use_json, json) \
- do { \
- bgp_show_neighbor_graceful_restart_local_mode( \
- vty, p, use_json, json); \
- bgp_show_neighbor_graceful_restart_remote_mode( \
- vty, p, use_json, json); \
- bgp_show_neighnor_graceful_restart_rbit( \
- vty, p, use_json, json); \
- bgp_show_neighbor_graceful_restart_time( \
- vty, p, use_json, json); \
- bgp_show_neighbor_graceful_restart_capability_per_afi_safi(\
- vty, p, use_json, json); \
+#define BGP_SHOW_PEER_GR_CAPABILITY(vty, p, use_json, json) \
+ do { \
+ bgp_show_neighbor_graceful_restart_local_mode(vty, p, \
+ use_json, json); \
+ bgp_show_neighbor_graceful_restart_remote_mode( \
+ vty, p, use_json, json); \
+ bgp_show_neighnor_graceful_restart_flags(vty, p, use_json, \
+ json); \
+ bgp_show_neighbor_graceful_restart_time(vty, p, use_json, \
+ json); \
+ bgp_show_neighbor_graceful_restart_capability_per_afi_safi( \
+ vty, p, use_json, json); \
} while (0)
#define VTY_BGP_GR_DEFINE_LOOP_VARIABLE \
ifindex_t svi_ifindex;
bool is_anycast_mac = false;
- memset(&svi_rmac, 0, sizeof(struct ethaddr));
- memset(&originator_ip, 0, sizeof(struct in_addr));
+ memset(&svi_rmac, 0, sizeof(svi_rmac));
+ memset(&originator_ip, 0, sizeof(originator_ip));
s = zclient->ibuf;
l3vni = stream_getl(s);
if (cmd == ZEBRA_L3VNI_ADD) {
struct bgp *bgp_vrf = NULL;
struct prefix p;
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
s = zclient->ibuf;
stream_get(&p, s, sizeof(struct prefix));
if (!vrf_is_backend_netns() && bgp->vrf_id != nh->vrf_id)
return;
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
if (afi != AFI_IP && afi != AFI_IP6)
return;
p.family = afi2family(afi);
/* Check if capability is already sent. If the flag force is set
* send the capability since this can be initial bgp configuration
*/
- memset(&api, 0, sizeof(struct zapi_cap));
+ memset(&api, 0, sizeof(api));
if (disable) {
api.cap = ZEBRA_CLIENT_GR_DISABLE;
api.vrf_id = bgp->vrf_id;
return BGP_GR_FAILURE;
}
- memset(&api, 0, sizeof(struct zapi_cap));
+ memset(&api, 0, sizeof(api));
api.cap = ZEBRA_CLIENT_RIB_STALE_TIME;
api.stale_removal_time = bgp->rib_stale_time;
api.vrf_id = bgp->vrf_id;
extern void bgp_zebra_announce(struct bgp_dest *dest, const struct prefix *p,
struct bgp_path_info *path, struct bgp *bgp,
afi_t afi, safi_t safi);
-extern void bgp_zebra_announce_table(struct bgp *, afi_t, safi_t);
+extern void bgp_zebra_announce_table(struct bgp *bgp, afi_t afi, safi_t safi);
extern void bgp_zebra_withdraw(const struct prefix *p,
struct bgp_path_info *path, struct bgp *bgp,
safi_t safi);
extern void bgp_zebra_initiate_radv(struct bgp *bgp, struct peer *peer);
extern void bgp_zebra_terminate_radv(struct bgp *bgp, struct peer *peer);
-extern void bgp_zebra_instance_register(struct bgp *);
-extern void bgp_zebra_instance_deregister(struct bgp *);
+extern void bgp_zebra_instance_register(struct bgp *bgp);
+extern void bgp_zebra_instance_deregister(struct bgp *bgp);
extern void bgp_redistribute_redo(struct bgp *bgp);
-extern struct bgp_redist *bgp_redist_lookup(struct bgp *, afi_t, uint8_t,
- unsigned short);
-extern struct bgp_redist *bgp_redist_add(struct bgp *, afi_t, uint8_t,
- unsigned short);
-extern int bgp_redistribute_set(struct bgp *, afi_t, int, unsigned short,
- bool changed);
-extern int bgp_redistribute_resend(struct bgp *, afi_t, int, unsigned short);
+extern struct bgp_redist *bgp_redist_lookup(struct bgp *bgp, afi_t afi,
+ uint8_t type,
+ unsigned short instance);
+extern struct bgp_redist *bgp_redist_add(struct bgp *bgp, afi_t afi,
+ uint8_t type, unsigned short instance);
+extern int bgp_redistribute_set(struct bgp *bgp, afi_t afi, int type,
+ unsigned short instance, bool changed);
+extern int bgp_redistribute_resend(struct bgp *bgp, afi_t afi, int type,
+ unsigned short instance);
extern bool bgp_redistribute_rmap_set(struct bgp_redist *red, const char *name,
struct route_map *route_map);
-extern bool bgp_redistribute_metric_set(struct bgp *, struct bgp_redist *,
- afi_t, int, uint32_t);
-extern int bgp_redistribute_unset(struct bgp *, afi_t, int, unsigned short);
-extern int bgp_redistribute_unreg(struct bgp *, afi_t, int, unsigned short);
+extern bool bgp_redistribute_metric_set(struct bgp *bgp, struct bgp_redist *red,
+ afi_t afi, int type, uint32_t metric);
+extern int bgp_redistribute_unset(struct bgp *bgp, afi_t afi, int type,
+ unsigned short instance);
+extern int bgp_redistribute_unreg(struct bgp *bgp, afi_t afi, int type,
+ unsigned short instance);
-extern struct interface *if_lookup_by_ipv4(struct in_addr *, vrf_id_t);
-extern struct interface *if_lookup_by_ipv4_exact(struct in_addr *, vrf_id_t);
-extern struct interface *if_lookup_by_ipv6(struct in6_addr *, ifindex_t,
- vrf_id_t);
-extern struct interface *if_lookup_by_ipv6_exact(struct in6_addr *, ifindex_t,
- vrf_id_t);
+extern struct interface *if_lookup_by_ipv4(struct in_addr *addr,
+ vrf_id_t vrf_id);
+extern struct interface *if_lookup_by_ipv4_exact(struct in_addr *addr,
+ vrf_id_t vrf_id);
+extern struct interface *if_lookup_by_ipv6(struct in6_addr *addr,
+ ifindex_t ifindex, vrf_id_t vrf_id);
+extern struct interface *if_lookup_by_ipv6_exact(struct in6_addr *addr,
+ ifindex_t ifindex,
+ vrf_id_t vrf_id);
extern int bgp_zebra_advertise_subnet(struct bgp *bgp, int advertise,
vni_t vni);
-extern int bgp_zebra_advertise_gw_macip(struct bgp *, int, vni_t);
+extern int bgp_zebra_advertise_gw_macip(struct bgp *bgp, int advertise,
+ vni_t vni);
extern int bgp_zebra_advertise_svi_macip(struct bgp *bgp, int advertise,
vni_t vni);
-extern int bgp_zebra_advertise_all_vni(struct bgp *, int);
+extern int bgp_zebra_advertise_all_vni(struct bgp *bgp, int advertise);
extern int bgp_zebra_dup_addr_detection(struct bgp *bgp);
extern int bgp_zebra_vxlan_flood_control(struct bgp *bgp,
enum vxlan_flood_control flood_ctrl);
extern int bgp_zebra_num_connects(void);
-extern bool bgp_zebra_nexthop_set(union sockunion *, union sockunion *,
- struct bgp_nexthop *, struct peer *);
+extern bool bgp_zebra_nexthop_set(union sockunion *local,
+ union sockunion *remote,
+ struct bgp_nexthop *nexthop,
+ struct peer *peer);
struct bgp_pbr_action;
struct bgp_pbr_match;
struct bgp_pbr_rule;
zlog_info("Disabled BGP route installation to RIB (Zebra)");
for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) {
- FOREACH_AFI_SAFI(afi, safi)
+ FOREACH_AFI_SAFI (afi, safi) {
+ /*
+ * Stop a crash, more work is needed
+ * here to properly add/remove these types of
+ * routes from zebra.
+ */
+ if (!bgp_fibupd_safi(safi))
+ continue;
+
bgp_zebra_withdraw_table_all_subtypes(bgp, afi, safi);
+ }
}
zlog_info("All routes have been withdrawn from RIB (Zebra)");
zlog_info("Enabled BGP route installation to RIB (Zebra)");
for (ALL_LIST_ELEMENTS_RO(bm->bgp, node, bgp)) {
- FOREACH_AFI_SAFI(afi, safi)
+ FOREACH_AFI_SAFI (afi, safi) {
+ /*
+ * Stop a crash, more work is needed
+ * here to properly add/remove these types
+ * of routes from zebra
+ */
+ if (!bgp_fibupd_safi(safi))
+ continue;
+
bgp_zebra_announce_table_all_subtypes(bgp, afi, safi);
+ }
}
zlog_info("All routes have been installed in RIB (Zebra)");
XFREE(MTYPE_PEER_UPDATE_SOURCE, peer->update_if);
- XFREE(MTYPE_TMP, peer->notify.data);
+ XFREE(MTYPE_BGP_NOTIFICATION, peer->notify.data);
memset(&peer->notify, 0, sizeof(struct bgp_notify));
if (peer->clear_node_queue)
/*
* Since our su changed we need to del/add peer to the peerhash
*/
- hash_get(peer->bgp->peerhash, peer, hash_alloc_intern);
+ (void)hash_get(peer->bgp->peerhash, peer, hash_alloc_intern);
}
void bgp_recalculate_afi_safi_bestpaths(struct bgp *bgp, afi_t afi, safi_t safi)
peer = peer_lock(peer); /* bgp peer list reference */
peer->group = group;
listnode_add_sort(bgp->peer, peer);
- hash_get(bgp->peerhash, peer, hash_alloc_intern);
+ (void)hash_get(bgp->peerhash, peer, hash_alloc_intern);
/* Adjust update-group coalesce timer heuristics for # peers. */
if (bgp->heuristic_coalesce) {
bgp->evpn_info = XCALLOC(MTYPE_BGP_EVPN_INFO,
sizeof(struct bgp_evpn_info));
-
bgp_evpn_init(bgp);
bgp_evpn_vrf_es_init(bgp);
bgp_pbr_init(bgp);
if (set) {
bgp_zebra_initiate_radv(peer->bgp, peer);
} else if (peer_group_active(peer)) {
- if (!CHECK_FLAG(peer->group->conf->flags, flag))
+ if (!CHECK_FLAG(peer->group->conf->flags,
+ flag) &&
+ !peer->conf_if)
bgp_zebra_terminate_radv(peer->bgp,
peer);
} else
/* Update flag on peer-group member. */
COND_FLAG(member->flags, flag, set != member_invert);
- if (flag == PEER_FLAG_CAPABILITY_ENHE)
+ if (flag == PEER_FLAG_CAPABILITY_ENHE && !member->conf_if)
set ? bgp_zebra_initiate_radv(member->bgp, member)
: bgp_zebra_terminate_radv(member->bgp, member);
{
qobj_init();
- memset(&bgp_master, 0, sizeof(struct bgp_master));
+ memset(&bgp_master, 0, sizeof(bgp_master));
bm = &bgp_master;
bm->bgp = list_new();
bm->terminating = false;
bm->socket_buffer = buffer_size;
bm->wait_for_fib = false;
+ bm->tcp_dscp = IPTOS_PREC_INTERNETCONTROL;
bgp_mac_init();
/* init the rd id space.
#define BM_FLAG_SEND_EXTRA_DATA_TO_ZEBRA (1 << 1)
bool terminating; /* global flag that sigint terminate seen */
+
+ /* DSCP value for TCP sessions */
+ uint8_t tcp_dscp;
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(bgp_master);
#define BGP_LINK_BW_REF_BW 1
/* BGP flags. */
- uint32_t flags;
+ uint64_t flags;
#define BGP_FLAG_ALWAYS_COMPARE_MED (1 << 0)
#define BGP_FLAG_DETERMINISTIC_MED (1 << 1)
#define BGP_FLAG_MED_MISSING_AS_WORST (1 << 2)
#define BGP_FLAG_SUPPRESS_FIB_PENDING (1 << 26)
#define BGP_FLAG_SUPPRESS_DUPLICATES (1 << 27)
#define BGP_FLAG_PEERTYPE_MULTIPATH_RELAX (1 << 29)
+/* Indicate Graceful Restart support for BGP NOTIFICATION messages */
+#define BGP_FLAG_GRACEFUL_NOTIFICATION (1 << 30)
+/* Send Hard Reset CEASE Notification for 'Administrative Reset' */
+#define BGP_FLAG_HARD_ADMIN_RESET (1 << 31)
/* BGP default address-families.
* New peers inherit enabled afi/safis from bgp instance.
struct bgp_maxpaths_cfg {
uint16_t maxpaths_ebgp;
uint16_t maxpaths_ibgp;
- uint16_t ibgp_flags;
-#define BGP_FLAG_IBGP_MULTIPATH_SAME_CLUSTERLEN (1 << 0)
+ bool same_clusterlen;
} maxpaths[AFI_MAX][SAFI_MAX];
_Atomic uint32_t wpkt_quanta; // max # packets to write per i/o cycle
char *data;
bgp_size_t length;
uint8_t *raw_data;
+ bool hard_reset;
};
/* Next hop self address. */
#define PEER_CAP_RESTART_RCV (1U << 6) /* restart received */
#define PEER_CAP_AS4_ADV (1U << 7) /* as4 advertised */
#define PEER_CAP_AS4_RCV (1U << 8) /* as4 received */
-#define PEER_CAP_RESTART_BIT_ADV (1U << 9) /* sent restart state */
-#define PEER_CAP_RESTART_BIT_RCV (1U << 10) /* peer restart state */
+/* sent graceful-restart restart (R) bit */
+#define PEER_CAP_GRACEFUL_RESTART_R_BIT_ADV (1U << 9)
+/* received graceful-restart restart (R) bit */
+#define PEER_CAP_GRACEFUL_RESTART_R_BIT_RCV (1U << 10)
#define PEER_CAP_ADDPATH_ADV (1U << 11) /* addpath advertised */
#define PEER_CAP_ADDPATH_RCV (1U << 12) /* addpath received */
#define PEER_CAP_ENHE_ADV (1U << 13) /* Extended nexthop advertised */
#define PEER_CAP_EXTENDED_MESSAGE_RCV (1U << 20)
#define PEER_CAP_LLGR_ADV (1U << 21)
#define PEER_CAP_LLGR_RCV (1U << 22)
+/* sent graceful-restart notification (N) bit */
+#define PEER_CAP_GRACEFUL_RESTART_N_BIT_ADV (1U << 23)
+/* received graceful-restart notification (N) bit */
+#define PEER_CAP_GRACEFUL_RESTART_N_BIT_RCV (1U << 24)
/* Capability flags (reset in bgp_stop) */
uint32_t af_cap[AFI_MAX][SAFI_MAX];
/* timestamp when the last msg was written */
_Atomic time_t last_update;
+ /* only updated under io_mtx.
+ * last_sendq_warn is only for ratelimiting log warning messages.
+ */
+ time_t last_sendq_ok, last_sendq_warn;
+
/* Notify data. */
struct bgp_notify notify;
#define BGP_NOTIFY_CEASE_CONFIG_CHANGE 6
#define BGP_NOTIFY_CEASE_COLLISION_RESOLUTION 7
#define BGP_NOTIFY_CEASE_OUT_OF_RESOURCE 8
+#define BGP_NOTIFY_CEASE_HARD_RESET 9
/* BGP_NOTIFY_ROUTE_REFRESH_ERR sub codes (RFC 7313). */
#define BGP_NOTIFY_ROUTE_REFRESH_INVALID_MSG_LEN 1
if (vrf == NULL)
return 0;
RB_FOREACH (ifp, if_name_head, &vrf->ifaces_by_name) {
- if (strncmp(ifp->name, bgp->name, VRF_NAMSIZ) == 0)
+ if (strcmp(ifp->name, bgp->name) == 0)
continue;
if (!active || if_is_up(ifp))
count++;
sl);
for (cursor = NULL,
- rc = skiplist_next(sl, NULL, (void **)&m,
- (void **)&cursor);
+ rc = skiplist_next(sl, NULL, (void **)&m, &cursor);
!rc; rc = skiplist_next(sl, NULL, (void **)&m,
- (void **)&cursor)) {
+ &cursor)) {
#if DEBUG_PENDING_DELETE_ROUTE
vnc_zlog_debug_verbose("%s: eth monitor rfd=%p",
struct prefix pfx_orig_nexthop;
memset(&pfx_orig_nexthop, 0,
- sizeof(struct prefix)); /* keep valgrind happy */
+ sizeof(pfx_orig_nexthop)); /* keep valgrind happy */
pkey = p->key;
pb = p->value;
struct prefix pfx_orig_nexthop;
memset(&pfx_orig_nexthop, 0,
- sizeof(struct prefix)); /* keep valgrind happy */
+ sizeof(pfx_orig_nexthop)); /* keep valgrind happy */
/*
* prefix list check
* must be freed before we return. It's easier to put it after
* all of the possible returns above.
*/
- memset(&hattr, 0, sizeof(struct attr));
+ memset(&hattr, 0, sizeof(hattr));
/* hattr becomes a ghost attr */
hattr = *attr;
* must be freed before we return. It's easier to put it after
* all of the possible returns above.
*/
- memset(&hattr, 0, sizeof(struct attr));
+ memset(&hattr, 0, sizeof(hattr));
/* hattr becomes a ghost attr */
hattr = *attr;
* must be freed before we return. It's easier to put it after
* all of the possible returns above.
*/
- memset(&hattr, 0, sizeof(struct attr));
+ memset(&hattr, 0, sizeof(hattr));
/* hattr becomes a ghost attr */
hattr = *attr;
uint32_t local_pref;
memset(&pfx_unicast_nexthop, 0,
- sizeof(struct prefix)); /* keep valgrind happy */
+ sizeof(pfx_unicast_nexthop)); /* keep valgrind happy */
if (VNC_DEBUG(IMPORT_BGP_ADD_ROUTE))
vnc_zlog_debug_any(
struct prefix pfx_unicast_nexthop;
memset(&pfx_unicast_nexthop, 0,
- sizeof(struct prefix)); /* keep valgrind happy */
+ sizeof(pfx_unicast_nexthop)); /* keep valgrind happy */
if (process_unicast_route(bgp, afi, &pb->upfx, pb->ubpi, &ecom,
&pfx_unicast_nexthop)) {
prd = NULL;
/* use local_pref from unicast route */
- memset(&new_attr, 0, sizeof(struct attr));
+ memset(&new_attr, 0, sizeof(new_attr));
new_attr = *bpi_interior->attr;
if (info->attr->flag
& ATTR_FLAG_BIT(BGP_ATTR_LOCAL_PREF)) {
return;
memset(&pfx_orig_nexthop, 0,
- sizeof(struct prefix)); /* keep valgrind happy */
+ sizeof(pfx_orig_nexthop)); /* keep valgrind happy */
h = bgp_default->rfapi;
hc = bgp_default->rfapi_cfg;
prd = NULL;
/* use local_pref from unicast route */
- memset(&new_attr, 0, sizeof(struct attr));
+ memset(&new_attr, 0, sizeof(new_attr));
new_attr = *bpi->attr;
if (bpi_exterior
&& (bpi_exterior->attr->flag
void *rfp_start(struct thread_master *master, struct rfapi_rfp_cfg **cfgp,
struct rfapi_rfp_cb_methods **cbmp)
{
- memset(&global_rfi, 0, sizeof(struct rfp_instance_t));
+ memset(&global_rfi, 0, sizeof(global_rfi));
global_rfi.master = master; /* for BGPD threads */
/* initilize struct rfapi_rfp_cfg, see rfapi.h */
- :c:struct:`prefix_ls`
- :c:struct:`prefix_rd`
- - :c:struct:`prefix_ptr`
- :c:struct:`prefix_sg` (use :frrfmt:`%pPSG4`)
- :c:union:`prefixptr` (dereference to get :c:struct:`prefix`)
- :c:union:`prefixconstptr` (dereference to get :c:struct:`prefix`)
Suppress duplicate updates if the route actually not changed.
Default: enabled.
+Send Hard Reset CEASE Notification for Administrative Reset
+-----------------------------------------------------------
+
+.. clicmd:: bgp hard-administrative-reset
+
+ Send Hard Reset CEASE Notification for 'Administrative Reset' events.
+
+ When disabled, and Graceful Restart Notification capability is exchanged
+ between the peers, Graceful Restart procedures apply, and routes will be
+ retained.
+
+ Enabled by default.
+
Disable checking if nexthop is connected on EBGP sessions
---------------------------------------------------------
expires. The stale path timer is started when the router receives a Route-Refresh
BoRR message.
+.. clicmd:: bgp graceful-restart notification
+
+ Indicate Graceful Restart support for BGP NOTIFICATION messages.
+
+ After changing this parameter, you have to reset the peers in order to advertise
+ N-bit in Graceful Restart capability.
+
+ Without Graceful-Restart Notification capability (N-bit not set), GR is not
+ activated when receiving CEASE/HOLDTIME expire notifications.
+
+ When sending ``CEASE/Administrative Reset`` (``clear bgp``), the session is closed
+ and routes are not retained. When N-bit is set and ``bgp hard-administrative-reset``
+ is turned off Graceful-Restart is activated and routes are retained.
+
+ Enabled by default.
+
.. _bgp-per-peer-graceful-restart:
BGP Per Peer Graceful Restart
daemons RIB to Zebra. If the option is passed as a command line argument when
starting the daemon and the configuration gets saved, the option will persist
unless removed from the configuration with the negating command prior to the
-configuration write operation.
+configuration write operation. At this point in time non SAFI_UNICAST BGP
+data is not properly withdrawn from zebra when this command is issued.
.. clicmd:: bgp send-extra-data zebra
the option is changed, bgpd doesn't reinstall the routes to comply with the new
setting.
+.. clicmd:: bgp session-dscp (0-63)
+
+This command allows bgp to control, at a global level, the DSCP value
+placed in the IP header of all BGP TCP sessions.
+
.. _bgp-suppress-fib:
Suppressing routes not installed in FIB
clear the Node flag that is set by default for Prefix-SIDs associated to
loopback addresses. This option is necessary to configure Anycast-SIDs.
-.. clicmd:: show isis segment-routing nodes
+.. clicmd:: show isis segment-routing node
Show detailed information about all learned Segment Routing Nodes.
:t:`Default External BGP (EBGP) Route Propagation Behavior without Policies. J. Mauch, J. Snijders, G. Hankins. July 2017`
- :rfc:`8277`
:t:`Using BGP to Bind MPLS Labels to Address Prefixes. E. Rosen. October 2017`
+- :rfc:`8538`
+ :t:`Notification Message Support for BGP Graceful Restart. K. Patel, R. Fernando, J. Scudder, J. Haas. March 2019`
- :rfc:`8654`
:t:`Extended Message Support for BGP. R. Bush, K. Patel, D. Ward. October 2019`
- :rfc:`9003`
and would like pim to use a specific source address associated with
that interface.
+.. clicmd:: ip pim passive
+
+ Disable sending and receiving pim control packets on the interface.
+
.. clicmd:: ip igmp
Tell pim to receive IGMP reports and Query on this interface. The default
and would like pim to use a specific source address associated with
that interface.
+.. clicmd:: ipv6 pim passive
+
+ Disable sending and receiving pim control packets on the interface.
+
.. clicmd:: ipv6 mld
Tell pim to receive MLD reports and Query on this interface. The default
'all' allows you to look at all vrfs for the command. Naming a vrf 'all' will
cause great confusion.
+PIM protocol state
+------------------
+
.. clicmd:: show ipv6 pim [vrf NAME] group-type [json]
Display SSM group ranges.
Display upstream information for S,G's and the RPF data associated with them.
+
+MLD state
+---------
+
+.. clicmd:: show ipv6 mld [vrf NAME] interface [IFNAME] [detail|json]
+
+ Display per-interface MLD state, elected querier and related timers. Use
+ the ``detail`` or ``json`` options for further information (the JSON output
+ always contains all details).
+
+.. clicmd:: show ipv6 mld [vrf NAME] statistics [interface IFNAME] [json]
+
+ Display packet and error counters for MLD interfaces. All counters are
+ packet counters (not bytes) and wrap at 64 bit. In some rare cases,
+ malformed received MLD reports may be partially processed and counted on
+ multiple counters.
+
+.. clicmd:: show ipv6 mld [vrf NAME] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail}] [json]
+
+ Display joined groups tracked by MLD. ``interface``, ``groups`` and
+ ``sources`` options may be used to limit output to a subset (note ``sources``
+ refers to the multicast traffic sender, not the host that joined to receive
+ the traffic.)
+
+ The ``detail`` option also reports which hosts have joined (subscribed) to
+ particular ``S,G``. This information is only available for MLDv2 hosts with
+ a MLDv2 querier. MLDv1 joins are recorded as "untracked" and shown in the
+ ``NonTrkSeen`` output column.
+
+
+General multicast routing state
+-------------------------------
+
.. clicmd:: show ipv6 multicast
Display various information about the interfaces used in this pim instance.
Display multicast data packets count per interface for all vrf.
+.. clicmd:: show ipv6 mroute [vrf NAME] [X:X::X:X [X:X::X:X]] [fill] [json]
+
+ Display information about the S,G mroutes installed in the kernel. If
+ one address is specified, it is assumed to be the Group we are interested
+ in displaying data on. If a second address is specified, it is the
+ Source of that Group. The keyword ``fill`` says to fill in all assumed
+ data for test/data gathering purposes.
+
+.. clicmd:: show ipv6 mroute [vrf NAME] count [json]
+
+ Display information about the S,G mroutes installed in the kernel and,
+ in addition, display data about packet flow for those mroutes, for a
+ specific vrf.
+
+.. clicmd:: show ipv6 mroute vrf all count [json]
+
+ Display information about the S,G mroutes installed in the kernel and,
+ in addition, display data about packet flow for those mroutes, for all vrfs.
+
+.. clicmd:: show ipv6 mroute [vrf NAME] summary [json]
+
+ Display total number of S,G mroutes and number of S,G mroutes installed
+ into the kernel for a specific vrf.
+
+.. clicmd:: show ipv6 mroute vrf all summary [json]
+
+ Display total number of S,G mroutes and number of S,G mroutes
+ installed into the kernel for all vrfs.
+
+PIMv6 Clear Commands
+====================
+
+Clear commands reset various variables.
+
+.. clicmd:: clear ipv6 mroute
+
+ Reset multicast routes.
+
+.. clicmd:: clear ipv6 mroute [vrf NAME] count
+
+ When this command is issued, reset the counts of data shown for
+ packet count, byte count and wrong interface to 0 and start count
+ up from this spot.
+
+.. clicmd:: clear ipv6 pim oil
+
+ Rescan PIMv6 OIL (output interface list).
+
PIMv6 Debug Commands
====================
mode, the debug commands can be persistent across restarts of the FRR pim6d if
the config was written out.
+.. clicmd:: debug pimv6 events
+
+ This turns on debugging for PIMv6 system events, especially timers.
+
+.. clicmd:: debug pimv6 nht
+
+ This turns on debugging for PIMv6 nexthop tracking. It will display
+ information about RPF lookups and about when a nexthop changes.
+
+.. clicmd:: debug pimv6 nht detail
+
+ This turns on detailed debugging for PIMv6 nexthop tracking. This is
+ not enabled by default.
+
+.. clicmd:: debug pimv6 packet-dump
+
+ This produces an extraordinary amount of output; each PIMv6 packet sent
+ and received is dumped for debugging purposes. This should be considered
+ a developer-only command.
+
+.. clicmd:: debug pimv6 packets
+
+ This turns on information about packet generation for sending and about
+ packet handling from a received packet.
+
+.. clicmd:: debug pimv6 trace
+
+ This traces pim code and how it is running.
+
+.. clicmd:: debug pimv6 zebra
+
+ This gathers data about events from zebra that come up through the ZAPI.
.. clicmd:: rpki polling_period (1-3600)
-
Set the number of seconds the router waits until the router asks the cache
again for updated data.
The default value is 300 seconds.
- The following commands configure one or multiple cache servers.
+.. clicmd:: rpki expire_interval (600-172800)
+
+ Set the number of seconds the router waits until it expires the cache.
+
+ The default value is 7200 seconds.
+
+.. clicmd:: rpki retry_interval (1-7200)
+
+ Set the number of seconds the router waits until retrying to connect to the
+ cache server.
+
+ The default value is 600 seconds.
.. clicmd:: rpki cache (A.B.C.D|WORD) PORT [SSH_USERNAME] [SSH_PRIVKEY_PATH] [SSH_PUBKEY_PATH] [KNOWN_HOSTS_PATH] [source A.B.C.D] PREFERENCE
if (ep->dst.s_addr == htonl(EIGRP_MULTICAST_ADDRESS))
eigrp_if_ipmulticast(eigrp, &ei->address, ei->ifp->ifindex);
- memset(&iph, 0, sizeof(struct ip));
+ memset(&iph, 0, sizeof(iph));
memset(&sa_dst, 0, sizeof(sa_dst));
/*
char buff[CMSG_SPACE(SOPT_SIZE_CMSG_IFINDEX_IPV4())];
struct msghdr msgh;
- memset(&msgh, 0, sizeof(struct msghdr));
+ memset(&msgh, 0, sizeof(msgh));
msgh.msg_iov = &iov;
msgh.msg_iovlen = 1;
msgh.msg_control = (caddr_t)buff;
== MATCH_FAILED)
return NULL;
- memset(&nbr_addr, 0, sizeof(struct in_addr));
+ memset(&nbr_addr, 0, sizeof(nbr_addr));
ifindex = 0;
nbr = eigrpNbrLookup(v, name, length, &nbr_addr, &ifindex, exact);
{
struct timeval tv;
- memset(&eigrp_master, 0, sizeof(struct eigrp_master));
+ memset(&eigrp_master, 0, sizeof(eigrp_master));
eigrp_om = &eigrp_master;
eigrp_om->eigrp = list_new();
#define IPVLAN_F_PRIVATE 0x01
#define IPVLAN_F_VEPA 0x02
+/* Tunnel RTM header */
+struct tunnel_msg {
+ __u8 family;
+ __u8 reserved1;
+ __u16 reserved2;
+ __u32 ifindex;
+};
+
+enum {
+ VXLAN_VNIFILTER_ENTRY_UNSPEC,
+ VXLAN_VNIFILTER_ENTRY_START,
+ VXLAN_VNIFILTER_ENTRY_END,
+ VXLAN_VNIFILTER_ENTRY_GROUP,
+ VXLAN_VNIFILTER_ENTRY_GROUP6,
+ __VXLAN_VNIFILTER_ENTRY_MAX
+};
+#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1)
+
+enum {
+ VXLAN_VNIFILTER_UNSPEC,
+ VXLAN_VNIFILTER_ENTRY,
+ __VXLAN_VNIFILTER_MAX
+};
+#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1)
+
/* VXLAN section */
enum {
IFLA_VXLAN_UNSPEC,
IFLA_VXLAN_LABEL,
IFLA_VXLAN_GPE,
IFLA_VXLAN_TTL_INHERIT,
+ IFLA_VXLAN_DF,
+ IFLA_VXLAN_VNIFILTER, /* only applicable when COLLECT_METADATA mode is
+ on */
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
#define NIFBITS (sizeof(if_mask) * 8) /* bits per mask */
typedef struct if_set {
- if_mask ifs_bits[__KERNEL_DIV_ROUND_UP(IF_SETSIZE, NIFBITS)];
+ /* __KERNEL_DIV_ROUND_UP() */
+ if_mask ifs_bits[(IF_SETSIZE + NIFBITS - 1) / NIFBITS];
} if_set;
#define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS)))
RTM_GETNEXTHOPBUCKET,
#define RTM_GETNEXTHOPBUCKET RTM_GETNEXTHOPBUCKET
+ RTM_SETHWFLAGS = 119,
+#define RTM_SETHWFLAGS RTM_SETHWFLAGS
+
+ RTM_NEWTUNNEL = 120,
+#define RTM_NEWTUNNEL RTM_NEWTUNNEL
+ RTM_DELTUNNEL,
+#define RTM_DELTUNNEL RTM_DELTUNNEL
+ RTM_GETTUNNEL,
+#define RTM_GETTUNNEL RTM_GETTUNNEL
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
#define RTA_SPACE(len) RTA_ALIGN(RTA_LENGTH(len))
#define RTA_DATA(rta) ((void*)(((char*)(rta)) + RTA_LENGTH(0)))
#define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0))
+#ifndef TUNNEL_RTA
+#define TUNNEL_RTA(r) \
+ ((struct rtattr *)(((char *)(r)) + \
+ NLMSG_ALIGN(sizeof(struct tunnel_msg))))
+#endif
#define RTNLGRP_NEXTHOP RTNLGRP_NEXTHOP
RTNLGRP_BRVLAN,
#define RTNLGRP_BRVLAN RTNLGRP_BRVLAN
+ RTNLGRP_TUNNEL,
+#define RTNLGRP_TUNNEL RTNLGRP_TUNNEL
__RTNLGRP_MAX
};
#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
/*
* And set the filter
*/
- memset(&bpf_prog, 0, sizeof(struct bpf_program));
+ memset(&bpf_prog, 0, sizeof(bpf_prog));
bpf_prog.bf_len = 8;
bpf_prog.bf_insns = &(llcfilter[0]);
if (ioctl(fd, BIOCSETF, (caddr_t)&bpf_prog) < 0) {
void isis_lfa_excluded_iface_add(struct isis_circuit *circuit, int level,
const char *ifname)
{
- hash_get(circuit->lfa_excluded_ifaces[level - 1], (char *)ifname,
- lfa_excl_interface_hash_alloc);
+ (void)hash_get(circuit->lfa_excluded_ifaces[level - 1], (char *)ifname,
+ lfa_excl_interface_hash_alloc);
}
/**
/*
* Bind to the physical interface
*/
- memset(&s_addr, 0, sizeof(struct sockaddr_ll));
+ memset(&s_addr, 0, sizeof(s_addr));
s_addr.sll_family = AF_PACKET;
s_addr.sll_protocol = htons(ETH_P_ALL);
s_addr.sll_ifindex = circuit->interface->ifindex;
addr_len = sizeof(s_addr);
- memset(&s_addr, 0, sizeof(struct sockaddr_ll));
+ memset(&s_addr, 0, sizeof(s_addr));
bytesread =
recvfrom(circuit->fd, (void *)&llc, LLC_LEN, MSG_PEEK,
int bytesread, addr_len;
struct sockaddr_ll s_addr;
- memset(&s_addr, 0, sizeof(struct sockaddr_ll));
+ memset(&s_addr, 0, sizeof(s_addr));
addr_len = sizeof(s_addr);
/* we can read directly to the stream */
struct sockaddr_ll sa;
stream_set_getp(circuit->snd_stream, 0);
- memset(&sa, 0, sizeof(struct sockaddr_ll));
+ memset(&sa, 0, sizeof(sa));
sa.sll_family = AF_PACKET;
size_t frame_size = stream_get_endp(circuit->snd_stream) + LLC_LEN;
ssize_t rv;
stream_set_getp(circuit->snd_stream, 0);
- memset(&sa, 0, sizeof(struct sockaddr_ll));
+ memset(&sa, 0, sizeof(sa));
sa.sll_family = AF_PACKET;
sa.sll_ifindex = circuit->interface->ifindex;
sa.sll_halen = ETH_ALEN;
struct isis_vertex *vertex = arg;
struct isis_vertex *hop = bucket->data;
- hash_get(vertex->firsthops, hop, hash_alloc_intern);
+ (void)hash_get(vertex->firsthops, hop, hash_alloc_intern);
}
static void vertex_update_firsthops(struct isis_vertex *vertex,
struct isis_vertex *parent)
{
if (vertex->d_N <= 2)
- hash_get(vertex->firsthops, vertex, hash_alloc_intern);
+ (void)hash_get(vertex->firsthops, vertex, hash_alloc_intern);
if (vertex->d_N < 2 || !parent)
return;
if (vertex->N.ip.sr.label != MPLS_INVALID_LABEL)
vertex->N.ip.sr.present = true;
- hash_get(spftree->prefix_sids, vertex,
- hash_alloc_intern);
+ (void)hash_get(spftree->prefix_sids, vertex,
+ hash_alloc_intern);
}
}
if (vtype >= VTYPE_IPREACH_INTERNAL) {
memcpy(&p, id, sizeof(p));
apply_mask(&p.dest);
- apply_mask((struct prefix *)&p.src);
+ apply_mask(&p.src);
id = &p;
}
SET_FLAG(lnode.flags, LS_NODE_ROUTER_ID6);
}
if (tlvs->hostname) {
- memcpy(&lnode.name, tlvs->hostname, MAX_NAME_LENGTH);
+ strlcpy(lnode.name, tlvs->hostname, MAX_NAME_LENGTH);
SET_FLAG(lnode.flags, LS_NODE_NAME);
}
if (tlvs->router_cap) {
p.u.prefix6 = std->local6;
}
if (!std)
- p = *prefix;
+ prefix_copy(&p, prefix);
else
te_debug(" |- Adjust prefix %pFX with local address to: %pFX",
prefix, &p);
DECLARE_MTYPE(ISIS_SUBTLV);
struct lspdb_head;
-struct isis_subtlvs;
struct sr_prefix_cfg;
-struct isis_area_address;
struct isis_area_address {
struct isis_area_address *next;
#define ISIS_WIDE_METRIC_INFINITY 0xFFFFFE
#define ISIS_NARROW_METRIC_INFINITY 62
-struct isis_oldstyle_reach;
struct isis_oldstyle_reach {
struct isis_oldstyle_reach *next;
uint8_t metric;
};
-struct isis_oldstyle_ip_reach;
struct isis_oldstyle_ip_reach {
struct isis_oldstyle_ip_reach *next;
struct prefix_ipv4 prefix;
};
-struct isis_lsp_entry;
struct isis_lsp_entry {
struct isis_lsp_entry *next;
struct isis_lsp *lsp;
};
-struct isis_extended_reach;
-struct isis_ext_subtlvs;
struct isis_extended_reach {
struct isis_extended_reach *next;
struct isis_ext_subtlvs *subtlvs;
};
-struct isis_extended_ip_reach;
struct isis_extended_ip_reach {
struct isis_extended_ip_reach *next;
struct isis_subtlvs *subtlvs;
};
-struct isis_ipv6_reach;
struct isis_ipv6_reach {
struct isis_ipv6_reach *next;
#define ISIS_PREFIX_SID_VALUE 0x08
#define ISIS_PREFIX_SID_LOCAL 0x04
-struct isis_prefix_sid;
struct isis_prefix_sid {
struct isis_prefix_sid *next;
#define EXT_SUBTLV_LINK_ADJ_SID_SFLG 0x08
#define EXT_SUBTLV_LINK_ADJ_SID_PFLG 0x04
-struct isis_adj_sid;
struct isis_adj_sid {
struct isis_adj_sid *next;
uint32_t sid;
};
-struct isis_lan_adj_sid;
struct isis_lan_adj_sid {
struct isis_lan_adj_sid *next;
uint8_t msd;
};
-struct isis_item;
struct isis_item {
struct isis_item *next;
};
-struct isis_lan_neighbor;
struct isis_lan_neighbor {
struct isis_lan_neighbor *next;
uint8_t mac[6];
};
-struct isis_ipv4_address;
struct isis_ipv4_address {
struct isis_ipv4_address *next;
struct in_addr addr;
};
-struct isis_ipv6_address;
struct isis_ipv6_address {
struct isis_ipv6_address *next;
struct in6_addr addr;
};
-struct isis_mt_router_info;
struct isis_mt_router_info {
struct isis_mt_router_info *next;
uint16_t mtid;
};
-struct isis_auth;
struct isis_auth {
struct isis_auth *next;
size_t offset; /* Only valid after packing */
};
-struct isis_item_list;
struct isis_item_list {
struct isis_item *head;
struct isis_item **tail;
void isis_master_init(struct thread_master *master)
{
- memset(&isis_master, 0, sizeof(struct isis_master));
+ memset(&isis_master, 0, sizeof(isis_master));
im = &isis_master;
im->isis = list_new();
im->master = master;
struct iface {
RB_ENTRY(iface) entry;
- char name[IF_NAMESIZE];
+ char name[INTERFACE_NAMSIZ];
ifindex_t ifindex;
struct if_addr_head addr_list;
struct in6_addr linklocal;
struct l2vpn_if {
RB_ENTRY(l2vpn_if) entry;
struct l2vpn *l2vpn;
- char ifname[IF_NAMESIZE];
+ char ifname[INTERFACE_NAMSIZ];
ifindex_t ifindex;
int operative;
uint8_t mac[ETH_ALEN];
int af;
union ldpd_addr addr;
uint32_t pwid;
- char ifname[IF_NAMESIZE];
+ char ifname[INTERFACE_NAMSIZ];
ifindex_t ifindex;
bool enabled;
uint32_t remote_group;
int type;
int pw_type;
int mtu;
- char br_ifname[IF_NAMESIZE];
+ char br_ifname[INTERFACE_NAMSIZ];
ifindex_t br_ifindex;
struct l2vpn_if_head if_tree;
struct l2vpn_pw_head pw_tree;
};
struct kaddr {
- char ifname[IF_NAMESIZE];
+ char ifname[INTERFACE_NAMSIZ];
ifindex_t ifindex;
int af;
union ldpd_addr addr;
};
struct kif {
- char ifname[IF_NAMESIZE];
+ char ifname[INTERFACE_NAMSIZ];
ifindex_t ifindex;
int flags;
int operative;
/* control data structures */
struct ctl_iface {
int af;
- char name[IF_NAMESIZE];
+ char name[INTERFACE_NAMSIZ];
ifindex_t ifindex;
int state;
enum iface_type type;
};
struct ctl_disc_if {
- char name[IF_NAMESIZE];
+ char name[INTERFACE_NAMSIZ];
int active_v4;
int active_v6;
int no_adj;
int af;
struct in_addr id;
enum hello_type type;
- char ifname[IF_NAMESIZE];
+ char ifname[INTERFACE_NAMSIZ];
union ldpd_addr src_addr;
uint16_t holdtime;
uint16_t holdtime_remaining;
struct ctl_pw {
uint16_t type;
char l2vpn_name[L2VPN_NAME_LEN];
- char ifname[IF_NAMESIZE];
+ char ifname[INTERFACE_NAMSIZ];
uint32_t pwid;
struct in_addr lsr_id;
uint32_t local_label;
};
struct ctl_ldp_sync {
- char name[IF_NAMESIZE];
+ char name[INTERFACE_NAMSIZ];
ifindex_t ifindex;
bool in_sync;
bool timer_running;
return;
}
- assert(hash_get(cnode->cmd_hash, (void *)cmd, hash_alloc_intern));
+ (void)hash_get(cnode->cmd_hash, (void *)cmd, hash_alloc_intern);
if (cnode->graph_built || !defer_cli_tree) {
struct graph *graph = graph_new();
frr_with_mutex(&refs_mtx) {
while (ref[i].code != END_FERR) {
- hash_get(refs, &ref[i], hash_alloc_intern);
+ (void)hash_get(refs, &ref[i], hash_alloc_intern);
i++;
}
}
* Backward compatibility: don't complain about duplicated values,
* just silently accept.
*/
- if (seq_str == NULL) {
- ada.ada_type = "ipv4";
- ada.ada_name = name;
- ada.ada_action = action;
- if (host_str && mask_str == NULL) {
- ada.ada_xpath[0] = "./host";
- ada.ada_value[0] = host_str;
- } else if (host_str && mask_str) {
- ada.ada_xpath[0] = "./network/address";
- ada.ada_value[0] = host_str;
- ada.ada_xpath[1] = "./network/mask";
- ada.ada_value[1] = mask_str;
- } else {
- ada.ada_xpath[0] = "./source-any";
- ada.ada_value[0] = "";
- }
-
- /* Duplicated entry without sequence, just quit. */
- if (acl_is_dup(vty->candidate_config->dnode, &ada))
- return CMD_SUCCESS;
+ ada.ada_type = "ipv4";
+ ada.ada_name = name;
+ ada.ada_action = action;
+ if (host_str && mask_str == NULL) {
+ ada.ada_xpath[0] = "./host";
+ ada.ada_value[0] = host_str;
+ } else if (host_str && mask_str) {
+ ada.ada_xpath[0] = "./network/address";
+ ada.ada_value[0] = host_str;
+ ada.ada_xpath[1] = "./network/mask";
+ ada.ada_value[1] = mask_str;
+ } else {
+ ada.ada_xpath[0] = "./source-any";
+ ada.ada_value[0] = "";
}
+ if (acl_is_dup(vty->candidate_config->dnode, &ada))
+ return CMD_SUCCESS;
+
/*
* Create the access-list first, so we can generate sequence if
* none given (backward compatibility).
* Backward compatibility: don't complain about duplicated values,
* just silently accept.
*/
- if (seq_str == NULL) {
- ada.ada_type = "ipv4";
- ada.ada_name = name;
- ada.ada_action = action;
- if (src_str && src_mask_str == NULL) {
- ada.ada_xpath[idx] = "./host";
- ada.ada_value[idx] = src_str;
- idx++;
- } else if (src_str && src_mask_str) {
- ada.ada_xpath[idx] = "./network/address";
- ada.ada_value[idx] = src_str;
- idx++;
- ada.ada_xpath[idx] = "./network/mask";
- ada.ada_value[idx] = src_mask_str;
- idx++;
- } else {
- ada.ada_xpath[idx] = "./source-any";
- ada.ada_value[idx] = "";
- idx++;
- }
-
- if (dst_str && dst_mask_str == NULL) {
- ada.ada_xpath[idx] = "./destination-host";
- ada.ada_value[idx] = dst_str;
- idx++;
- } else if (dst_str && dst_mask_str) {
- ada.ada_xpath[idx] = "./destination-network/address";
- ada.ada_value[idx] = dst_str;
- idx++;
- ada.ada_xpath[idx] = "./destination-network/mask";
- ada.ada_value[idx] = dst_mask_str;
- idx++;
- } else {
- ada.ada_xpath[idx] = "./destination-any";
- ada.ada_value[idx] = "";
- idx++;
- }
+ ada.ada_type = "ipv4";
+ ada.ada_name = name;
+ ada.ada_action = action;
+ if (src_str && src_mask_str == NULL) {
+ ada.ada_xpath[idx] = "./host";
+ ada.ada_value[idx] = src_str;
+ idx++;
+ } else if (src_str && src_mask_str) {
+ ada.ada_xpath[idx] = "./network/address";
+ ada.ada_value[idx] = src_str;
+ idx++;
+ ada.ada_xpath[idx] = "./network/mask";
+ ada.ada_value[idx] = src_mask_str;
+ idx++;
+ } else {
+ ada.ada_xpath[idx] = "./source-any";
+ ada.ada_value[idx] = "";
+ idx++;
+ }
- /* Duplicated entry without sequence, just quit. */
- if (acl_is_dup(vty->candidate_config->dnode, &ada))
- return CMD_SUCCESS;
+ if (dst_str && dst_mask_str == NULL) {
+ ada.ada_xpath[idx] = "./destination-host";
+ ada.ada_value[idx] = dst_str;
+ idx++;
+ } else if (dst_str && dst_mask_str) {
+ ada.ada_xpath[idx] = "./destination-network/address";
+ ada.ada_value[idx] = dst_str;
+ idx++;
+ ada.ada_xpath[idx] = "./destination-network/mask";
+ ada.ada_value[idx] = dst_mask_str;
+ idx++;
+ } else {
+ ada.ada_xpath[idx] = "./destination-any";
+ ada.ada_value[idx] = "";
+ idx++;
}
+ if (acl_is_dup(vty->candidate_config->dnode, &ada))
+ return CMD_SUCCESS;
+
/*
* Create the access-list first, so we can generate sequence if
* none given (backward compatibility).
* Backward compatibility: don't complain about duplicated values,
* just silently accept.
*/
- if (seq_str == NULL) {
- ada.ada_type = "ipv4";
- ada.ada_name = name;
- ada.ada_action = action;
-
- if (prefix_str) {
- ada.ada_xpath[0] = "./ipv4-prefix";
- ada.ada_value[0] = prefix_str;
- if (exact) {
- ada.ada_xpath[1] = "./ipv4-exact-match";
- ada.ada_value[1] = "true";
- }
- } else {
- ada.ada_xpath[0] = "./any";
- ada.ada_value[0] = "";
- }
+ ada.ada_type = "ipv4";
+ ada.ada_name = name;
+ ada.ada_action = action;
- /* Duplicated entry without sequence, just quit. */
- if (acl_is_dup(vty->candidate_config->dnode, &ada))
- return CMD_SUCCESS;
+ if (prefix_str) {
+ ada.ada_xpath[0] = "./ipv4-prefix";
+ ada.ada_value[0] = prefix_str;
+ if (exact) {
+ ada.ada_xpath[1] = "./ipv4-exact-match";
+ ada.ada_value[1] = "true";
+ }
+ } else {
+ ada.ada_xpath[0] = "./any";
+ ada.ada_value[0] = "";
}
+ if (acl_is_dup(vty->candidate_config->dnode, &ada))
+ return CMD_SUCCESS;
+
/*
* Create the access-list first, so we can generate sequence if
* none given (backward compatibility).
* Backward compatibility: don't complain about duplicated values,
* just silently accept.
*/
- if (seq_str == NULL) {
- ada.ada_type = "ipv6";
- ada.ada_name = name;
- ada.ada_action = action;
-
- if (prefix_str) {
- ada.ada_xpath[0] = "./ipv6-prefix";
- ada.ada_value[0] = prefix_str;
- if (exact) {
- ada.ada_xpath[1] = "./ipv6-exact-match";
- ada.ada_value[1] = "true";
- }
- } else {
- ada.ada_xpath[0] = "./any";
- ada.ada_value[0] = "";
- }
+ ada.ada_type = "ipv6";
+ ada.ada_name = name;
+ ada.ada_action = action;
- /* Duplicated entry without sequence, just quit. */
- if (acl_is_dup(vty->candidate_config->dnode, &ada))
- return CMD_SUCCESS;
+ if (prefix_str) {
+ ada.ada_xpath[0] = "./ipv6-prefix";
+ ada.ada_value[0] = prefix_str;
+ if (exact) {
+ ada.ada_xpath[1] = "./ipv6-exact-match";
+ ada.ada_value[1] = "true";
+ }
+ } else {
+ ada.ada_xpath[0] = "./any";
+ ada.ada_value[0] = "";
}
+ if (acl_is_dup(vty->candidate_config->dnode, &ada))
+ return CMD_SUCCESS;
+
/*
* Create the access-list first, so we can generate sequence if
* none given (backward compatibility).
* Backward compatibility: don't complain about duplicated values,
* just silently accept.
*/
- if (seq_str == NULL) {
- ada.ada_type = "mac";
- ada.ada_name = name;
- ada.ada_action = action;
-
- if (mac_str) {
- ada.ada_xpath[0] = "./mac";
- ada.ada_value[0] = mac_str;
- } else {
- ada.ada_xpath[0] = "./any";
- ada.ada_value[0] = "";
- }
+ ada.ada_type = "mac";
+ ada.ada_name = name;
+ ada.ada_action = action;
- /* Duplicated entry without sequence, just quit. */
- if (acl_is_dup(vty->candidate_config->dnode, &ada))
- return CMD_SUCCESS;
+ if (mac_str) {
+ ada.ada_xpath[0] = "./mac";
+ ada.ada_value[0] = mac_str;
+ } else {
+ ada.ada_xpath[0] = "./any";
+ ada.ada_value[0] = "";
}
+ if (acl_is_dup(vty->candidate_config->dnode, &ada))
+ return CMD_SUCCESS;
+
/*
* Create the access-list first, so we can generate sequence if
* none given (backward compatibility).
* Backward compatibility: don't complain about duplicated values,
* just silently accept.
*/
- if (seq_str == NULL) {
- pda.pda_type = "ipv4";
- pda.pda_name = name;
- pda.pda_action = action;
- if (prefix_str) {
- prefix_copy(&pda.prefix, prefix);
- pda.ge = ge;
- pda.le = le;
- } else {
- pda.any = true;
- }
-
- /* Duplicated entry without sequence, just quit. */
- if (plist_is_dup(vty->candidate_config->dnode, &pda))
- return CMD_SUCCESS;
+ pda.pda_type = "ipv4";
+ pda.pda_name = name;
+ pda.pda_action = action;
+ if (prefix_str) {
+ prefix_copy(&pda.prefix, prefix);
+ pda.ge = ge;
+ pda.le = le;
+ } else {
+ pda.any = true;
}
+ if (plist_is_dup(vty->candidate_config->dnode, &pda))
+ return CMD_SUCCESS;
+
/*
* Create the prefix-list first, so we can generate sequence if
* none given (backward compatibility).
* Backward compatibility: don't complain about duplicated values,
* just silently accept.
*/
- if (seq_str == NULL) {
- pda.pda_type = "ipv6";
- pda.pda_name = name;
- pda.pda_action = action;
- if (prefix_str) {
- prefix_copy(&pda.prefix, prefix);
- pda.ge = ge;
- pda.le = le;
- } else {
- pda.any = true;
- }
-
- /* Duplicated entry without sequence, just quit. */
- if (plist_is_dup(vty->candidate_config->dnode, &pda))
- return CMD_SUCCESS;
+ pda.pda_type = "ipv6";
+ pda.pda_name = name;
+ pda.pda_action = action;
+ if (prefix_str) {
+ prefix_copy(&pda.prefix, prefix);
+ pda.ge = ge;
+ pda.le = le;
+ } else {
+ pda.any = true;
}
+ if (plist_is_dup(vty->candidate_config->dnode, &pda))
+ return CMD_SUCCESS;
+
/*
* Create the prefix-list first, so we can generate sequence if
* none given (backward compatibility).
assert(!"Type codec double-registered.");
}
- assert(hash_get(codec_hash, &c, codec_alloc));
+ (void)hash_get(codec_hash, &c, codec_alloc);
}
void frrscript_register_type_codecs(struct frrscript_codec *codecs)
/* Add the Lua function state to frrscript */
struct lua_function_state key = {.name = function_name, .L = L};
- hash_get(fs->lua_function_hash, &key, lua_function_alloc);
+ (void)hash_get(fs->lua_function_hash, &key, lua_function_alloc);
return 0;
fail:
#define IFNAMSIZ 16
*/
-#define INTERFACE_NAMSIZ 20
+#define INTERFACE_NAMSIZ IFNAMSIZ
#define INTERFACE_HWADDR_MAX 20
typedef signed int ifindex_t;
/* Check year_str. Year must be <1993-2035>. */
GET_LONG_RANGE(year, year_str, 1993, 2035);
- memset(&tm, 0, sizeof(struct tm));
+ memset(&tm, 0, sizeof(tm));
tm.tm_sec = sec;
tm.tm_min = min;
tm.tm_hour = hour;
int i = -1;
void *data;
size_t n = list->count;
- void **items = XCALLOC(MTYPE_TMP, (sizeof(void *)) * n);
+ void **items;
int (*realcmp)(const void *, const void *) =
(int (*)(const void *, const void *))cmp;
+ if (!n)
+ return;
+
+ items = XCALLOC(MTYPE_TMP, (sizeof(void *)) * n);
+
for (ALL_LIST_ELEMENTS(list, ln, nn, data)) {
items[++i] = data;
list_delete_node(list, ln);
strlcpy(entry->xpath, newpath, sizeof(entry->xpath));
XFREE(MTYPE_TMP, newpath);
- hash_get(running_config_entries, entry, hash_alloc_intern);
+ (void)hash_get(running_config_entries, entry,
+ hash_alloc_intern);
}
list_delete(&entries);
}
/* If n includes p prefix then return 1 else return 0. */
-int prefix_match(const struct prefix *n, const struct prefix *p)
+int prefix_match(union prefixconstptr unet, union prefixconstptr upfx)
{
+ const struct prefix *n = unet.p;
+ const struct prefix *p = upfx.p;
int offset;
int shift;
const uint8_t *np, *pp;
}
/* If n includes p then return 1 else return 0. Prefix mask is not considered */
-int prefix_match_network_statement(const struct prefix *n,
- const struct prefix *p)
+int prefix_match_network_statement(union prefixconstptr unet,
+ union prefixconstptr upfx)
{
+ const struct prefix *n = unet.p;
+ const struct prefix *p = upfx.p;
int offset;
int shift;
const uint8_t *np, *pp;
* address families don't match, return -1; otherwise the return value is
* in range 0 ... maximum prefix length for the address family.
*/
-int prefix_common_bits(const struct prefix *p1, const struct prefix *p2)
+int prefix_common_bits(union prefixconstptr ua, union prefixconstptr ub)
{
+ const struct prefix *p1 = ua.p;
+ const struct prefix *p2 = ub.p;
int pos, bit;
int length = 0;
uint8_t xor ;
}
/* Return prefix family type string. */
-const char *prefix_family_str(const struct prefix *p)
+const char *prefix_family_str(union prefixconstptr pu)
{
+ const struct prefix *p = pu.p;
+
if (p->family == AF_INET)
return "inet";
if (p->family == AF_INET6)
}
}
-void apply_mask(struct prefix *p)
+void apply_mask(union prefixptr pu)
{
+ struct prefix *p = pu.p;
+
switch (p->family) {
case AF_INET:
- apply_mask_ipv4((struct prefix_ipv4 *)p);
+ apply_mask_ipv4(pu.p4);
break;
case AF_INET6:
- apply_mask_ipv6((struct prefix_ipv6 *)p);
+ apply_mask_ipv6(pu.p6);
break;
default:
break;
sizeof(struct in6_addr));
}
-int prefix_blen(const struct prefix *p)
+int prefix_blen(union prefixconstptr pu)
{
+ const struct prefix *p = pu.p;
+
switch (p->family) {
case AF_INET:
return IPV4_MAX_BYTELEN;
return 0;
}
-/* Prefix for a generic pointer */
-struct prefix_ptr {
- uint8_t family;
- uint16_t prefixlen;
- uintptr_t prefix __attribute__((aligned(8)));
-};
-
/* Prefix for a Flowspec entry */
struct prefix_fs {
uint8_t family;
* Function to handle prefix_free being used as a del function.
*/
extern void prefix_free_lists(void *arg);
-extern const char *prefix_family_str(const struct prefix *);
-extern int prefix_blen(const struct prefix *);
+extern const char *prefix_family_str(union prefixconstptr pu);
+extern int prefix_blen(union prefixconstptr pu);
extern int str2prefix(const char *, struct prefix *);
#define PREFIX2STR_BUFFER PREFIX_STRLEN
extern const char *prefix2str(union prefixconstptr, char *, int);
extern int evpn_type5_prefix_match(const struct prefix *evpn_pfx,
const struct prefix *match_pfx);
-extern int prefix_match(const struct prefix *, const struct prefix *);
-extern int prefix_match_network_statement(const struct prefix *,
- const struct prefix *);
-extern int prefix_same(union prefixconstptr, union prefixconstptr);
-extern int prefix_cmp(union prefixconstptr, union prefixconstptr);
-extern int prefix_common_bits(const struct prefix *, const struct prefix *);
-extern void prefix_copy(union prefixptr, union prefixconstptr);
-extern void apply_mask(struct prefix *);
+extern int prefix_match(union prefixconstptr unet, union prefixconstptr upfx);
+extern int prefix_match_network_statement(union prefixconstptr unet,
+ union prefixconstptr upfx);
+extern int prefix_same(union prefixconstptr ua, union prefixconstptr ub);
+extern int prefix_cmp(union prefixconstptr ua, union prefixconstptr ub);
+extern int prefix_common_bits(union prefixconstptr ua, union prefixconstptr ub);
+extern void prefix_copy(union prefixptr udst, union prefixconstptr usrc);
+extern void apply_mask(union prefixptr pu);
#ifdef __clang_analyzer__
/* clang-SA doesn't understand transparent unions, making it think that the
return NULL;
// map.deleted is false via memset
- memset(&tmp_map, 0, sizeof(struct route_map));
+ memset(&tmp_map, 0, sizeof(tmp_map));
tmp_map.name = XSTRDUP(MTYPE_ROUTE_MAP_NAME, name);
map = hash_lookup(route_map_master_hash, &tmp_map);
XFREE(MTYPE_ROUTE_MAP_NAME, tmp_map.name);
* with deleted=true
*/
if (!map) {
- memset(&tmp_map, 0, sizeof(struct route_map));
+ memset(&tmp_map, 0, sizeof(tmp_map));
tmp_map.name = XSTRDUP(MTYPE_ROUTE_MAP_NAME, name);
tmp_map.deleted = true;
map = hash_lookup(route_map_master_hash, &tmp_map);
if (!dep->this_hash)
dep->this_hash = upd8_hash;
- memset(&pentry_dep, 0, sizeof(struct route_map_pentry_dep));
+ memset(&pentry_dep, 0, sizeof(pentry_dep));
pentry_dep.pentry = pentry;
pentry_dep.plist_name = affected_name;
pentry_dep.event = event;
struct route_map_dep *dep = bucket->data;
struct route_map_dep_data *dep_data = NULL, tmp_dep_data;
- memset(&tmp_dep_data, 0, sizeof(struct route_map_dep_data));
+ memset(&tmp_dep_data, 0, sizeof(tmp_dep_data));
tmp_dep_data.rname = arg;
dep_data = hash_release(dep->dep_rmap_hash, &tmp_dep_data);
if (dep_data) {
if (!dep->this_hash)
dep->this_hash = dephash;
- memset(&tmp_dep_data, 0, sizeof(struct route_map_dep_data));
+ memset(&tmp_dep_data, 0, sizeof(tmp_dep_data));
tmp_dep_data.rname = rname;
dep_data = hash_lookup(dep->dep_rmap_hash, &tmp_dep_data);
if (!dep_data)
goto out;
}
- memset(&tmp_dep_data, 0, sizeof(struct route_map_dep_data));
+ memset(&tmp_dep_data, 0, sizeof(tmp_dep_data));
tmp_dep_data.rname = rname;
dep_data = hash_lookup(dep->dep_rmap_hash, &tmp_dep_data);
/*
if (su->sa.sa_family == AF_INET6
&& IN6_IS_ADDR_V4MAPPED(&su->sin6.sin6_addr)) {
- memset(&sin, 0, sizeof(struct sockaddr_in));
+ memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_port = su->sin6.sin6_port;
memcpy(&sin.sin_addr, ((char *)&su->sin6.sin6_addr) + 12, 4);
if (flags & TIMEFMT_SKIP)
return 0;
+ if (!ts)
+ return bputch(buf, '-');
if (flags & TIMEFMT_REALTIME)
*real_ts = *ts;
if (flags & TIMEFMT_SKIP)
return 0;
+ if (!ts)
+ return bputch(buf, '-');
if (flags & TIMEFMT_ABSOLUTE) {
struct timespec anchor[1];
{
const struct timespec *ts = vptr;
- if (!ts)
- return bputs(buf, "(null)");
return printfrr_time(buf, ea, ts, 0);
}
struct timespec ts;
if (!tv)
- return bputs(buf, "(null)");
+ return printfrr_time(buf, ea, NULL, 0);
ts.tv_sec = tv->tv_sec;
ts.tv_nsec = tv->tv_usec * 1000;
struct timespec ts;
if (!tt)
- return bputs(buf, "(null)");
+ return printfrr_time(buf, ea, NULL, TIMEFMT_SECONDS);
ts.tv_sec = *tt;
ts.tv_nsec = 0;
{
struct prefix_ipv4 p;
- memset(&p, 0, sizeof(struct prefix_ipv4));
+ memset(&p, 0, sizeof(p));
p.family = AF_INET;
p.prefixlen = IPV4_MAX_BITLEN;
p.prefix = *addr;
{
struct prefix_ipv6 p;
- memset(&p, 0, sizeof(struct prefix_ipv6));
+ memset(&p, 0, sizeof(p));
p.family = AF_INET6;
p.prefixlen = IPV6_MAX_BITLEN;
p.prefix = *addr;
int sock;
char port_str[BUFSIZ];
- memset(&req, 0, sizeof(struct addrinfo));
+ memset(&req, 0, sizeof(req));
req.ai_flags = AI_PASSIVE;
req.ai_family = AF_UNSPEC;
req.ai_socktype = SOCK_STREAM;
}
/* Make server socket. */
- memset(&serv, 0, sizeof(struct sockaddr_un));
+ memset(&serv, 0, sizeof(serv));
serv.sun_family = AF_UNIX;
strlcpy(serv.sun_path, path, sizeof(serv.sun_path));
#ifdef HAVE_STRUCT_SOCKADDR_UN_SUN_LEN
vty_event_serv(VTYSH_SERV, vtyserv);
- memset(&client, 0, sizeof(struct sockaddr_un));
+ memset(&client, 0, sizeof(client));
client_len = sizeof(struct sockaddr_un);
sock = accept(accept_sock, (struct sockaddr *)&client,
struct structname *ptr = VTY_GET_CONTEXT(structname); \
VTY_CHECK_CONTEXT(ptr);
+#define VTY_DECLVAR_CONTEXT_VRF(vrfptr) \
+ struct vrf *vrfptr; \
+ if (vty->node == CONFIG_NODE) \
+ vrfptr = vrf_lookup_by_id(VRF_DEFAULT); \
+ else \
+ vrfptr = VTY_GET_CONTEXT(vrf); \
+ VTY_CHECK_CONTEXT(vrfptr); \
+ MACRO_REQUIRE_SEMICOLON() /* end */
+
/* XPath macros. */
#define VTY_PUSH_XPATH(nodeval, value) \
do { \
stream_reset(s);
zclient_create_header(s, command, VRF_DEFAULT);
- stream_write(s, pw->ifname, IF_NAMESIZE);
+ stream_write(s, pw->ifname, INTERFACE_NAMSIZ);
stream_putl(s, pw->ifindex);
/* Put type */
s = zclient->ibuf;
/* Get data. */
- stream_get(pw->ifname, s, IF_NAMESIZE);
+ stream_get(pw->ifname, s, INTERFACE_NAMSIZ);
STREAM_GETL(s, pw->ifindex);
STREAM_GETL(s, pw->status);
};
struct zapi_pw {
- char ifname[IF_NAMESIZE];
+ char ifname[INTERFACE_NAMSIZ];
ifindex_t ifindex;
int type;
int af;
};
struct zapi_pw_status {
- char ifname[IF_NAMESIZE];
+ char ifname[INTERFACE_NAMSIZ];
ifindex_t ifindex;
uint32_t status;
};
if (++p->next_request_id == 0)
p->next_request_id = 1;
r->cb = cb;
- hash_get(p->reqid_hash, r, hash_alloc_intern);
+ (void)hash_get(p->reqid_hash, r, hash_alloc_intern);
}
return r->request_id;
}
if (fd < 0)
return -1;
- memset(&addr, 0, sizeof(struct sockaddr_un));
+ memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
if (CHECK_FLAG(external->bits_metric, OSPF6_ASBR_BIT_F)) {
offset = sizeof(*external)
+ OSPF6_PREFIX_SPACE(external->prefix.prefix_length);
- memset(&fwd_addr, 0, sizeof(struct prefix));
+ memset(&fwd_addr, 0, sizeof(fwd_addr));
fwd_addr.family = AF_INET6;
fwd_addr.prefixlen = IPV6_MAX_BITLEN;
memcpy(&fwd_addr.u.prefix6, (caddr_t)external + offset,
ospf6_link_route_to_aggr(struct ospf6_external_aggr_rt *aggr,
struct ospf6_route *rt)
{
- hash_get(aggr->match_extnl_hash, rt, hash_alloc_intern);
+ (void)hash_get(aggr->match_extnl_hash, rt, hash_alloc_intern);
rt->aggr_route = aggr;
}
uint32_t i;
result = XCALLOC(MTYPE_OSPF6_AUTH_HASH_XOR, len);
- if (!result)
- return NULL;
for (i = 0; i < len; i++)
result[i] = mes1[i] ^ mes2[i];
return OSPF6_AUTH_VALIDATE_FAILURE;
}
- memset(&ospf6_auth_info, 0, sizeof(struct ospf6_auth_hdr));
+ memset(&ospf6_auth_info, 0, sizeof(ospf6_auth_info));
if ((*pkt_len - hdr_len - (*lls_block_len)) > sizeof(ospf6_auth_info)) {
if (IS_OSPF6_DEBUG_AUTH_RX)
zlog_err("RECV[%s] : Wrong auth data in %s packet",
int sum = 0;
lsah = (struct ospf6_lsa_header *)lsa->header;
+ if (ntohs(lsah->length) <= OSPF6_LSA_HEADER_SIZE) {
+ if (IS_DEBUG_OSPF6_GR)
+ zlog_debug("%s: undersized (%u B) lsa", __func__,
+ ntohs(lsah->length));
+ return OSPF6_FAILURE;
+ }
length = ntohs(lsah->length) - OSPF6_LSA_HEADER_SIZE;
} else {
/* Add the routerid to the enable router hash table */
- hash_get(ospf6->ospf6_helper_cfg.enable_rtr_list, &temp,
- ospf6_enable_rtr_hash_alloc);
+ (void)hash_get(ospf6->ospf6_helper_cfg.enable_rtr_list, &temp,
+ ospf6_enable_rtr_hash_alloc);
}
}
int sum = 0;
lsah = (struct ospf6_lsa_header *)lsa->header;
+ if (ntohs(lsah->length) <= OSPF6_LSA_HEADER_SIZE) {
+ if (IS_DEBUG_OSPF6_GR)
+ zlog_debug("%s: undersized (%u B) lsa", __func__,
+ ntohs(lsah->length));
+ return OSPF6_FAILURE;
+ }
length = ntohs(lsah->length) - OSPF6_LSA_HEADER_SIZE;
/* Schedule Hello */
if (!CHECK_FLAG(oi->flag, OSPF6_INTERFACE_PASSIVE)
&& !if_is_loopback(oi->interface)) {
- thread_add_event(master, ospf6_hello_send, oi, 0,
+ thread_add_timer(master, ospf6_hello_send, oi, 0,
&oi->thread_send_hello);
}
ospf6_interface_state_str[oi->state], oi->transdelay,
oi->priority);
vty_out(vty, " Timer intervals configured:\n");
- vty_out(vty, " Hello %d, Dead %d, Retransmit %d\n",
- oi->hello_interval, oi->dead_interval,
- oi->rxmt_interval);
+ vty_out(vty, " Hello %d(%pTHd), Dead %d, Retransmit %d\n",
+ oi->hello_interval, oi->thread_send_hello,
+ oi->dead_interval, oi->rxmt_interval);
}
inet_ntop(AF_INET, &oi->drouter, drouter, sizeof(drouter));
oi->hello_interval = strmatch(argv[0]->text, "no")
? OSPF_HELLO_INTERVAL_DEFAULT
: strtoul(argv[idx_number]->arg, NULL, 10);
+
+ /*
+ * If the thread is scheduled, send the new hello now.
+ */
+ if (thread_is_scheduled(oi->thread_send_hello)) {
+ THREAD_OFF(oi->thread_send_hello);
+
+ thread_add_timer(master, ospf6_hello_send, oi, 0,
+ &oi->thread_send_hello);
+ }
return CMD_SUCCESS;
}
/* don't send hellos over loopback interface */
if (!if_is_loopback(oi->interface))
- thread_add_event(master, ospf6_hello_send, oi, 0,
+ thread_add_timer(master, ospf6_hello_send, oi, 0,
&oi->thread_send_hello);
return CMD_SUCCESS;
break;
prefix_num--;
- memset(&prefix, 0, sizeof(struct prefix));
+ memset(&prefix, 0, sizeof(prefix));
prefix.family = AF_INET6;
prefix.prefixlen = op->prefix_length;
ospf6_prefix_in6_addr(&prefix.u.prefix6, intra_prefix_lsa, op);
uint16_t length = OSPF6_HEADER_SIZE;
oi = (struct ospf6_interface *)THREAD_ARG(thread);
- oi->thread_send_hello = (struct thread *)NULL;
if (oi->state <= OSPF6_INTERFACE_DOWN) {
if (IS_OSPF6_DEBUG_MESSAGE(OSPF6_MESSAGE_TYPE_HELLO, SEND_HDR))
memset(&cmsgbuf, 0, sizeof(cmsgbuf));
scmsgp = (struct cmsghdr *)&cmsgbuf;
pktinfo = (struct in6_pktinfo *)(CMSG_DATA(scmsgp));
- memset(&dst_sin6, 0, sizeof(struct sockaddr_in6));
+ memset(&dst_sin6, 0, sizeof(dst_sin6));
/* source address */
pktinfo->ipi6_ifindex = ifindex;
rcmsgp = (struct cmsghdr *)cmsgbuf;
pktinfo = (struct in6_pktinfo *)(CMSG_DATA(rcmsgp));
- memset(&src_sin6, 0, sizeof(struct sockaddr_in6));
+ memset(&src_sin6, 0, sizeof(src_sin6));
/* receive control msg */
rcmsgp->cmsg_level = IPPROTO_IPV6;
int arg_end = use_json ? (argc - 1) : argc;
json_object *json = NULL;
- memset(&prefix, 0, sizeof(struct prefix));
+ memset(&prefix, 0, sizeof(prefix));
if (use_json)
json = json_object_new_object();
int i, ret;
struct prefix router, id, prefix;
- memset(&router, 0, sizeof(struct prefix));
- memset(&id, 0, sizeof(struct prefix));
- memset(&prefix, 0, sizeof(struct prefix));
+ memset(&router, 0, sizeof(router));
+ memset(&id, 0, sizeof(id));
+ memset(&prefix, 0, sizeof(prefix));
for (i = idx_ipv4; i < argc; i++) {
if (strmatch(argv[i]->text, "detail")) {
void ospf6_master_init(struct thread_master *master)
{
- memset(&ospf6_master, 0, sizeof(struct ospf6_master));
+ memset(&ospf6_master, 0, sizeof(ospf6_master));
om6 = &ospf6_master;
om6->ospf6 = list_new();
{
struct in6_addr addr_zero;
- memset(&addr_zero, 0, sizeof(struct in6_addr));
+ memset(&addr_zero, 0, sizeof(addr_zero));
/* Default prefix validation*/
- if ((is_default_prefix((struct prefix *)p))
- || (!memcmp(&p->u.prefix6, &addr_zero, sizeof(struct in6_addr)))) {
+ if ((is_default_prefix(p)) ||
+ (!memcmp(&p->u.prefix6, &addr_zero, sizeof(struct in6_addr)))) {
vty_out(vty, "Default address should not be configured as summary address.\n");
return false;
}
}
/* Apply mask for given prefix. */
- apply_mask((struct prefix *)&p);
+ apply_mask(&p);
if (!ospf6_is_valid_summary_addr(vty, &p))
return CMD_WARNING_CONFIG_FAILED;
}
/* Apply mask for given prefix. */
- apply_mask((struct prefix *)&p);
+ apply_mask(&p);
if (!ospf6_is_valid_summary_addr(vty, &p))
return CMD_WARNING_CONFIG_FAILED;
}
/* Apply mask for given prefix. */
- apply_mask((struct prefix *)&p);
+ apply_mask(&p);
if (!ospf6_is_valid_summary_addr(vty, &p))
return CMD_WARNING_CONFIG_FAILED;
}
/* Apply mask for given prefix. */
- apply_mask((struct prefix *)&p);
+ apply_mask(&p);
if (!ospf6_is_valid_summary_addr(vty, &p))
return CMD_WARNING_CONFIG_FAILED;
if (!zclient || zclient->sock < 0 || !ospf6)
return 1;
- memset(&api, 0, sizeof(struct zapi_cap));
+ memset(&api, 0, sizeof(api));
api.cap = command;
api.stale_removal_time = stale_time;
api.vrf_id = ospf6->vrf_id;
/* Prepare socket for asynchronous messages */
/* Initialize async address structure */
- memset(&myaddr_async, 0, sizeof(struct sockaddr_in));
+ memset(&myaddr_async, 0, sizeof(myaddr_async));
myaddr_async.sin_family = AF_INET;
myaddr_async.sin_addr.s_addr = htonl(INADDR_ANY);
myaddr_async.sin_port = htons(syncport + 1);
want the sync port number on a fixed port number. The reverse
async channel will be at this port+1 */
- memset(&myaddr_sync, 0, sizeof(struct sockaddr_in));
+ memset(&myaddr_sync, 0, sizeof(myaddr_sync));
myaddr_sync.sin_family = AF_INET;
myaddr_sync.sin_port = htons(syncport);
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
/* Get port address and port number of peer to make reverse connection.
The reverse channel uses the port number of the peer port+1. */
- memset(&peer_sync, 0, sizeof(struct sockaddr_in));
+ memset(&peer_sync, 0, sizeof(peer_sync));
peerlen = sizeof(struct sockaddr_in);
ret = getpeername(new_sync_sock, (struct sockaddr *)&peer_sync,
"%s: Linking extrenal route(%pI4/%d) to aggregator(%pI4/%d)",
__func__, &ei->p.prefix, ei->p.prefixlen,
&aggr->p.prefix, aggr->p.prefixlen);
- hash_get(aggr->match_extnl_hash, ei, hash_alloc_intern);
+ (void)hash_get(aggr->match_extnl_hash, ei, hash_alloc_intern);
ei->aggr_route = aggr;
}
}
/* Prepare the extrenal_info for aggregator */
- memset(&ei_aggr, 0, sizeof(struct external_info));
+ memset(&ei_aggr, 0, sizeof(ei_aggr));
ei_aggr.p = aggr->p;
ei_aggr.tag = aggr->tag;
ei_aggr.type = 0;
aggr->action = OSPF_ROUTE_AGGR_NONE;
/* Prepare the extrenal_info for aggregator */
- memset(&ei_aggr, 0, sizeof(struct external_info));
+ memset(&ei_aggr, 0, sizeof(ei_aggr));
ei_aggr.p = aggr->p;
ei_aggr.tag = aggr->tag;
ei_aggr.type = 0;
{
int rc = 0;
- memset(&OspfEXT, 0, sizeof(struct ospf_ext_lp));
+ memset(&OspfEXT, 0, sizeof(OspfEXT));
OspfEXT.enabled = false;
/* Only Area flooding is supported yet */
OspfEXT.scope = OSPF_OPAQUE_AREA_LSA;
} else {
/* Add the routerid to the enable router hash table */
- hash_get(ospf->enable_rtr_list, &temp,
- ospf_enable_rtr_hash_alloc);
+ (void)hash_get(ospf->enable_rtr_list, &temp,
+ ospf_enable_rtr_hash_alloc);
}
}
if (!CHECK_FLAG(ldp_sync_info->flags, LDP_SYNC_FLAG_IF_CONFIG))
ldp_sync_info->enabled = LDP_IGP_SYNC_DEFAULT;
if (remove) {
- ldp_sync_info_free((struct ldp_sync_info **)&(ldp_sync_info));
+ ldp_sync_info_free(&ldp_sync_info);
params->ldp_sync_info = NULL;
}
}
struct route_node *rn;
struct prefix lsa_prefix;
- memset(&lsa_prefix, 0, sizeof(struct prefix));
+ memset(&lsa_prefix, 0, sizeof(lsa_prefix));
lsa_prefix.family = AF_UNSPEC;
lsa_prefix.prefixlen = sizeof(lsa_prefix.u.ptr) * CHAR_BIT;
lsa_prefix.u.ptr = (uintptr_t)lsa;
return;
}
- memset(&lsa_prefix, 0, sizeof(struct prefix));
+ memset(&lsa_prefix, 0, sizeof(lsa_prefix));
lsa_prefix.family = AF_UNSPEC;
lsa_prefix.prefixlen = sizeof(lsa_prefix.u.ptr) * CHAR_BIT;
lsa_prefix.u.ptr = (uintptr_t)lsa;
if (aggr) {
struct external_info ei_aggr;
- memset(&ei_aggr, 0,
- sizeof(struct external_info));
+ memset(&ei_aggr, 0, sizeof(ei_aggr));
ei_aggr.p = aggr->p;
ei_aggr.tag = aggr->tag;
ei_aggr.instance = ospf->instance;
table = lsdb->type[type].db;
- memset(&lp, 0, sizeof(struct prefix_ls));
+ memset(&lp, 0, sizeof(lp));
lp.family = AF_UNSPEC;
lp.prefixlen = 64;
lp.id = id;
table = lsdb->type[type].db;
- memset(&lp, 0, sizeof(struct prefix_ls));
+ memset(&lp, 0, sizeof(lp));
lp.family = AF_UNSPEC;
lp.prefixlen = 64;
lp.id = id;
/* reset get pointer */
stream_set_getp(op->s, 0);
- memset(&iph, 0, sizeof(struct ip));
+ memset(&iph, 0, sizeof(iph));
memset(&sa_dst, 0, sizeof(sa_dst));
sa_dst.sin_family = AF_INET;
char buff[CMSG_SPACE(SOPT_SIZE_CMSG_IFINDEX_IPV4())];
struct msghdr msgh;
- memset(&msgh, 0, sizeof(struct msghdr));
+ memset(&msgh, 0, sizeof(msgh));
msgh.msg_iov = &iov;
msgh.msg_iovlen = 1;
msgh.msg_control = (caddr_t)buff;
zlog_info("RI (%s): Initialize Router Information", __func__);
- memset(&OspfRI, 0, sizeof(struct ospf_router_info));
+ memset(&OspfRI, 0, sizeof(OspfRI));
OspfRI.enabled = false;
OspfRI.registered = 0;
OspfRI.scope = OSPF_OPAQUE_AS_LSA;
== MATCH_FAILED)
return NULL;
- memset(&addr, 0, sizeof(struct in_addr));
+ memset(&addr, 0, sizeof(addr));
area = ospfAreaLookup(v, name, length, &addr, exact);
if (!area)
== MATCH_FAILED)
return NULL;
- memset(&addr, 0, sizeof(struct in_addr));
+ memset(&addr, 0, sizeof(addr));
area = ospfStubAreaLookup(v, name, length, &addr, exact);
if (!area)
/* INDEX { ospfLsdbAreaId, ospfLsdbType,
ospfLsdbLsid, ospfLsdbRouterId } */
- memset(&area_id, 0, sizeof(struct in_addr));
+ memset(&area_id, 0, sizeof(area_id));
type = 0;
- memset(&ls_id, 0, sizeof(struct in_addr));
- memset(&router_id, 0, sizeof(struct in_addr));
+ memset(&ls_id, 0, sizeof(ls_id));
+ memset(&router_id, 0, sizeof(router_id));
/* Check OSPF instance. */
ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
if (ospf == NULL)
return NULL;
- memset(&addr, 0, sizeof(struct in_addr));
+ memset(&addr, 0, sizeof(addr));
nbr_nbma = ospfHostLookup(v, name, length, &addr, exact);
if (nbr_nbma == NULL)
return NULL;
ifindex = 0;
- memset(&ifaddr, 0, sizeof(struct in_addr));
+ memset(&ifaddr, 0, sizeof(ifaddr));
/* Check OSPF instance. */
ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
return NULL;
ifindex = 0;
- memset(&ifaddr, 0, sizeof(struct in_addr));
+ memset(&ifaddr, 0, sizeof(ifaddr));
/* Check OSPF instance. */
ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
struct prefix_ls lp;
struct route_node *rn;
- memset(&lp, 0, sizeof(struct prefix_ls));
+ memset(&lp, 0, sizeof(lp));
lp.family = AF_UNSPEC;
lp.prefixlen = 64;
lp.id = vl_data->vl_area_id;
struct prefix_ls lp;
struct route_node *rn;
- memset(&lp, 0, sizeof(struct prefix_ls));
+ memset(&lp, 0, sizeof(lp));
lp.family = AF_UNSPEC;
lp.prefixlen = 64;
lp.id = vl_data->vl_area_id;
struct route_node *rn;
struct ospf_vl_data *vl_data;
- memset(&lp, 0, sizeof(struct prefix_ls));
+ memset(&lp, 0, sizeof(lp));
lp.family = AF_UNSPEC;
lp.prefixlen = 64;
lp.id = *area_id;
struct route_node *rn;
struct ospf_vl_data *vl_data;
- memset(&lp, 0, sizeof(struct prefix_ls));
+ memset(&lp, 0, sizeof(lp));
lp.family = AF_UNSPEC;
lp.prefixlen = 64;
lp.id = *area_id;
== MATCH_FAILED)
return NULL;
- memset(&area_id, 0, sizeof(struct in_addr));
- memset(&neighbor, 0, sizeof(struct in_addr));
+ memset(&area_id, 0, sizeof(area_id));
+ memset(&neighbor, 0, sizeof(neighbor));
vl_data = ospfVirtIfLookup(v, name, length, &area_id, &neighbor, exact);
if (!vl_data)
== MATCH_FAILED)
return NULL;
- memset(&nbr_addr, 0, sizeof(struct in_addr));
+ memset(&nbr_addr, 0, sizeof(nbr_addr));
ifindex = 0;
nbr = ospfNbrLookup(v, name, length, &nbr_addr, &ifindex, exact);
== MATCH_FAILED)
return NULL;
- memset(&area_id, 0, sizeof(struct in_addr));
- memset(&neighbor, 0, sizeof(struct in_addr));
+ memset(&area_id, 0, sizeof(area_id));
+ memset(&neighbor, 0, sizeof(neighbor));
/* Check OSPF instance. */
ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
return NULL;
type = OSPF_AS_EXTERNAL_LSA;
- memset(&ls_id, 0, sizeof(struct in_addr));
- memset(&router_id, 0, sizeof(struct in_addr));
+ memset(&ls_id, 0, sizeof(ls_id));
+ memset(&router_id, 0, sizeof(router_id));
/* Check OSPF instance. */
ospf = ospf_lookup_by_vrf_id(VRF_DEFAULT);
osr_debug("SR (%s): Initialize SR Data Base", __func__);
- memset(&OspfSR, 0, sizeof(struct ospf_sr_db));
+ memset(&OspfSR, 0, sizeof(OspfSR));
OspfSR.status = SR_OFF;
/* Only AREA flooding is supported in this release */
OspfSR.scope = OSPF_OPAQUE_AREA_LSA;
srn = (struct sr_node *)hash_get(OspfSR.neighbors,
&lsah->adv_router,
(void *)sr_node_new);
- /* Sanity check */
- if (srn == NULL) {
- flog_err(
- EC_OSPF_SR_NODE_CREATE,
- "SR (%s): Abort! can't create SR node in hash table",
- __func__);
- return;
- }
/* update LSA ID */
srn->instance = ntohl(lsah->id.s_addr);
/* Copy SRGB */
(void *)&(lsah->adv_router),
(void *)sr_node_new);
- /* Sanity check */
- if (srn == NULL) {
- flog_err(EC_OSPF_SR_NODE_CREATE,
- "SR (%s): Abort! can't create SR node in hash table",
- __func__);
- return;
- }
-
/* Initialize TLV browsing */
length = lsa->size - OSPF_LSA_HEADER_SIZE;
for (tlvh = TLV_HDR_TOP(lsah); length > 0 && tlvh;
srn = (struct sr_node *)hash_get(OspfSR.neighbors,
(void *)&(lsah->adv_router),
(void *)sr_node_new);
-
- /* Sanity check */
- if (srn == NULL) {
- flog_err(EC_OSPF_SR_NODE_CREATE,
- "SR (%s): Abort! can't create SR node in hash table",
- __func__);
- return;
- }
-
/* Initialize TLV browsing */
length = lsa->size - OSPF_LSA_HEADER_SIZE;
for (tlvh = TLV_HDR_TOP(lsah); length > 0 && tlvh;
return rc;
}
- memset(&OspfMplsTE, 0, sizeof(struct ospf_mpls_te));
+ memset(&OspfMplsTE, 0, sizeof(OspfMplsTE));
OspfMplsTE.enabled = false;
OspfMplsTE.export = false;
OspfMplsTE.inter_as = Off;
}
/* Apply mask for given prefix. */
- apply_mask((struct prefix *)&p);
+ apply_mask(&p);
if (!is_valid_summary_addr(&p)) {
vty_out(vty, "Not a valid summary address.\n");
}
/* Apply mask for given prefix. */
- apply_mask((struct prefix *)&p);
+ apply_mask(&p);
if (!is_valid_summary_addr(&p)) {
vty_out(vty, "Not a valid summary address.\n");
}
/* Apply mask for given prefix. */
- apply_mask((struct prefix *)&p);
+ apply_mask(&p);
if (!is_valid_summary_addr(&p)) {
vty_out(vty, "Not a valid summary address.\n");
}
/* Apply mask for given prefix. */
- apply_mask((struct prefix *)&p);
+ apply_mask(&p);
if (!is_valid_summary_addr(&p)) {
vty_out(vty, "Not a valid summary address.\n");
if (!zclient || zclient->sock < 0 || !ospf)
return 1;
- memset(&api, 0, sizeof(struct zapi_cap));
+ memset(&api, 0, sizeof(api));
api.cap = command;
api.stale_removal_time = stale_time;
api.vrf_id = ospf->vrf_id;
void ospf_master_init(struct thread_master *master)
{
- memset(&ospf_master, 0, sizeof(struct ospf_master));
+ memset(&ospf_master, 0, sizeof(ospf_master));
om = &ospf_master;
om->ospf = list_new();
/* Verify the PCE has the IP set */
struct in6_addr zero_v6_addr;
- memset(&zero_v6_addr, 0, sizeof(struct in6_addr));
+ memset(&zero_v6_addr, 0, sizeof(zero_v6_addr));
if (memcmp(&pce_opts->addr.ip, &zero_v6_addr, IPADDRSZ(&pce_opts->addr))
== 0) {
vty_out(vty,
RB_FOREACH (pbrm, pbr_map_entry_head, &pbr_maps) {
for (ALL_LIST_ELEMENTS_RO(pbrm->incoming, inode, pmi)) {
- if (strncmp(pmi->ifp->name, ifname, INTERFACE_NAMSIZ)
- != 0)
+ if (strcmp(pmi->ifp->name, ifname) != 0)
continue;
if (ppmi)
nhrcvi.nhrc);
nhrcvi.nhrc->nexthop.vrf_id =
pbr_vrf_id(pnhi->pbr_vrf);
- hash_get(pbr_nhrc_hash, nhrcvi.nhrc,
- hash_alloc_intern);
+ (void)hash_get(pbr_nhrc_hash,
+ nhrcvi.nhrc,
+ hash_alloc_intern);
pbr_send_rnh(&nhrcvi.nhrc->nexthop, true);
}
} while (nhrcvi.nhrc);
if (pnhi.pnhc) {
pnhi.pnhc->nexthop.vrf_id = pbr_vrf_id(pbr_vrf);
- hash_get(pnhgc->nhh, pnhi.pnhc, hash_alloc_intern);
+ (void)hash_get(pnhgc->nhh, pnhi.pnhc,
+ hash_alloc_intern);
} else
pnhc->nexthop.vrf_id = pbr_vrf_id(pbr_vrf);
if (nhrc) {
hash_release(pbr_nhrc_hash, nhrc);
nhrc->nexthop.ifindex = ifp->ifindex;
- hash_get(pbr_nhrc_hash, nhrc, hash_alloc_intern);
+ (void)hash_get(pbr_nhrc_hash, nhrc, hash_alloc_intern);
}
pnhi.pnhc->nexthop.ifindex = ifp->ifindex;
- hash_get(pnhgc->nhh, pnhi.pnhc, hash_alloc_intern);
+ (void)hash_get(pnhgc->nhh, pnhi.pnhc, hash_alloc_intern);
pbr_map_check_interface_nh_group_change(pnhgc->name, ifp,
old_ifindex);
nhgc->table_id = pbr_next_unallocated_table_id;
/* Mark table id as allocated in id-indexed hash */
- hash_get(pbr_nhg_allocated_id_hash, nhgc, hash_alloc_intern);
+ (void)hash_get(pbr_nhg_allocated_id_hash, nhgc, hash_alloc_intern);
/* Pre-compute the next unallocated table id */
pbr_nht_update_next_unallocated_table_id();
}
obj->nai_list = dll_initialize();
/* Since the IP has to be stored in the list, copy it so the caller
- * doesnt have any restrictions about the type of memory used externally
- * for the IP. This memory will be freed with the object is freed. */
+ * doesn't have any restrictions about the type of memory used
+ * externally for the IP. This memory will be freed with the object is
+ * freed. */
struct in_addr *ipv4_node_id_copy =
pceplib_malloc(PCEPLIB_MESSAGES, sizeof(struct in_addr));
ipv4_node_id_copy->s_addr = ipv4_node_id->s_addr;
struct pcep_object_bandwidth *bandwidth =
(struct pcep_object_bandwidth *)hdr;
uint32_t *uint32_ptr = (uint32_t *)obj_body_buf;
- /* Seems like the compiler doesnt correctly copy the float, so memcpy()
+ /* Seems like the compiler doesn't correctly copy the float, so memcpy()
* it */
memcpy(uint32_ptr, &(bandwidth->bandwidth), sizeof(uint32_t));
*uint32_ptr = htonl(*uint32_ptr);
| (metric->flag_b ? OBJECT_METRIC_FLAC_B : 0x00));
obj_body_buf[3] = metric->type;
uint32_t *uint32_ptr = (uint32_t *)(obj_body_buf + 4);
- /* Seems like the compiler doesnt correctly copy the float, so memcpy()
+ /* Seems like the compiler doesn't correctly copy the float, so memcpy()
* it */
memcpy(uint32_ptr, &(metric->value), sizeof(uint32_t));
*uint32_ptr = htonl(*uint32_ptr);
hdr, sizeof(struct pcep_object_bandwidth));
uint32_t value = ntohl(*((uint32_t *)obj_buf));
- /* Seems like the compiler doesnt correctly copy to the float, so
+ /* Seems like the compiler doesn't correctly copy to the float, so
* memcpy() it */
memcpy(&obj->bandwidth, &value, sizeof(uint32_t));
obj->flag_c = (obj_buf[2] & OBJECT_METRIC_FLAC_C);
obj->type = obj_buf[3];
uint32_t value = ntohl(*((uint32_t *)(obj_buf + 4)));
- /* Seems like the compiler doesnt correctly copy to the float, so
+ /* Seems like the compiler doesn't correctly copy to the float, so
* memcpy() it */
memcpy(&obj->value, &value, sizeof(uint32_t));
return (struct pcep_object_tlv_header *)ipv4;
} else {
ipv4->is_ipv4 = false;
- struct pcep_object_tlv_srpag_pol_id *ipv6 =
- (struct pcep_object_tlv_srpag_pol_id *)ipv4;
+ struct pcep_object_tlv_srpag_pol_id *ipv6 = ipv4;
ipv6->color = ntohl(uint32_ptr[0]);
decode_ipv6(&uint32_ptr[1], &ipv6->end_point.ipv6);
return (struct pcep_object_tlv_header *)ipv6;
int setup_signals()
{
struct sigaction sa;
- memset(&sa, 0, sizeof(struct sigaction));
+ memset(&sa, 0, sizeof(sa));
sa.sa_handler = handle_signal_action;
if (sigaction(SIGINT, &sa, 0) != 0) {
perror("sigaction()");
if (comm_session->close_after_write == true) {
if (comm_session->message_queue->num_entries == 0) {
/* TODO check to make sure modifying the
- * write_list while iterating it doesnt cause
+ * write_list while iterating it doesn't cause
* problems. */
pcep_log(
LOG_DEBUG,
void test_handle_socket_comm_event_null_params()
{
- /* Verify it doesnt core dump */
+ /* Verify it doesn't core dump */
handle_socket_comm_event(NULL);
verify_socket_comm_times_called(0, 0, 0, 0, 0, 0, 0);
reset_mock_socket_comm_info();
#include "pim_nb.h"
#include "pim_addr.h"
#include "pim_nht.h"
-
+#include "pim_bsm.h"
+#include "pim_iface.h"
+#include "pim_zebra.h"
+#include "pim_instance.h"
#ifndef VTYSH_EXTRACT_PL
#include "pimd/pim6_cmd_clippy.c"
#endif
+/* CLI node under which "debug pimv6 ..." lines are persisted to the running
+ * config; pim_debug_config_write emits the currently-enabled debug flags. */
+static struct cmd_node debug_node = {
+ .name = "debug",
+ .node = DEBUG_NODE,
+ .prompt = "",
+ .config_write = pim_debug_config_write,
+};
+
DEFPY (ipv6_pim_joinprune_time,
ipv6_pim_joinprune_time_cmd,
"ipv6 pim join-prune-interval (1-65535)$jpi",
DEFPY (interface_ipv6_pim,
interface_ipv6_pim_cmd,
- "ipv6 pim",
+ "ipv6 pim [passive$passive]",
IPV6_STR
- PIM_STR)
+ PIM_STR
+ "Disable exchange of protocol packets\n")
{
- return pim_process_ip_pim_cmd(vty);
+ int ret;
+
+ /* enable PIM on the interface first; bail out on northbound failure */
+ ret = pim_process_ip_pim_cmd(vty);
+
+ if (ret != NB_OK)
+ return ret;
+
+ /* optionally also mark the interface passive (no protocol packets) */
+ if (passive)
+ return pim_process_ip_pim_passive_cmd(vty, true);
+
+ return CMD_SUCCESS;
}
DEFPY (interface_no_ipv6_pim,
interface_no_ipv6_pim_cmd,
- "no ipv6 pim",
+ "no ipv6 pim [passive$passive]",
NO_STR
IPV6_STR
- PIM_STR)
+ PIM_STR
+ "Disable exchange of protocol packets\n")
{
+ /* "no ipv6 pim passive" only clears passive mode and keeps PIM
+ * enabled; plain "no ipv6 pim" disables PIM on the interface. */
+ if (passive)
+ return pim_process_ip_pim_passive_cmd(vty, false);
+
return pim_process_no_ip_pim_cmd(vty);
}
"Configure group limit for watermark warning\n"
"Group count to generate watermark warning\n")
{
- PIM_DECLVAR_CONTEXT(vrf, pim);
+ PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
+
/* TBD Depends on MLD data structure changes */
+ (void)pim;
+
return CMD_SUCCESS;
}
"Unconfigure group limit for watermark warning\n"
IGNORED_IN_NO_STR)
{
- PIM_DECLVAR_CONTEXT(vrf, pim);
+ PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
+
/* TBD Depends on MLD data structure changes */
+ (void)pim;
+
return CMD_SUCCESS;
}
IPV6_STR
IFACE_MLD_STR
IFACE_MLD_QUERY_MAX_RESPONSE_TIME_STR
- "Query response value in deci-seconds\n")
+ "Query response value in milliseconds\n")
{
return gm_process_query_max_response_time_cmd(vty, qmrt_str);
}
return CMD_SUCCESS;
}
+/* "show ipv6 mroute ..." -- dump the IPv6 multicast routing table for one
+ * VRF (default VRF if none given), optionally narrowed to a group or to an
+ * (S,G) pair, as text or JSON. */
+DEFPY (show_ipv6_mroute,
+ show_ipv6_mroute_cmd,
+ "show ipv6 mroute [vrf NAME] [X:X::X:X$s_or_g [X:X::X:X$g]] [fill$fill] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "The Source or Group\n"
+ "The Group\n"
+ "Fill in Assumed data\n"
+ JSON_STR)
+{
+ pim_sgaddr sg = {0};
+ struct pim_instance *pim;
+ struct vrf *v;
+ json_object *json_parent = NULL;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v)
+ return CMD_WARNING;
+
+ pim = pim_get_pim_instance(v->vrf_id);
+
+ if (!pim) {
+ vty_out(vty, "%% Unable to find pim instance\n");
+ return CMD_WARNING;
+ }
+
+ if (json)
+ json_parent = json_object_new_object();
+
+ /* one address on the CLI means "group only"; two mean (source, group) */
+ if (!pim_addr_is_any(s_or_g)) {
+ if (!pim_addr_is_any(g)) {
+ sg.src = s_or_g;
+ sg.grp = g;
+ } else
+ sg.grp = s_or_g;
+ }
+
+ show_mroute(pim, vty, &sg, !!fill, json_parent);
+
+ if (json)
+ vty_json(vty, json_parent);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ipv6 mroute vrf all" -- iterate every VRF and dump its mroute
+ * table; in JSON mode each VRF becomes a keyed sub-object. */
+DEFPY (show_ipv6_mroute_vrf_all,
+ show_ipv6_mroute_vrf_all_cmd,
+ "show ipv6 mroute vrf all [fill$fill] [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Fill in Assumed data\n"
+ JSON_STR)
+{
+ pim_sgaddr sg = {0};
+ struct vrf *vrf;
+ json_object *json_parent = NULL;
+ json_object *json_vrf = NULL;
+
+ if (json)
+ json_parent = json_object_new_object();
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ if (!json)
+ vty_out(vty, "VRF: %s\n", vrf->name);
+ else
+ json_vrf = json_object_new_object();
+ /* all-zero sg => no (S,G) filter, show everything */
+ show_mroute(vrf->info, vty, &sg, !!fill, json_vrf);
+ if (json)
+ json_object_object_add(json_parent, vrf->name,
+ json_vrf);
+ }
+ if (json)
+ vty_json(vty, json_parent);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ipv6 mroute [vrf NAME] count" -- route/packet counters for a
+ * single VRF, text or JSON. */
+DEFPY (show_ipv6_mroute_count,
+ show_ipv6_mroute_count_cmd,
+ "show ipv6 mroute [vrf NAME] count [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Route and packet count data\n"
+ JSON_STR)
+{
+ struct pim_instance *pim;
+ struct vrf *v;
+ json_object *json_parent = NULL;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v)
+ return CMD_WARNING;
+
+ pim = pim_get_pim_instance(v->vrf_id);
+
+ if (!pim) {
+ vty_out(vty, "%% Unable to find pim instance\n");
+ return CMD_WARNING;
+ }
+
+ if (json)
+ json_parent = json_object_new_object();
+
+ show_mroute_count(pim, vty, json_parent);
+
+ if (json)
+ vty_json(vty, json_parent);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ipv6 mroute vrf all count" -- per-VRF route/packet counters for
+ * every VRF, text or JSON (one sub-object per VRF). */
+DEFPY (show_ipv6_mroute_count_vrf_all,
+ show_ipv6_mroute_count_vrf_all_cmd,
+ "show ipv6 mroute vrf all count [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Route and packet count data\n"
+ JSON_STR)
+{
+ struct vrf *vrf;
+ json_object *json_parent = NULL;
+ json_object *json_vrf = NULL;
+
+ if (json)
+ json_parent = json_object_new_object();
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ if (!json)
+ vty_out(vty, "VRF: %s\n", vrf->name);
+ else
+ json_vrf = json_object_new_object();
+ show_mroute_count(vrf->info, vty, json_vrf);
+
+ if (json)
+ json_object_object_add(json_parent, vrf->name,
+ json_vrf);
+ }
+
+ if (json)
+ vty_json(vty, json_parent);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ipv6 mroute [vrf NAME] summary" -- aggregate mroute statistics
+ * for a single VRF, text or JSON. */
+DEFPY (show_ipv6_mroute_summary,
+ show_ipv6_mroute_summary_cmd,
+ "show ipv6 mroute [vrf NAME] summary [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Summary of all mroutes\n"
+ JSON_STR)
+{
+ struct pim_instance *pim;
+ struct vrf *v;
+ json_object *json_parent = NULL;
+
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v)
+ return CMD_WARNING;
+
+ pim = pim_get_pim_instance(v->vrf_id);
+
+ if (!pim) {
+ vty_out(vty, "%% Unable to find pim instance\n");
+ return CMD_WARNING;
+ }
+
+ if (json)
+ json_parent = json_object_new_object();
+
+ show_mroute_summary(pim, vty, json_parent);
+
+ if (json)
+ vty_json(vty, json_parent);
+
+ return CMD_SUCCESS;
+}
+
+/* "show ipv6 mroute vrf all summary" -- aggregate mroute statistics for
+ * every VRF, text or JSON (one sub-object per VRF). */
+DEFPY (show_ipv6_mroute_summary_vrf_all,
+ show_ipv6_mroute_summary_vrf_all_cmd,
+ "show ipv6 mroute vrf all summary [json$json]",
+ SHOW_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Summary of all mroutes\n"
+ JSON_STR)
+{
+ struct vrf *vrf;
+ json_object *json_parent = NULL;
+ json_object *json_vrf = NULL;
+
+ if (json)
+ json_parent = json_object_new_object();
+
+ RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+ if (!json)
+ vty_out(vty, "VRF: %s\n", vrf->name);
+ else
+ json_vrf = json_object_new_object();
+
+ show_mroute_summary(vrf->info, vty, json_vrf);
+
+ if (json)
+ json_object_object_add(json_parent, vrf->name,
+ json_vrf);
+ }
+
+ if (json)
+ vty_json(vty, json_parent);
+
+ return CMD_SUCCESS;
+}
+
+/* "clear ipv6 ..." operational commands. Each resolves the target VRF via
+ * pim_cmd_lookup() (which prints its own error) and then delegates to the
+ * shared PIM clear helpers. */
+DEFPY (clear_ipv6_pim_statistics,
+ clear_ipv6_pim_statistics_cmd,
+ "clear ipv6 pim statistics [vrf NAME]$name",
+ CLEAR_STR
+ IPV6_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Reset PIM statistics\n")
+{
+ struct vrf *v = pim_cmd_lookup(vty, name);
+
+ if (!v)
+ return CMD_WARNING;
+
+ clear_pim_statistics(v->info);
+
+ return CMD_SUCCESS;
+}
+
+/* flush the multicast routing table for one VRF */
+DEFPY (clear_ipv6_mroute,
+ clear_ipv6_mroute_cmd,
+ "clear ipv6 mroute [vrf NAME]$name",
+ CLEAR_STR
+ IPV6_STR
+ "Reset multicast routes\n"
+ VRF_CMD_HELP_STR)
+{
+ struct vrf *v = pim_cmd_lookup(vty, name);
+
+ if (!v)
+ return CMD_WARNING;
+
+ clear_mroute(v->info);
+
+ return CMD_SUCCESS;
+}
+
+/* trigger a rescan of all output interface lists in one VRF */
+DEFPY (clear_ipv6_pim_oil,
+ clear_ipv6_pim_oil_cmd,
+ "clear ipv6 pim [vrf NAME]$name oil",
+ CLEAR_STR
+ IPV6_STR
+ CLEAR_IP_PIM_STR
+ VRF_CMD_HELP_STR
+ "Rescan PIMv6 OIL (output interface list)\n")
+{
+ struct vrf *v = pim_cmd_lookup(vty, name);
+
+ if (!v)
+ return CMD_WARNING;
+
+ pim_scan_oil(v->info);
+
+ return CMD_SUCCESS;
+}
+
+/* reset mroute packet/byte counters; VRF lookup handled by the helper */
+DEFPY (clear_ipv6_mroute_count,
+ clear_ipv6_mroute_count_cmd,
+ "clear ipv6 mroute [vrf NAME]$name count",
+ CLEAR_STR
+ IPV6_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Route and packet count data\n")
+{
+ return clear_ip_mroute_count_command(vty, name);
+}
+
+/* "[no] debug pimv6 ..." toggles. These set/clear the shared PIM debug
+ * flag macros (PIM_DO_/PIM_DONT_...), so PIMv6 debugging reuses the same
+ * global flags as the IPv4 code. */
+DEFPY (debug_pimv6,
+ debug_pimv6_cmd,
+ "[no] debug pimv6",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR)
+{
+ if (!no)
+ return pim_debug_pim_cmd();
+ else
+ return pim_no_debug_pim_cmd();
+}
+
+DEFPY (debug_pimv6_nht,
+ debug_pimv6_nht_cmd,
+ "[no] debug pimv6 nht",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ "Nexthop Tracking\n")
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_NHT;
+ else
+ PIM_DONT_DEBUG_PIM_NHT;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_nht_det,
+ debug_pimv6_nht_det_cmd,
+ "[no] debug pimv6 nht detail",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ "Nexthop Tracking\n"
+ "Detailed Information\n")
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_NHT_DETAIL;
+ else
+ PIM_DONT_DEBUG_PIM_NHT_DETAIL;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_events,
+ debug_pimv6_events_cmd,
+ "[no] debug pimv6 events",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_EVENTS_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_EVENTS;
+ else
+ PIM_DONT_DEBUG_PIM_EVENTS;
+ return CMD_SUCCESS;
+}
+
+/* packet debugging can be narrowed to hello / join-prune / register */
+DEFPY (debug_pimv6_packets,
+ debug_pimv6_packets_cmd,
+ "[no] debug pimv6 packets [<hello$hello|joins$joins|register$registers>]",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_PACKETS_STR
+ DEBUG_PIMV6_HELLO_PACKETS_STR
+ DEBUG_PIMV6_J_P_PACKETS_STR
+ DEBUG_PIMV6_PIM_REG_PACKETS_STR)
+{
+ if (!no)
+ return pim_debug_pim_packets_cmd(hello, joins, registers, vty);
+ else
+ return pim_no_debug_pim_packets_cmd(hello, joins, registers,
+ vty);
+}
+
+DEFPY (debug_pimv6_packetdump_send,
+ debug_pimv6_packetdump_send_cmd,
+ "[no] debug pimv6 packet-dump send",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_PACKETDUMP_STR
+ DEBUG_PIMV6_PACKETDUMP_SEND_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_PACKETDUMP_SEND;
+ else
+ PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_packetdump_recv,
+ debug_pimv6_packetdump_recv_cmd,
+ "[no] debug pimv6 packet-dump receive",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_PACKETDUMP_STR
+ DEBUG_PIMV6_PACKETDUMP_RECV_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_PACKETDUMP_RECV;
+ else
+ PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_trace,
+ debug_pimv6_trace_cmd,
+ "[no] debug pimv6 trace",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_TRACE_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_TRACE;
+ else
+ PIM_DONT_DEBUG_PIM_TRACE;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_trace_detail,
+ debug_pimv6_trace_detail_cmd,
+ "[no] debug pimv6 trace detail",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_TRACE_STR
+ "Detailed Information\n")
+{
+ if (!no)
+ PIM_DO_DEBUG_PIM_TRACE_DETAIL;
+ else
+ PIM_DONT_DEBUG_PIM_TRACE_DETAIL;
+ return CMD_SUCCESS;
+}
+
+DEFPY (debug_pimv6_zebra,
+ debug_pimv6_zebra_cmd,
+ "[no] debug pimv6 zebra",
+ NO_STR
+ DEBUG_STR
+ DEBUG_PIMV6_STR
+ DEBUG_PIMV6_ZEBRA_STR)
+{
+ if (!no)
+ PIM_DO_DEBUG_ZEBRA;
+ else
+ PIM_DONT_DEBUG_ZEBRA;
+ return CMD_SUCCESS;
+}
+
void pim_cmd_init(void)
{
if_cmd_init(pim_interface_config_write);
+ /* register the debug node so enabled pimv6 debugs are saved to config */
+ install_node(&debug_node);
+
install_element(CONFIG_NODE, &ipv6_pim_joinprune_time_cmd);
install_element(CONFIG_NODE, &no_ipv6_pim_joinprune_time_cmd);
install_element(CONFIG_NODE, &ipv6_pim_spt_switchover_infinity_cmd);
install_element(VIEW_NODE, &show_ipv6_multicast_vrf_all_cmd);
install_element(VIEW_NODE, &show_ipv6_multicast_count_cmd);
install_element(VIEW_NODE, &show_ipv6_multicast_count_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_count_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_count_vrf_all_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_summary_cmd);
+ install_element(VIEW_NODE, &show_ipv6_mroute_summary_vrf_all_cmd);
+
+ install_element(ENABLE_NODE, &clear_ipv6_pim_statistics_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_mroute_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_pim_oil_cmd);
+ install_element(ENABLE_NODE, &clear_ipv6_mroute_count_cmd);
+ /* debug commands are available both at enable level and as config lines,
+ * hence the duplicate ENABLE_NODE/CONFIG_NODE installs below */
+ install_element(ENABLE_NODE, &debug_pimv6_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_nht_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_nht_det_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_events_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_packets_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_packetdump_send_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_packetdump_recv_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_trace_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_trace_detail_cmd);
+ install_element(ENABLE_NODE, &debug_pimv6_zebra_cmd);
+
+ install_element(CONFIG_NODE, &debug_pimv6_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_nht_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_nht_det_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_events_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_packets_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_packetdump_send_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_packetdump_recv_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_trace_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_trace_detail_cmd);
+ install_element(CONFIG_NODE, &debug_pimv6_zebra_cmd);
}
#define IFACE_PIM_HELLO_TIME_STR "Time in seconds for Hello Interval\n"
#define IFACE_PIM_HELLO_HOLD_STR "Time in seconds for Hold Interval\n"
#define MROUTE_STR "IP multicast routing table\n"
+#define CLEAR_IP_PIM_STR "PIM clear commands\n"
#define DEBUG_MLD_STR "MLD protocol activity\n"
#define DEBUG_MLD_EVENTS_STR "MLD protocol events\n"
#define DEBUG_MLD_PACKETS_STR "MLD protocol packets\n"
#define DEBUG_MLD_TRACE_STR "MLD internal daemon activity\n"
#define CONF_SSMPINGD_STR "Enable ssmpingd operation\n"
+#define DEBUG_PIMV6_STR "PIMv6 protocol activity\n"
+#define DEBUG_PIMV6_EVENTS_STR "PIMv6 protocol events\n"
+#define DEBUG_PIMV6_PACKETS_STR "PIMv6 protocol packets\n"
+#define DEBUG_PIMV6_HELLO_PACKETS_STR "PIMv6 Hello protocol packets\n"
+#define DEBUG_PIMV6_J_P_PACKETS_STR "PIMv6 Join/Prune protocol packets\n"
+#define DEBUG_PIMV6_PIM_REG_PACKETS_STR \
+ "PIMv6 Register/Reg-Stop protocol packets\n"
+#define DEBUG_PIMV6_PACKETDUMP_STR "PIMv6 packet dump\n"
+#define DEBUG_PIMV6_PACKETDUMP_SEND_STR "Dump sent packets\n"
+#define DEBUG_PIMV6_PACKETDUMP_RECV_STR "Dump received packets\n"
+#define DEBUG_PIMV6_TRACE_STR "PIMv6 internal daemon activity\n"
+#define DEBUG_PIMV6_ZEBRA_STR "ZEBRA protocol activity\n"
void pim_cmd_init(void);
#include "pim_zebra.h"
#include "pim_nb.h"
#include "pim6_cmd.h"
+#include "pim6_mld.h"
zebra_capabilities_t _caps_p[] = {
ZCAP_SYS_ADMIN,
);
/* clang-format on */
-
int main(int argc, char **argv, char **envp)
{
static struct option longopts[] = {
*/
pim_iface_init();
+ gm_cli_init();
+
pim_zebra_init();
#if 0
pim_bfd_init();
--- /dev/null
+/*
+ * PIMv6 MLD querier
+ * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * keep pim6_mld.h open when working on this code. Most data structures are
+ * commented in the header.
+ *
+ * IPv4 support is pre-planned but hasn't been tackled yet. It is intended
+ * that this code will replace the old IGMP querier at some point.
+ */
+
+#include <zebra.h>
+#include <netinet/ip6.h>
+
+#include "lib/memory.h"
+#include "lib/jhash.h"
+#include "lib/prefix.h"
+#include "lib/checksum.h"
+#include "lib/thread.h"
+
+#include "pimd/pim6_mld.h"
+#include "pimd/pim6_mld_protocol.h"
+#include "pimd/pim_memory.h"
+#include "pimd/pim_instance.h"
+#include "pimd/pim_iface.h"
+#include "pimd/pim_util.h"
+#include "pimd/pim_tib.h"
+#include "pimd/pimd.h"
+
+#ifndef IPV6_MULTICAST_ALL
+#define IPV6_MULTICAST_ALL 29
+#endif
+
+DEFINE_MTYPE_STATIC(PIMD, GM_IFACE, "MLD interface");
+DEFINE_MTYPE_STATIC(PIMD, GM_PACKET, "MLD packet");
+DEFINE_MTYPE_STATIC(PIMD, GM_SUBSCRIBER, "MLD subscriber");
+DEFINE_MTYPE_STATIC(PIMD, GM_STATE, "MLD subscription state");
+DEFINE_MTYPE_STATIC(PIMD, GM_SG, "MLD (S,G)");
+DEFINE_MTYPE_STATIC(PIMD, GM_GRP_PENDING, "MLD group query state");
+DEFINE_MTYPE_STATIC(PIMD, GM_GSQ_PENDING, "MLD group/source query aggregate");
+
+static void gm_t_query(struct thread *t);
+static void gm_trigger_specific(struct gm_sg *sg);
+static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
+ struct timeval expire_wait);
+
+/* shorthand for log messages: each macro expands to a "fmt", args...
+ * sequence for zlog-style calls and relies on a local gm_ifp / pkt_src /
+ * sg variable being in scope at the call site */
+#define log_ifp(msg) \
+ "[MLD %s:%s] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name
+#define log_pkt_src(msg) \
+ "[MLD %s:%s %pI6] " msg, gm_ifp->ifp->vrf->name, gm_ifp->ifp->name, \
+ &pkt_src->sin6_addr
+#define log_sg(sg, msg) \
+ "[MLD %s:%s %pSG] " msg, sg->iface->ifp->vrf->name, \
+ sg->iface->ifp->name, &sg->sgaddr
+
+/* clang-format off */
+#if PIM_IPV == 6
+/* ff02::1 -- link-local all-nodes */
+static const pim_addr gm_all_hosts = {
+ .s6_addr = {
+ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ },
+};
+/* ff02::16 -- all MLDv2-capable routers (report destination) */
+static const pim_addr gm_all_routers = {
+ .s6_addr = {
+ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16,
+ },
+};
+/* MLDv1 does not allow subscriber tracking due to report suppression;
+ * hence, the source address is replaced with ffff:...:ffff
+ */
+static const pim_addr gm_dummy_untracked = {
+ .s6_addr = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ },
+};
+#else
+/* 224.0.0.1 */
+static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), };
+/* 224.0.0.22 */
+static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), };
+static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, };
+#endif
+/* clang-format on */
+
+#define IPV6_MULTICAST_SCOPE_LINK 2
+
+/* extract the 4-bit multicast scope field (low nibble of the second
+ * address byte, RFC 4291 section 2.7) */
+static inline uint8_t in6_multicast_scope(const pim_addr *addr)
+{
+ return addr->s6_addr[1] & 0xf;
+}
+
+/* true if the group must not be forwarded off-link (interface-local or
+ * link-local scope) */
+static inline bool in6_multicast_nofwd(const pim_addr *addr)
+{
+ return in6_multicast_scope(addr) <= IPV6_MULTICAST_SCOPE_LINK;
+}
+
+/*
+ * (S,G) -> subscriber,(S,G)
+ */
+
+/* order per-(S,G) subscription items by their owning subscriber's address */
+static int gm_packet_sg_cmp(const struct gm_packet_sg *a,
+ const struct gm_packet_sg *b)
+{
+ const struct gm_packet_state *s_a, *s_b;
+
+ s_a = gm_packet_sg2state(a);
+ s_b = gm_packet_sg2state(b);
+ return IPV6_ADDR_CMP(&s_a->subscriber->addr, &s_b->subscriber->addr);
+}
+
+DECLARE_RBTREE_UNIQ(gm_packet_sg_subs, struct gm_packet_sg, subs_itm,
+ gm_packet_sg_cmp);
+
+/* look up sub's subscription item on sg, in the positive or negative
+ * (sense) tree. The fake hdr+item pair on the stack exists so that
+ * gm_packet_sg2state() on &ref.item resolves to &ref.hdr -- presumably a
+ * container_of/offset construction; confirm against pim6_mld.h. */
+static struct gm_packet_sg *gm_packet_sg_find(struct gm_sg *sg,
+ enum gm_sub_sense sense,
+ struct gm_subscriber *sub)
+{
+ struct {
+ struct gm_packet_state hdr;
+ struct gm_packet_sg item;
+ } ref = {
+ /* clang-format off */
+ .hdr = {
+ .subscriber = sub,
+ },
+ .item = {
+ .offset = 0,
+ },
+ /* clang-format on */
+ };
+
+ return gm_packet_sg_subs_find(&sg->subs[sense], &ref.item);
+}
+
+/*
+ * interface -> (*,G),pending
+ */
+
+/* pending group-specific queries are keyed by group address only */
+static int gm_grp_pending_cmp(const struct gm_grp_pending *a,
+ const struct gm_grp_pending *b)
+{
+ return IPV6_ADDR_CMP(&a->grp, &b->grp);
+}
+
+DECLARE_RBTREE_UNIQ(gm_grp_pends, struct gm_grp_pending, itm,
+ gm_grp_pending_cmp);
+
+/*
+ * interface -> ([S1,S2,...],G),pending
+ */
+
+/* pending group-and-source queries are keyed by (s_bit, group); entries
+ * with and without the suppress-router-processing bit are kept separate */
+static int gm_gsq_pending_cmp(const struct gm_gsq_pending *a,
+ const struct gm_gsq_pending *b)
+{
+ if (a->s_bit != b->s_bit)
+ return numcmp(a->s_bit, b->s_bit);
+
+ return IPV6_ADDR_CMP(&a->grp, &b->grp);
+}
+
+static uint32_t gm_gsq_pending_hash(const struct gm_gsq_pending *a)
+{
+ /* distinct seed per s_bit value so both variants spread independently */
+ uint32_t seed = a->s_bit ? 0x68f0eb5e : 0x156b7f19;
+
+ return jhash(&a->grp, sizeof(a->grp), seed);
+}
+
+DECLARE_HASH(gm_gsq_pends, struct gm_gsq_pending, itm, gm_gsq_pending_cmp,
+ gm_gsq_pending_hash);
+
+/*
+ * interface -> (S,G)
+ */
+
+/* per-interface (S,G) table, ordered by full (source, group) pair */
+static int gm_sg_cmp(const struct gm_sg *a, const struct gm_sg *b)
+{
+ return pim_sgaddr_cmp(a->sgaddr, b->sgaddr);
+}
+
+DECLARE_RBTREE_UNIQ(gm_sgs, struct gm_sg, itm, gm_sg_cmp);
+
+/* lookup only -- returns NULL if the (S,G) does not exist on gm_ifp */
+static struct gm_sg *gm_sg_find(struct gm_if *gm_ifp, pim_addr grp,
+ pim_addr src)
+{
+ struct gm_sg ref = {};
+
+ ref.sgaddr.grp = grp;
+ ref.sgaddr.src = src;
+ return gm_sgs_find(gm_ifp->sgs, &ref);
+}
+
+/* find-or-create: returns the existing (S,G) if present, otherwise
+ * allocates and initializes a new one */
+static struct gm_sg *gm_sg_make(struct gm_if *gm_ifp, pim_addr grp,
+ pim_addr src)
+{
+ struct gm_sg *ret, *prev;
+
+ ret = XCALLOC(MTYPE_GM_SG, sizeof(*ret));
+ ret->sgaddr.grp = grp;
+ ret->sgaddr.src = src;
+ ret->iface = gm_ifp;
+ prev = gm_sgs_add(gm_ifp->sgs, ret);
+
+ if (prev) {
+ /* entry already existed; drop our fresh allocation */
+ XFREE(MTYPE_GM_SG, ret);
+ ret = prev;
+ } else {
+ monotime(&ret->created);
+ gm_packet_sg_subs_init(ret->subs_positive);
+ gm_packet_sg_subs_init(ret->subs_negative);
+ }
+ return ret;
+}
+
+/*
+ * interface -> packets, sorted by expiry (because add_tail insert order)
+ */
+
+DECLARE_DLIST(gm_packet_expires, struct gm_packet_state, exp_itm);
+
+/*
+ * subscriber -> packets
+ */
+
+DECLARE_DLIST(gm_packets, struct gm_packet_state, pkt_itm);
+
+/*
+ * interface -> subscriber
+ */
+
+/* subscribers (report senders) are keyed by their link-local address */
+static int gm_subscriber_cmp(const struct gm_subscriber *a,
+ const struct gm_subscriber *b)
+{
+ return IPV6_ADDR_CMP(&a->addr, &b->addr);
+}
+
+static uint32_t gm_subscriber_hash(const struct gm_subscriber *a)
+{
+ return jhash(&a->addr, sizeof(a->addr), 0xd0e94ad4);
+}
+
+DECLARE_HASH(gm_subscribers, struct gm_subscriber, itm, gm_subscriber_cmp,
+ gm_subscriber_hash);
+
+static struct gm_subscriber *gm_subscriber_findref(struct gm_if *gm_ifp,
+ pim_addr addr)
+{
+ struct gm_subscriber ref = {}, *ret;
+
+ ref.addr = addr;
+ ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
+ if (ret)
+ ret->refcount++;
+ return ret;
+}
+
+/* find-or-create a subscriber by address.  A newly created subscriber
+ * starts with refcount 1; note that, unlike gm_subscriber_findref(), this
+ * does NOT bump the refcount of an already-existing subscriber.
+ */
+static struct gm_subscriber *gm_subscriber_get(struct gm_if *gm_ifp,
+					       pim_addr addr)
+{
+	struct gm_subscriber ref = {}, *ret;
+
+	ref.addr = addr;
+	ret = gm_subscribers_find(gm_ifp->subscribers, &ref);
+
+	if (!ret) {
+		ret = XCALLOC(MTYPE_GM_SUBSCRIBER, sizeof(*ret));
+		ret->iface = gm_ifp;
+		ret->addr = addr;
+		ret->refcount = 1;
+		monotime(&ret->created);
+		gm_packets_init(ret->packets);
+
+		gm_subscribers_add(gm_ifp->subscribers, ret);
+	}
+	return ret;
+}
+
+/* release one reference on *subp and NULL out the caller's pointer.
+ * When the refcount hits zero the subscriber is removed from the
+ * interface hash and freed.  NULL *subp is a no-op.
+ */
+static void gm_subscriber_drop(struct gm_subscriber **subp)
+{
+	struct gm_subscriber *sub = *subp;
+	struct gm_if *gm_ifp;
+
+	if (!sub)
+		return;
+	gm_ifp = sub->iface;
+
+	*subp = NULL;
+	sub->refcount--;
+
+	if (sub->refcount)
+		return;
+
+	gm_subscribers_del(gm_ifp->subscribers, sub);
+	XFREE(MTYPE_GM_SUBSCRIBER, sub);
+}
+
+/****************************************************************************/
+
+/* bundle query timer values for combined v1/v2 handling */
+struct gm_query_timers {
+	unsigned int qrv;		/* robustness variable (retransmits) */
+	unsigned int max_resp_ms;	/* query max response time */
+	unsigned int qqic_ms;		/* querier's query interval */
+
+	struct timeval fuzz;		/* extra config'd slack for delays */
+	struct timeval expire_wait;	/* out: computed total expiry wait */
+};
+
+/* fill in timers->expire_wait from qrv/qqic/max_resp:
+ * (qrv - 1) retransmit intervals plus the final max response time,
+ * plus the configured fuzz allowance.
+ */
+static void gm_expiry_calc(struct gm_query_timers *timers)
+{
+	unsigned int expire =
+		(timers->qrv - 1) * timers->qqic_ms + timers->max_resp_ms;
+	ldiv_t exp_div = ldiv(expire, 1000);
+
+	timers->expire_wait.tv_sec = exp_div.quot;
+	timers->expire_wait.tv_usec = exp_div.rem * 1000;
+	timeradd(&timers->expire_wait, &timers->fuzz, &timers->expire_wait);
+}
+
+/* final teardown of an S,G entry; caller must already have removed it
+ * from gm_ifp->sgs and both subscription lists must be empty.
+ */
+static void gm_sg_free(struct gm_sg *sg)
+{
+	/* t_sg_expiry is handled before this is reached */
+	THREAD_OFF(sg->t_sg_query);
+	gm_packet_sg_subs_fini(sg->subs_negative);
+	gm_packet_sg_subs_fini(sg->subs_positive);
+	XFREE(MTYPE_GM_SG, sg);
+}
+
+/* clang-format off */
+/* printable names for enum gm_sg_state, used in debug logs/asserts */
+static const char *const gm_states[] = {
+	[GM_SG_NOINFO]			= "NOINFO",
+	[GM_SG_JOIN]			= "JOIN",
+	[GM_SG_JOIN_EXPIRING]		= "JOIN_EXPIRING",
+	[GM_SG_PRUNE]			= "PRUNE",
+	[GM_SG_NOPRUNE]			= "NOPRUNE",
+	[GM_SG_NOPRUNE_EXPIRING]	= "NOPRUNE_EXPIRING",
+};
+/* clang-format on */
+
+CPP_NOTICE("TODO: S,G entries in EXCLUDE (i.e. prune) unsupported");
+/* tib_sg_gm_prune() below is an "un-join", it doesn't prune S,G when *,G is
+ * joined.  Whether we actually want/need to support this is a separate
+ * question - it is almost never used.  In fact this is exactly what RFC5790
+ * ("lightweight" MLDv2) does:  it removes S,G EXCLUDE support.
+ */
+
+/* central S,G state machine.  Called whenever the positive/negative
+ * subscription lists of sg (or its *,G parent) may have changed.
+ *
+ * Computes the desired gm_sg_state from the subscription counts, starts
+ * the expiry/query machinery when entering an *_EXPIRING state, syncs the
+ * join/prune decision into the TIB, and finally frees the entry once it
+ * reaches NOINFO with no subscribers left.
+ *
+ * has_expired: true when invoked from an expiry handler; suppresses the
+ * JOIN_EXPIRING/NOPRUNE_EXPIRING grace states so the entry can actually
+ * transition out.
+ */
+static void gm_sg_update(struct gm_sg *sg, bool has_expired)
+{
+	struct gm_if *gm_ifp = sg->iface;
+	enum gm_sg_state prev, desired;
+	bool new_join;
+	struct gm_sg *grp = NULL;
+
+	/* S,G state can depend on the *,G entry (EXCLUDE mode) */
+	if (!pim_addr_is_any(sg->sgaddr.src))
+		grp = gm_sg_find(gm_ifp, sg->sgaddr.grp, PIMADDR_ANY);
+	else
+		assert(sg->state != GM_SG_PRUNE);
+
+	if (gm_packet_sg_subs_count(sg->subs_positive)) {
+		/* at least one active INCLUDE/EXCLUDE membership => join */
+		desired = GM_SG_JOIN;
+		assert(!sg->t_sg_expire);
+	} else if ((sg->state == GM_SG_JOIN ||
+		    sg->state == GM_SG_JOIN_EXPIRING) &&
+		   !has_expired)
+		desired = GM_SG_JOIN_EXPIRING;
+	else if (!grp || !gm_packet_sg_subs_count(grp->subs_positive))
+		desired = GM_SG_NOINFO;
+	else if (gm_packet_sg_subs_count(grp->subs_positive) ==
+		 gm_packet_sg_subs_count(sg->subs_negative)) {
+		/* every *,G subscriber excludes this source */
+		if ((sg->state == GM_SG_NOPRUNE ||
+		     sg->state == GM_SG_NOPRUNE_EXPIRING) &&
+		    !has_expired)
+			desired = GM_SG_NOPRUNE_EXPIRING;
+		else
+			desired = GM_SG_PRUNE;
+	} else if (gm_packet_sg_subs_count(sg->subs_negative))
+		desired = GM_SG_NOPRUNE;
+	else
+		desired = GM_SG_NOINFO;
+
+	if (desired != sg->state && !gm_ifp->stopping) {
+		if (PIM_DEBUG_IGMP_EVENTS)
+			zlog_debug(log_sg(sg, "%s => %s"), gm_states[sg->state],
+				   gm_states[desired]);
+
+		if (desired == GM_SG_JOIN_EXPIRING ||
+		    desired == GM_SG_NOPRUNE_EXPIRING) {
+			/* entering a grace period: arm the expiry timer and
+			 * kick off specific queries to see if anyone is
+			 * still interested
+			 */
+			struct gm_query_timers timers;
+
+			timers.qrv = gm_ifp->cur_qrv;
+			timers.max_resp_ms = gm_ifp->cur_max_resp;
+			timers.qqic_ms = gm_ifp->cur_query_intv_trig;
+			timers.fuzz = gm_ifp->cfg_timing_fuzz;
+
+			gm_expiry_calc(&timers);
+			gm_sg_timer_start(gm_ifp, sg, timers.expire_wait);
+
+			THREAD_OFF(sg->t_sg_query);
+			sg->n_query = gm_ifp->cur_qrv;
+			sg->query_sbit = false;
+			gm_trigger_specific(sg);
+		}
+	}
+	prev = sg->state;
+	sg->state = desired;
+
+	/* never forward for non-forwardable groups or while shutting down */
+	if (in6_multicast_nofwd(&sg->sgaddr.grp) || gm_ifp->stopping)
+		new_join = false;
+	else
+		new_join = gm_sg_state_want_join(desired);
+
+	if (new_join && !sg->tib_joined) {
+		/* this will retry if join previously failed */
+		sg->tib_joined = tib_sg_gm_join(gm_ifp->pim, sg->sgaddr,
+						gm_ifp->ifp, &sg->oil);
+		if (!sg->tib_joined)
+			zlog_warn(
+				"MLD join for %pSG%%%s not propagated into TIB",
+				&sg->sgaddr, gm_ifp->ifp->name);
+		else
+			zlog_info(log_ifp("%pSG%%%s TIB joined"), &sg->sgaddr,
+				  gm_ifp->ifp->name);
+
+	} else if (sg->tib_joined && !new_join) {
+		tib_sg_gm_prune(gm_ifp->pim, sg->sgaddr, gm_ifp->ifp, &sg->oil);
+
+		sg->oil = NULL;
+		sg->tib_joined = false;
+	}
+
+	if (desired == GM_SG_NOINFO) {
+		/* dropping the entry - it must be fully quiescent by now */
+		assertf((!sg->t_sg_expire &&
+			 !gm_packet_sg_subs_count(sg->subs_positive) &&
+			 !gm_packet_sg_subs_count(sg->subs_negative)),
+			"%pSG%%%s hx=%u exp=%pTHD state=%s->%s pos=%zu neg=%zu grp=%p",
+			&sg->sgaddr, gm_ifp->ifp->name, has_expired,
+			sg->t_sg_expire, gm_states[prev], gm_states[desired],
+			gm_packet_sg_subs_count(sg->subs_positive),
+			gm_packet_sg_subs_count(sg->subs_negative), grp);
+
+		if (PIM_DEBUG_IGMP_TRACE)
+			zlog_debug(log_sg(sg, "dropping"));
+
+		gm_sgs_del(gm_ifp->sgs, sg);
+		gm_sg_free(sg);
+	}
+}
+
+/****************************************************************************/
+
+/* the following bunch of functions deals with transferring state from
+ * received packets into gm_packet_state.  As a reminder, the querier is
+ * structured to keep all items received in one packet together, since they
+ * will share expiry timers and thus allows efficient handling.
+ */
+
+/* unlink & free a packet's state; caller must have already removed all of
+ * its items from the S,G subscription lists (i.e. pkt->n_active == 0).
+ * Also releases the reference held on the subscriber.
+ */
+static void gm_packet_free(struct gm_packet_state *pkt)
+{
+	gm_packet_expires_del(pkt->iface->expires, pkt);
+	gm_packets_del(pkt->subscriber->packets, pkt);
+	gm_subscriber_drop(&pkt->subscriber);
+	XFREE(MTYPE_GM_STATE, pkt);
+}
+
+/* append one S,G item to the packet's item array (pass 1).
+ * is_excl: item belongs to an EXCLUDE-mode record;
+ * is_src:  item is a source entry (as opposed to the *,G group entry).
+ * The item is not yet linked into any subscription list; that happens in
+ * pass 2.
+ */
+static struct gm_packet_sg *gm_packet_sg_setup(struct gm_packet_state *pkt,
+					       struct gm_sg *sg, bool is_excl,
+					       bool is_src)
+{
+	struct gm_packet_sg *item;
+
+	assert(pkt->n_active < pkt->n_sg);
+
+	item = &pkt->items[pkt->n_active];
+	item->sg = sg;
+	item->is_excl = is_excl;
+	item->is_src = is_src;
+	item->offset = pkt->n_active;
+
+	pkt->n_active++;
+	return item;
+}
+
+/* remove one item (and, for a *,G EXCLUDE item, its trailing excluded
+ * source items) from the S,G subscription lists.  If that leaves the
+ * owning packet with no active items, the packet itself is freed.
+ *
+ * Returns true if the packet was freed (so callers iterating over the
+ * packet's items must stop).
+ */
+static bool gm_packet_sg_drop(struct gm_packet_sg *item)
+{
+	struct gm_packet_state *pkt;
+	size_t i;
+
+	assert(item->sg);
+
+	pkt = gm_packet_sg2state(item);
+	if (item->sg->most_recent == item)
+		item->sg->most_recent = NULL;
+
+	/* excluded sources are stored right after their *,G item */
+	for (i = 0; i < item->n_exclude; i++) {
+		struct gm_packet_sg *excl_item;
+
+		excl_item = item + 1 + i;
+		if (!excl_item->sg)
+			continue;
+
+		gm_packet_sg_subs_del(excl_item->sg->subs_negative, excl_item);
+		excl_item->sg = NULL;
+		pkt->n_active--;
+
+		/* the item itself is still active at this point */
+		assert(pkt->n_active > 0);
+	}
+
+	if (item->is_excl && item->is_src)
+		gm_packet_sg_subs_del(item->sg->subs_negative, item);
+	else
+		gm_packet_sg_subs_del(item->sg->subs_positive, item);
+	item->sg = NULL;
+	pkt->n_active--;
+
+	if (!pkt->n_active) {
+		gm_packet_free(pkt);
+		return true;
+	}
+	return false;
+}
+
+/* drop all remaining items of a packet and update the affected S,G
+ * entries.  gm_packet_sg_drop() frees the packet when the last item goes,
+ * hence the break on "deleted".
+ * trace: emit per-S,G debug logs while dropping.
+ */
+static void gm_packet_drop(struct gm_packet_state *pkt, bool trace)
+{
+	for (size_t i = 0; i < pkt->n_sg; i++) {
+		struct gm_sg *sg = pkt->items[i].sg;
+		bool deleted;
+
+		if (!sg)
+			continue;
+
+		if (trace && PIM_DEBUG_IGMP_TRACE)
+			zlog_debug(log_sg(sg, "general-dropping from %pPA"),
+				   &pkt->subscriber->addr);
+		deleted = gm_packet_sg_drop(&pkt->items[i]);
+
+		gm_sg_update(sg, true);
+		if (deleted)
+			break;
+	}
+}
+
+/* for each source in srcs[], drop this subscriber's existing item on the
+ * matching S,G entry (from the positive or negative list, per "sense")
+ * and rerun the S,G state machine.  Unknown sources are skipped.
+ */
+static void gm_packet_sg_remove_sources(struct gm_if *gm_ifp,
+					struct gm_subscriber *subscriber,
+					pim_addr grp, pim_addr *srcs,
+					size_t n_src, enum gm_sub_sense sense)
+{
+	struct gm_sg *sg;
+	struct gm_packet_sg *old_src;
+	size_t i;
+
+	for (i = 0; i < n_src; i++) {
+		sg = gm_sg_find(gm_ifp, grp, srcs[i]);
+		if (!sg)
+			continue;
+
+		old_src = gm_packet_sg_find(sg, sense, subscriber);
+		if (!old_src)
+			continue;
+
+		gm_packet_sg_drop(old_src);
+		gm_sg_update(sg, false);
+	}
+}
+
+/* a report for this S,G came in: cancel any pending expiry and remember
+ * (query_sbit) that subsequent queries may set the Suppress flag.
+ */
+static void gm_sg_expiry_cancel(struct gm_sg *sg)
+{
+	if (sg->t_sg_expire && PIM_DEBUG_IGMP_TRACE)
+		zlog_debug(log_sg(sg, "alive, cancelling expiry timer"));
+	THREAD_OFF(sg->t_sg_expire);
+	sg->query_sbit = true;
+}
+
+/* first pass: process all changes resulting in removal of state:
+ * - {TO,IS}_INCLUDE removes *,G EXCLUDE state (and S,G)
+ * - ALLOW_NEW_SOURCES, if *,G in EXCLUDE removes S,G state
+ * - BLOCK_OLD_SOURCES, if *,G in INCLUDE removes S,G state
+ * - {TO,IS}_EXCLUDE, if *,G in INCLUDE removes S,G state
+ * note *replacing* state is NOT considered *removing* state here
+ *
+ * everything else is thrown into pkt for creation of state in pass 2
+ */
+static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
+			       struct mld_v2_rec_hdr *rechdr)
+{
+	/* NB: pkt->subscriber can be NULL here if the subscriber was not
+	 * previously seen!
+	 */
+	struct gm_subscriber *subscriber = pkt->subscriber;
+	struct gm_sg *grp;
+	struct gm_packet_sg *old_grp = NULL;
+	struct gm_packet_sg *item;
+	size_t n_src = ntohs(rechdr->n_src);
+	size_t j;
+	bool is_excl = false;
+
+	grp = gm_sg_find(pkt->iface, rechdr->grp, PIMADDR_ANY);
+	if (grp && subscriber)
+		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
+
+	/* a *,G entry on the positive list can only be EXCLUDE mode */
+	assert(old_grp == NULL || old_grp->is_excl);
+
+	switch (rechdr->type) {
+	case MLD_RECTYPE_IS_EXCLUDE:
+	case MLD_RECTYPE_CHANGE_TO_EXCLUDE:
+		/* this always replaces or creates state */
+		is_excl = true;
+		if (!grp)
+			grp = gm_sg_make(pkt->iface, rechdr->grp, PIMADDR_ANY);
+
+		item = gm_packet_sg_setup(pkt, grp, is_excl, false);
+		item->n_exclude = n_src;
+
+		/* [EXCL_INCL_SG_NOTE] referenced below
+		 *
+		 * in theory, we should drop any S,G that the host may have
+		 * previously added in INCLUDE mode.  In practice, this is both
+		 * incredibly rare and entirely irrelevant.  It only makes any
+		 * difference if an S,G that the host previously had on the
+		 * INCLUDE list is now on the blocked list for EXCLUDE, which
+		 * we can cover in processing the S,G list in pass2_excl().
+		 *
+		 * Other S,G from the host are simply left to expire
+		 * "naturally" through general expiry.
+		 */
+		break;
+
+	case MLD_RECTYPE_IS_INCLUDE:
+	case MLD_RECTYPE_CHANGE_TO_INCLUDE:
+		if (old_grp) {
+			/* INCLUDE has no *,G state, so old_grp here refers to
+			 * previous EXCLUDE => delete it
+			 */
+			gm_packet_sg_drop(old_grp);
+			gm_sg_update(grp, false);
+			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
+		}
+		break;
+
+	case MLD_RECTYPE_ALLOW_NEW_SOURCES:
+		if (old_grp) {
+			/* remove S,Gs from EXCLUDE, and then we're done */
+			gm_packet_sg_remove_sources(pkt->iface, subscriber,
+						    rechdr->grp, rechdr->srcs,
+						    n_src, GM_SUB_NEG);
+			return;
+		}
+		/* in INCLUDE mode => ALLOW_NEW_SOURCES is functionally
+		 * identical to IS_INCLUDE (because the list of sources in
+		 * IS_INCLUDE is not exhaustive)
+		 */
+		break;
+
+	case MLD_RECTYPE_BLOCK_OLD_SOURCES:
+		if (old_grp) {
+			/* this is intentionally not implemented because it
+			 * would be complicated as hell.  we only take the list
+			 * of blocked sources from full group state records
+			 */
+			return;
+		}
+
+		if (subscriber)
+			gm_packet_sg_remove_sources(pkt->iface, subscriber,
+						    rechdr->grp, rechdr->srcs,
+						    n_src, GM_SUB_POS);
+		return;
+	}
+
+	/* queue the per-source items for pass 2 (right after their *,G
+	 * item when in EXCLUDE mode)
+	 */
+	for (j = 0; j < n_src; j++) {
+		struct gm_sg *sg;
+
+		sg = gm_sg_find(pkt->iface, rechdr->grp, rechdr->srcs[j]);
+		if (!sg)
+			sg = gm_sg_make(pkt->iface, rechdr->grp,
+					rechdr->srcs[j]);
+
+		gm_packet_sg_setup(pkt, sg, is_excl, true);
+	}
+}
+
+/* second pass: creating/updating/refreshing state.  All the items from the
+ * received packet have already been thrown into gm_packet_state.
+ */
+
+/* pass 2 for a single INCLUDE-mode source item: replace the subscriber's
+ * previous positive item (if any) with the new one, mark it most recent,
+ * cancel pending expiry and rerun the S,G state machine.
+ */
+static void gm_handle_v2_pass2_incl(struct gm_packet_state *pkt, size_t i)
+{
+	struct gm_packet_sg *item = &pkt->items[i];
+	struct gm_packet_sg *old = NULL;
+	struct gm_sg *sg = item->sg;
+
+	/* EXCLUDE state was already dropped in pass1 */
+	assert(!gm_packet_sg_find(sg, GM_SUB_NEG, pkt->subscriber));
+
+	old = gm_packet_sg_find(sg, GM_SUB_POS, pkt->subscriber);
+	if (old)
+		gm_packet_sg_drop(old);
+
+	pkt->n_active++;
+	gm_packet_sg_subs_add(sg->subs_positive, item);
+
+	sg->most_recent = item;
+	gm_sg_expiry_cancel(sg);
+	gm_sg_update(sg, false);
+}
+
+/* pass 2 for an EXCLUDE-mode record: install the new *,G item plus its
+ * trailing excluded-source items (at pkt->items[offs .. offs+n_exclude]),
+ * replacing the subscriber's previous EXCLUDE state if present, and rerun
+ * the state machine for every S,G whose exclusion status changed.
+ */
+static void gm_handle_v2_pass2_excl(struct gm_packet_state *pkt, size_t offs)
+{
+	struct gm_packet_sg *item = &pkt->items[offs];
+	struct gm_packet_sg *old_grp, *item_dup;
+	struct gm_sg *sg_grp = item->sg;
+	size_t i;
+
+	old_grp = gm_packet_sg_find(sg_grp, GM_SUB_POS, pkt->subscriber);
+	if (old_grp) {
+		/* subscriber previously had EXCLUDE state for this group */
+		for (i = 0; i < item->n_exclude; i++) {
+			struct gm_packet_sg *item_src, *old_src;
+
+			item_src = &pkt->items[offs + 1 + i];
+			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_NEG,
+						    pkt->subscriber);
+			if (old_src)
+				gm_packet_sg_drop(old_src);
+
+			/* See [EXCL_INCL_SG_NOTE] above - we can have old S,G
+			 * items left over if the host previously had INCLUDE
+			 * mode going.  Remove them here if we find any.
+			 */
+			old_src = gm_packet_sg_find(item_src->sg, GM_SUB_POS,
+						    pkt->subscriber);
+			if (old_src)
+				gm_packet_sg_drop(old_src);
+		}
+
+		/* the previous loop has removed the S,G entries which are
+		 * still excluded after this update.  So anything left on the
+		 * old item was previously excluded but is now included
+		 * => need to trigger update on S,G
+		 */
+		for (i = 0; i < old_grp->n_exclude; i++) {
+			struct gm_packet_sg *old_src;
+			struct gm_sg *old_sg_src;
+
+			old_src = old_grp + 1 + i;
+			old_sg_src = old_src->sg;
+			if (!old_sg_src)
+				continue;
+
+			gm_packet_sg_drop(old_src);
+			gm_sg_update(old_sg_src, false);
+		}
+
+		gm_packet_sg_drop(old_grp);
+	}
+
+	/* install the new *,G item; pass1 guarantees no duplicate */
+	item_dup = gm_packet_sg_subs_add(sg_grp->subs_positive, item);
+	assert(!item_dup);
+	pkt->n_active++;
+
+	sg_grp->most_recent = item;
+	gm_sg_expiry_cancel(sg_grp);
+
+	/* link the excluded sources onto their S,G negative lists */
+	for (i = 0; i < item->n_exclude; i++) {
+		struct gm_packet_sg *item_src;
+
+		item_src = &pkt->items[offs + 1 + i];
+		item_dup = gm_packet_sg_subs_add(item_src->sg->subs_negative,
+						 item_src);
+
+		if (item_dup)
+			item_src->sg = NULL;
+		else {
+			pkt->n_active++;
+			gm_sg_update(item_src->sg, false);
+		}
+	}
+
+	/* TODO: determine best ordering between gm_sg_update(S,G) and (*,G)
+	 * to get lower PIM churn/flapping
+	 */
+	gm_sg_update(sg_grp, false);
+}
+
+CPP_NOTICE("TODO: QRV/QQIC are not copied from queries to local state");
+/* on receiving a query, we need to update our robustness/query interval to
+ * match, so we correctly process group/source specific queries after last
+ * member leaves
+ */
+
+/* parse & process an MLDv2 report (ICMPv6 type 143).  Validates record
+ * framing, runs pass 1 (state removal) per record, then shrinks the
+ * packet-state allocation to the surviving items and runs pass 2 (state
+ * creation) over them.
+ */
+static void gm_handle_v2_report(struct gm_if *gm_ifp,
+				const struct sockaddr_in6 *pkt_src, char *data,
+				size_t len)
+{
+	struct mld_v2_report_hdr *hdr;
+	size_t i, n_records, max_entries;
+	struct gm_packet_state *pkt;
+
+	if (len < sizeof(*hdr)) {
+		if (PIM_DEBUG_IGMP_PACKETS)
+			zlog_debug(log_pkt_src(
+				"malformed MLDv2 report (truncated header)"));
+		gm_ifp->stats.rx_drop_malformed++;
+		return;
+	}
+
+	/* errors after this may at least partially process the packet */
+	gm_ifp->stats.rx_new_report++;
+
+	hdr = (struct mld_v2_report_hdr *)data;
+	data += sizeof(*hdr);
+	len -= sizeof(*hdr);
+
+	/* can't have more *,G and S,G items than there is space for ipv6
+	 * addresses, so just use this to allocate temporary buffer
+	 */
+	max_entries = len / sizeof(pim_addr);
+	pkt = XCALLOC(MTYPE_GM_STATE,
+		      offsetof(struct gm_packet_state, items[max_entries]));
+	pkt->n_sg = max_entries;
+	pkt->iface = gm_ifp;
+	/* may be NULL for a first-time subscriber; resolved below */
+	pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);
+
+	n_records = ntohs(hdr->n_records);
+
+	/* validate & remove state in v2_pass1() */
+	for (i = 0; i < n_records; i++) {
+		struct mld_v2_rec_hdr *rechdr;
+		size_t n_src, record_size;
+
+		if (len < sizeof(*rechdr)) {
+			zlog_warn(log_pkt_src(
+				"malformed MLDv2 report (truncated record header)"));
+			gm_ifp->stats.rx_trunc_report++;
+			break;
+		}
+
+		rechdr = (struct mld_v2_rec_hdr *)data;
+		data += sizeof(*rechdr);
+		len -= sizeof(*rechdr);
+
+		/* aux_len is in 4-byte units per the MLDv2 record format */
+		n_src = ntohs(rechdr->n_src);
+		record_size = n_src * sizeof(pim_addr) + rechdr->aux_len * 4;
+
+		if (len < record_size) {
+			zlog_warn(log_pkt_src(
+				"malformed MLDv2 report (truncated source list)"));
+			gm_ifp->stats.rx_trunc_report++;
+			break;
+		}
+		if (!IN6_IS_ADDR_MULTICAST(&rechdr->grp)) {
+			zlog_warn(
+				log_pkt_src(
+					"malformed MLDv2 report (invalid group %pI6)"),
+				&rechdr->grp);
+			gm_ifp->stats.rx_trunc_report++;
+			break;
+		}
+
+		data += record_size;
+		len -= record_size;
+
+		gm_handle_v2_pass1(pkt, rechdr);
+	}
+
+	if (!pkt->n_active) {
+		/* nothing survived pass 1 => nothing to install */
+		gm_subscriber_drop(&pkt->subscriber);
+		XFREE(MTYPE_GM_STATE, pkt);
+		return;
+	}
+
+	/* shrink allocation down to what pass 1 actually kept */
+	pkt = XREALLOC(MTYPE_GM_STATE, pkt,
+		       offsetof(struct gm_packet_state, items[pkt->n_active]));
+	pkt->n_sg = pkt->n_active;
+	pkt->n_active = 0;
+
+	monotime(&pkt->received);
+	if (!pkt->subscriber)
+		pkt->subscriber = gm_subscriber_get(gm_ifp, pkt_src->sin6_addr);
+	gm_packets_add_tail(pkt->subscriber->packets, pkt);
+	gm_packet_expires_add_tail(gm_ifp->expires, pkt);
+
+	/* pass 2: EXCLUDE items carry n_exclude trailing source items */
+	for (i = 0; i < pkt->n_sg; i++)
+		if (!pkt->items[i].is_excl)
+			gm_handle_v2_pass2_incl(pkt, i);
+		else {
+			gm_handle_v2_pass2_excl(pkt, i);
+			i += pkt->items[i].n_exclude;
+		}
+
+	if (pkt->n_active == 0)
+		gm_packet_free(pkt);
+}
+
+/* process an MLDv1 report (ICMPv6 type 131).  Treated as an untracked
+ * EXCLUDE{} (i.e. plain *,G join) attributed to the shared
+ * gm_dummy_untracked pseudo-subscriber, then run through the same pass-2
+ * machinery as v2 EXCLUDE records.
+ */
+static void gm_handle_v1_report(struct gm_if *gm_ifp,
+				const struct sockaddr_in6 *pkt_src, char *data,
+				size_t len)
+{
+	struct mld_v1_pkt *hdr;
+	struct gm_packet_state *pkt;
+	struct gm_sg *grp;
+	struct gm_packet_sg *item;
+	size_t max_entries;
+
+	if (len < sizeof(*hdr)) {
+		if (PIM_DEBUG_IGMP_PACKETS)
+			zlog_debug(log_pkt_src(
+				"malformed MLDv1 report (truncated)"));
+		gm_ifp->stats.rx_drop_malformed++;
+		return;
+	}
+
+	gm_ifp->stats.rx_old_report++;
+
+	hdr = (struct mld_v1_pkt *)data;
+
+	/* v1 report = exactly one *,G item, no sources */
+	max_entries = 1;
+	pkt = XCALLOC(MTYPE_GM_STATE,
+		      offsetof(struct gm_packet_state, items[max_entries]));
+	pkt->n_sg = max_entries;
+	pkt->iface = gm_ifp;
+	pkt->subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
+
+	/* { equivalent of gm_handle_v2_pass1() with IS_EXCLUDE */
+
+	grp = gm_sg_find(pkt->iface, hdr->grp, PIMADDR_ANY);
+	if (!grp)
+		grp = gm_sg_make(pkt->iface, hdr->grp, PIMADDR_ANY);
+
+	item = gm_packet_sg_setup(pkt, grp, true, false);
+	item->n_exclude = 0;
+	CPP_NOTICE("set v1-seen timer on grp here");
+
+	/* } */
+
+	/* pass2 will count n_active back up to 1.  Also since a v1 report
+	 * has exactly 1 group, we can skip the realloc() that v2 needs here.
+	 */
+	assert(pkt->n_active == 1);
+	pkt->n_sg = pkt->n_active;
+	pkt->n_active = 0;
+
+	monotime(&pkt->received);
+	if (!pkt->subscriber)
+		pkt->subscriber = gm_subscriber_get(gm_ifp, gm_dummy_untracked);
+	gm_packets_add_tail(pkt->subscriber->packets, pkt);
+	gm_packet_expires_add_tail(gm_ifp->expires, pkt);
+
+	/* pass2 covers installing state & removing old state; all the v1
+	 * compat is handled at this point.
+	 *
+	 * Note that "old state" may be v2;  subscribers will switch from v2
+	 * reports to v1 reports when the querier changes from v2 to v1.  So,
+	 * limiting this to v1 would be wrong.
+	 */
+	gm_handle_v2_pass2_excl(pkt, 0);
+
+	if (pkt->n_active == 0)
+		gm_packet_free(pkt);
+}
+
+/* process an MLDv1 done/leave (ICMPv6 type 132).  Only touches state
+ * attributed to the untracked pseudo-subscriber; equivalent to a v2
+ * TO_INCLUDE{} for the group.
+ */
+static void gm_handle_v1_leave(struct gm_if *gm_ifp,
+			       const struct sockaddr_in6 *pkt_src, char *data,
+			       size_t len)
+{
+	struct mld_v1_pkt *hdr;
+	struct gm_subscriber *subscriber;
+	struct gm_sg *grp;
+	struct gm_packet_sg *old_grp;
+
+	if (len < sizeof(*hdr)) {
+		if (PIM_DEBUG_IGMP_PACKETS)
+			zlog_debug(log_pkt_src(
+				"malformed MLDv1 leave (truncated)"));
+		gm_ifp->stats.rx_drop_malformed++;
+		return;
+	}
+
+	gm_ifp->stats.rx_old_leave++;
+
+	hdr = (struct mld_v1_pkt *)data;
+
+	/* no untracked state at all => nothing to remove */
+	subscriber = gm_subscriber_findref(gm_ifp, gm_dummy_untracked);
+	if (!subscriber)
+		return;
+
+	/* { equivalent of gm_handle_v2_pass1() with IS_INCLUDE */
+
+	grp = gm_sg_find(gm_ifp, hdr->grp, PIMADDR_ANY);
+	if (grp) {
+		old_grp = gm_packet_sg_find(grp, GM_SUB_POS, subscriber);
+		if (old_grp) {
+			gm_packet_sg_drop(old_grp);
+			gm_sg_update(grp, false);
+			CPP_NOTICE("need S,G PRUNE => NO_INFO transition here");
+		}
+	}
+
+	/* } */
+
+	/* nothing more to do here, pass2 is no-op for leaves */
+	gm_subscriber_drop(&subscriber);
+}
+
+/* for each general query received (or sent), a timer is started to expire
+ * _everything_ at the appropriate time (including robustness multiplier).
+ *
+ * So when this timer hits, all packets - with all of their items - that were
+ * received *before* the query are aged out, and state updated accordingly.
+ * Note that when we receive a refresh/update, the previous/old packet is
+ * already dropped and replaced with a new one, so in normal steady-state
+ * operation, this timer won't be doing anything.
+ *
+ * Additionally, if a subscriber actively leaves a group, that goes through
+ * its own path too and won't hit this.  This is really only triggered when a
+ * host straight up disappears.
+ */
+static void gm_t_expire(struct thread *t)
+{
+	struct gm_if *gm_ifp = THREAD_ARG(t);
+	struct gm_packet_state *pkt;
+
+	zlog_info(log_ifp("general expiry timer"));
+
+	/* work through pending[] (oldest first); reschedule & bail as soon
+	 * as the head entry's expiry is still in the future
+	 */
+	while (gm_ifp->n_pending) {
+		struct gm_general_pending *pend = gm_ifp->pending;
+		struct timeval remain;
+		int64_t remain_ms;
+
+		remain_ms = monotime_until(&pend->expiry, &remain);
+		if (remain_ms > 0) {
+			if (PIM_DEBUG_IGMP_EVENTS)
+				zlog_debug(
+					log_ifp("next general expiry in %" PRId64 "ms"),
+					remain_ms / 1000);
+
+			thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
+					    &remain, &gm_ifp->t_expire);
+			return;
+		}
+
+		/* expires list is insert-ordered by receive time, so stop
+		 * at the first packet received after this query
+		 */
+		while ((pkt = gm_packet_expires_first(gm_ifp->expires))) {
+			if (timercmp(&pkt->received, &pend->query, >=))
+				break;
+
+			if (PIM_DEBUG_IGMP_PACKETS)
+				zlog_debug(log_ifp("expire packet %p"), pkt);
+			gm_packet_drop(pkt, true);
+		}
+
+		gm_ifp->n_pending--;
+		memmove(gm_ifp->pending, gm_ifp->pending + 1,
+			gm_ifp->n_pending * sizeof(gm_ifp->pending[0]));
+	}
+
+	if (PIM_DEBUG_IGMP_EVENTS)
+		zlog_debug(log_ifp("next general expiry waiting for query"));
+}
+
+/* NB: the receive handlers will also run when sending packets, since we
+ * receive our own packets back in.
+ */
+
+/* a general query was seen: record a pending "expire everything received
+ * before now" entry at now + expire_wait, superseding any later-expiring
+ * pending entries, and arm the general expiry timer if it isn't running.
+ */
+static void gm_handle_q_general(struct gm_if *gm_ifp,
+				struct gm_query_timers *timers)
+{
+	struct timeval now, expiry;
+	struct gm_general_pending *pend;
+
+	monotime(&now);
+	timeradd(&now, &timers->expire_wait, &expiry);
+
+	while (gm_ifp->n_pending) {
+		pend = &gm_ifp->pending[gm_ifp->n_pending - 1];
+
+		if (timercmp(&pend->expiry, &expiry, <))
+			break;
+
+		/* if we end up here, the last item in pending[] has an expiry
+		 * later than the expiry for this query.  But our query time
+		 * (now) is later than that of the item (because, well, that's
+		 * how time works.)  This makes this query meaningless since
+		 * it's "supersetted" within the preexisting query
+		 */
+
+		if (PIM_DEBUG_IGMP_TRACE_DETAIL)
+			zlog_debug(
+				log_ifp("zapping supersetted general timer %pTVMu"),
+				&pend->expiry);
+
+		gm_ifp->n_pending--;
+		if (!gm_ifp->n_pending)
+			THREAD_OFF(gm_ifp->t_expire);
+	}
+
+	/* people might be messing with their configs or something */
+	if (gm_ifp->n_pending == array_size(gm_ifp->pending))
+		return;
+
+	pend = &gm_ifp->pending[gm_ifp->n_pending];
+	pend->query = now;
+	pend->expiry = expiry;
+
+	if (!gm_ifp->n_pending++) {
+		if (PIM_DEBUG_IGMP_TRACE)
+			zlog_debug(
+				log_ifp("starting general timer @ 0: %pTVMu"),
+				&pend->expiry);
+		thread_add_timer_tv(router->master, gm_t_expire, gm_ifp,
+				    &timers->expire_wait, &gm_ifp->t_expire);
+	} else if (PIM_DEBUG_IGMP_TRACE)
+		zlog_debug(log_ifp("appending general timer @ %u: %pTVMu"),
+			   gm_ifp->n_pending, &pend->expiry);
+}
+
+/* per-S,G expiry timer: no report arrived in response to our specific
+ * queries, so drop all positive subscriptions and let gm_sg_update()
+ * transition/free the entry.
+ */
+static void gm_t_sg_expire(struct thread *t)
+{
+	struct gm_sg *sg = THREAD_ARG(t);
+	struct gm_if *gm_ifp = sg->iface;
+	struct gm_packet_sg *item;
+
+	assertf(sg->state == GM_SG_JOIN_EXPIRING ||
+			sg->state == GM_SG_NOPRUNE_EXPIRING,
+		"%pSG%%%s %pTHD", &sg->sgaddr, gm_ifp->ifp->name, t);
+
+	frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
+		/* this will also drop EXCLUDE mode S,G lists together with
+		 * the *,G entry
+		 */
+		gm_packet_sg_drop(item);
+
+	/* subs_negative items are only timed out together with the *,G entry
+	 * since we won't get any reports for a group-and-source query
+	 */
+	gm_sg_update(sg, true);
+}
+
+/* return true if a report for sg was received at/after "ref" (minus the
+ * configured timing fuzz), i.e. the entry is demonstrably still alive and
+ * no expiry needs to be scheduled.  Lazily recomputes sg->most_recent
+ * from the positive subscription list if it was invalidated.
+ */
+static bool gm_sg_check_recent(struct gm_if *gm_ifp, struct gm_sg *sg,
+			       struct timeval ref)
+{
+	struct gm_packet_state *pkt;
+
+	if (!sg->most_recent) {
+		struct gm_packet_state *best_pkt = NULL;
+		struct gm_packet_sg *item;
+
+		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
+			pkt = gm_packet_sg2state(item);
+
+			if (!best_pkt ||
+			    timercmp(&pkt->received, &best_pkt->received, >)) {
+				best_pkt = pkt;
+				sg->most_recent = item;
+			}
+		}
+	}
+	if (sg->most_recent) {
+		struct timeval fuzz;
+
+		pkt = gm_packet_sg2state(sg->most_recent);
+
+		/* this shouldn't happen on plain old real ethernet segment,
+		 * but on something like a VXLAN or VPLS it is very possible
+		 * that we get a report before the query that triggered it.
+		 * (imagine a triangle scenario with 3 datacenters, it's very
+		 * possible A->B + B->C is faster than A->C due to odd routing)
+		 *
+		 * This makes a little tolerance allowance to handle that case.
+		 */
+		timeradd(&pkt->received, &gm_ifp->cfg_timing_fuzz, &fuzz);
+
+		if (timercmp(&fuzz, &ref, >))
+			return true;
+	}
+	return false;
+}
+
+/* (re)arm sg's expiry timer to fire in expire_wait, unless a sufficiently
+ * recent report makes it unnecessary or an earlier timer is already
+ * pending.  No-op for NULL sg and for entries in PRUNE state.
+ */
+static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg,
+			      struct timeval expire_wait)
+{
+	struct timeval now;
+
+	if (!sg)
+		return;
+	if (sg->state == GM_SG_PRUNE)
+		return;
+
+	monotime(&now);
+	if (gm_sg_check_recent(gm_ifp, sg, now))
+		return;
+
+	if (PIM_DEBUG_IGMP_TRACE)
+		zlog_debug(log_sg(sg, "expiring in %pTVI"), &expire_wait);
+
+	if (sg->t_sg_expire) {
+		struct timeval remain;
+
+		/* keep the existing timer if it fires no later than ours */
+		remain = thread_timer_remain(sg->t_sg_expire);
+		if (timercmp(&remain, &expire_wait, <=))
+			return;
+
+		THREAD_OFF(sg->t_sg_expire);
+	}
+
+	thread_add_timer_tv(router->master, gm_t_sg_expire, sg, &expire_wait,
+			    &sg->t_sg_expire);
+}
+
+/* group-and-source specific query: start expiry timers on each listed
+ * S,G that we actually have state for (gm_sg_timer_start handles NULL).
+ */
+static void gm_handle_q_groupsrc(struct gm_if *gm_ifp,
+				 struct gm_query_timers *timers, pim_addr grp,
+				 const pim_addr *srcs, size_t n_src)
+{
+	struct gm_sg *sg;
+	size_t i;
+
+	for (i = 0; i < n_src; i++) {
+		sg = gm_sg_find(gm_ifp, grp, srcs[i]);
+		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
+	}
+}
+
+/* group-specific query expiry for the group's S,G entries */
+static void gm_t_grp_expire(struct thread *t)
+{
+	/* if we're here, that means when we received the group-specific query
+	 * there was one or more active S,G for this group.  For *,G the timer
+	 * in sg->t_sg_expire is running separately and gets cancelled when we
+	 * receive a report, so that work is left to gm_t_sg_expire and we
+	 * shouldn't worry about it here.
+	 */
+	struct gm_grp_pending *pend = THREAD_ARG(t);
+	struct gm_if *gm_ifp = pend->iface;
+	struct gm_sg *sg, *sg_start, sg_ref = {};
+
+	if (PIM_DEBUG_IGMP_EVENTS)
+		zlog_debug(log_ifp("*,%pPAs S,G timer expired"), &pend->grp);
+
+	/* gteq lookup - try to find *,G or S,G  (S,G is > *,G)
+	 * could technically be gt to skip a possible *,G
+	 */
+	sg_ref.sgaddr.grp = pend->grp;
+	sg_ref.sgaddr.src = PIMADDR_ANY;
+	sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
+
+	/* walk all S,G entries of this group */
+	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
+		struct gm_packet_sg *item;
+
+		if (pim_addr_cmp(sg->sgaddr.grp, pend->grp))
+			break;
+		if (pim_addr_is_any(sg->sgaddr.src))
+			/* handled by gm_t_sg_expire / sg->t_sg_expire */
+			continue;
+		if (gm_sg_check_recent(gm_ifp, sg, pend->query))
+			continue;
+
+		/* we may also have a group-source-specific query going on in
+		 * parallel.  But if we received nothing for the *,G query,
+		 * the S,G query is kinda irrelevant.
+		 */
+		THREAD_OFF(sg->t_sg_expire);
+
+		frr_each_safe (gm_packet_sg_subs, sg->subs_positive, item)
+			/* this will also drop the EXCLUDE S,G lists */
+			gm_packet_sg_drop(item);
+
+		gm_sg_update(sg, true);
+	}
+
+	gm_grp_pends_del(gm_ifp->grp_pends, pend);
+	XFREE(MTYPE_GM_GRP_PENDING, pend);
+}
+
+/* group-specific query: start the *,G expiry timer (if a *,G entry
+ * exists) and, if the group also has S,G entries, schedule a shared
+ * gm_grp_pending timer that ages out all of them together.
+ */
+static void gm_handle_q_group(struct gm_if *gm_ifp,
+			      struct gm_query_timers *timers, pim_addr grp)
+{
+	struct gm_sg *sg, sg_ref = {};
+	struct gm_grp_pending *pend, pend_ref = {};
+
+	sg_ref.sgaddr.grp = grp;
+	sg_ref.sgaddr.src = PIMADDR_ANY;
+	/* gteq lookup - try to find *,G or S,G  (S,G is > *,G) */
+	sg = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
+
+	if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
+		/* we have nothing at all for this group - don't waste RAM */
+		return;
+
+	if (pim_addr_is_any(sg->sgaddr.src)) {
+		/* actually found *,G entry here */
+		if (PIM_DEBUG_IGMP_TRACE)
+			zlog_debug(log_ifp("*,%pPAs expiry timer starting"),
+				   &grp);
+		gm_sg_timer_start(gm_ifp, sg, timers->expire_wait);
+
+		sg = gm_sgs_next(gm_ifp->sgs, sg);
+		if (!sg || pim_addr_cmp(sg->sgaddr.grp, grp))
+			/* no S,G for this group */
+			return;
+	}
+
+	pend_ref.grp = grp;
+	pend = gm_grp_pends_find(gm_ifp->grp_pends, &pend_ref);
+
+	if (pend) {
+		struct timeval remain;
+
+		/* keep an already-pending earlier (or equal) expiry */
+		remain = thread_timer_remain(pend->t_expire);
+		if (timercmp(&remain, &timers->expire_wait, <=))
+			return;
+
+		THREAD_OFF(pend->t_expire);
+	} else {
+		pend = XCALLOC(MTYPE_GM_GRP_PENDING, sizeof(*pend));
+		pend->grp = grp;
+		pend->iface = gm_ifp;
+		gm_grp_pends_add(gm_ifp->grp_pends, pend);
+	}
+
+	monotime(&pend->query);
+	thread_add_timer_tv(router->master, gm_t_grp_expire, pend,
+			    &timers->expire_wait, &pend->t_expire);
+
+	if (PIM_DEBUG_IGMP_TRACE)
+		zlog_debug(log_ifp("*,%pPAs S,G timer started: %pTHD"), &grp,
+			   pend->t_expire);
+}
+
+/* restart querying immediately (with full startup robustness count), but
+ * only if we are/would be the elected querier and have a usable
+ * link-local address.
+ */
+static void gm_bump_querier(struct gm_if *gm_ifp)
+{
+	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
+
+	THREAD_OFF(gm_ifp->t_query);
+
+	if (pim_addr_is_any(pim_ifp->ll_lowest))
+		return;
+	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
+		return;
+
+	gm_ifp->n_startup = gm_ifp->cur_qrv;
+
+	thread_execute(router->master, gm_t_query, gm_ifp, 0);
+}
+
+/* other-querier-present timer expired: the previously elected querier
+ * went silent, so take over querier duty ourselves and start querying
+ * with the startup robustness count.
+ */
+static void gm_t_other_querier(struct thread *t)
+{
+	struct gm_if *gm_ifp = THREAD_ARG(t);
+	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
+
+	zlog_info(log_ifp("other querier timer expired"));
+
+	gm_ifp->querier = pim_ifp->ll_lowest;
+	gm_ifp->n_startup = gm_ifp->cur_qrv;
+
+	thread_execute(router->master, gm_t_query, gm_ifp, 0);
+}
+
+/* parse & process an MLD query (ICMPv6 type 130), either v1 (fixed-size
+ * mld_v1_pkt) or v2 (mld_v2_query_hdr + source list, distinguished by
+ * length).  Validates framing and destination address, runs querier
+ * election, derives the timer bundle and dispatches to the
+ * general/group/group-source handlers.
+ */
+static void gm_handle_query(struct gm_if *gm_ifp,
+			    const struct sockaddr_in6 *pkt_src,
+			    pim_addr *pkt_dst, char *data, size_t len)
+{
+	struct mld_v2_query_hdr *hdr;
+	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
+	struct gm_query_timers timers;
+	bool general_query;
+
+	if (len < sizeof(struct mld_v2_query_hdr) &&
+	    len != sizeof(struct mld_v1_pkt)) {
+		zlog_warn(log_pkt_src("invalid query size"));
+		gm_ifp->stats.rx_drop_malformed++;
+		return;
+	}
+
+	/* NB: for a v1-sized packet only the fields shared with
+	 * mld_v1_pkt (max_resp_code, grp) may be read through hdr
+	 */
+	hdr = (struct mld_v2_query_hdr *)data;
+	general_query = pim_addr_is_any(hdr->grp);
+
+	if (!general_query && !IN6_IS_ADDR_MULTICAST(&hdr->grp)) {
+		zlog_warn(log_pkt_src(
+				  "malformed MLDv2 query (invalid group %pI6)"),
+			  &hdr->grp);
+		gm_ifp->stats.rx_drop_malformed++;
+		return;
+	}
+
+	if (len >= sizeof(struct mld_v2_query_hdr)) {
+		/* v2-only validation of the trailing source list */
+		size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);
+
+		if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
+			zlog_warn(log_pkt_src(
+				"malformed MLDv2 query (truncated source list)"));
+			gm_ifp->stats.rx_drop_malformed++;
+			return;
+		}
+
+		if (general_query && src_space) {
+			zlog_warn(log_pkt_src(
+				"malformed MLDv2 query (general query with non-empty source list)"));
+			gm_ifp->stats.rx_drop_malformed++;
+			return;
+		}
+	}
+
+	/* accepting queries unicast to us (or addressed to a wrong group)
+	 * can mess up querier election as well as cause us to terminate
+	 * traffic (since after a unicast query no reports will be coming in)
+	 */
+	if (!IPV6_ADDR_SAME(pkt_dst, &gm_all_hosts)) {
+		if (pim_addr_is_any(hdr->grp)) {
+			zlog_warn(
+				log_pkt_src(
+					"wrong destination %pPA for general query"),
+				pkt_dst);
+			gm_ifp->stats.rx_drop_dstaddr++;
+			return;
+		}
+
+		if (!IPV6_ADDR_SAME(&hdr->grp, pkt_dst)) {
+			gm_ifp->stats.rx_drop_dstaddr++;
+			zlog_warn(
+				log_pkt_src(
+					"wrong destination %pPA for group specific query"),
+				pkt_dst);
+			return;
+		}
+	}
+
+	/* querier election: lowest source address wins */
+	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &gm_ifp->querier) < 0) {
+		if (PIM_DEBUG_IGMP_EVENTS)
+			zlog_debug(
+				log_pkt_src("replacing elected querier %pPA"),
+				&gm_ifp->querier);
+
+		gm_ifp->querier = pkt_src->sin6_addr;
+	}
+
+	if (len == sizeof(struct mld_v1_pkt)) {
+		/* v1 query: no QRV/QQIC on the wire, use our own values */
+		timers.qrv = gm_ifp->cur_qrv;
+		timers.max_resp_ms = hdr->max_resp_code;
+		timers.qqic_ms = gm_ifp->cur_query_intv;
+	} else {
+		/* low 3 flag bits are QRV; 0 encodes 8 */
+		timers.qrv = (hdr->flags & 0x7) ?: 8;
+		timers.max_resp_ms = mld_max_resp_decode(hdr->max_resp_code);
+		timers.qqic_ms = igmp_msg_decode8to16(hdr->qqic) * 1000;
+	}
+	timers.fuzz = gm_ifp->cfg_timing_fuzz;
+
+	gm_expiry_calc(&timers);
+
+	if (PIM_DEBUG_IGMP_TRACE_DETAIL)
+		zlog_debug(
+			log_ifp("query timers: QRV=%u max_resp=%ums qqic=%ums expire_wait=%pTVI"),
+			timers.qrv, timers.max_resp_ms, timers.qqic_ms,
+			&timers.expire_wait);
+
+	if (IPV6_ADDR_CMP(&pkt_src->sin6_addr, &pim_ifp->ll_lowest) < 0) {
+		/* another querier won election: stop our own querying and
+		 * (re)arm the other-querier-present timeout
+		 */
+		unsigned int other_ms;
+
+		THREAD_OFF(gm_ifp->t_query);
+		THREAD_OFF(gm_ifp->t_other_querier);
+
+		other_ms = timers.qrv * timers.qqic_ms + timers.max_resp_ms / 2;
+		thread_add_timer_msec(router->master, gm_t_other_querier,
+				      gm_ifp, other_ms,
+				      &gm_ifp->t_other_querier);
+	}
+
+	if (len == sizeof(struct mld_v1_pkt)) {
+		if (general_query) {
+			gm_handle_q_general(gm_ifp, &timers);
+			gm_ifp->stats.rx_query_old_general++;
+		} else {
+			gm_handle_q_group(gm_ifp, &timers, hdr->grp);
+			gm_ifp->stats.rx_query_old_group++;
+		}
+		return;
+	}
+
+	/* v2 query - [S]uppress bit set => don't touch timers at all */
+	if (hdr->flags & 0x8) {
+		gm_ifp->stats.rx_query_new_sbit++;
+		return;
+	}
+
+	if (general_query) {
+		gm_handle_q_general(gm_ifp, &timers);
+		gm_ifp->stats.rx_query_new_general++;
+	} else if (!ntohs(hdr->n_src)) {
+		gm_handle_q_group(gm_ifp, &timers, hdr->grp);
+		gm_ifp->stats.rx_query_new_group++;
+	} else {
+		gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
+				     ntohs(hdr->n_src));
+		gm_ifp->stats.rx_query_new_groupsrc++;
+	}
+}
+
+/* Validate the ICMPv6 checksum of a received MLD packet and dispatch it
+ * to the per-message-type handler.
+ *
+ * data/pktlen cover the full ICMPv6 message including the plain header;
+ * the header is stripped off before calling the type-specific handlers.
+ * NB: the checksum field in the packet buffer is zeroed in place and not
+ * restored.
+ */
+static void gm_rx_process(struct gm_if *gm_ifp,
+			  const struct sockaddr_in6 *pkt_src, pim_addr *pkt_dst,
+			  void *data, size_t pktlen)
+{
+	struct icmp6_plain_hdr *icmp6 = data;
+	uint16_t pkt_csum, ref_csum;
+	/* ICMPv6 checksums are computed over an IPv6 pseudo-header
+	 * (src, dst, upper-layer length, next header) plus the payload.
+	 */
+	struct ipv6_ph ph6 = {
+		.src = pkt_src->sin6_addr,
+		.dst = *pkt_dst,
+		.ulpl = htons(pktlen),
+		.next_hdr = IPPROTO_ICMPV6,
+	};
+
+	/* zero the checksum field before recomputing the reference value */
+	pkt_csum = icmp6->icmp6_cksum;
+	icmp6->icmp6_cksum = 0;
+	ref_csum = in_cksum_with_ph6(&ph6, data, pktlen);
+
+	if (pkt_csum != ref_csum) {
+		zlog_warn(
+			log_pkt_src(
+				"(dst %pPA) packet RX checksum failure, expected %04hx, got %04hx"),
+			pkt_dst, pkt_csum, ref_csum);
+		gm_ifp->stats.rx_drop_csum++;
+		return;
+	}
+
+	/* advance past the ICMPv6 header; handlers see only the MLD body */
+	data = (icmp6 + 1);
+	pktlen -= sizeof(*icmp6);
+
+	switch (icmp6->icmp6_type) {
+	case ICMP6_MLD_QUERY:
+		gm_handle_query(gm_ifp, pkt_src, pkt_dst, data, pktlen);
+		break;
+	case ICMP6_MLD_V1_REPORT:
+		gm_handle_v1_report(gm_ifp, pkt_src, data, pktlen);
+		break;
+	case ICMP6_MLD_V1_DONE:
+		gm_handle_v1_leave(gm_ifp, pkt_src, data, pktlen);
+		break;
+	case ICMP6_MLD_V2_REPORT:
+		gm_handle_v2_report(gm_ifp, pkt_src, data, pktlen);
+		break;
+	}
+}
+
+/* Scan an IPv6 Hop-by-Hop options block (as delivered by IPV6_HOPOPTS
+ * ancillary data) for a Router Alert option carrying alert_type.
+ *
+ * hopopts points at the raw extension header: byte 0 = next header,
+ * byte 1 = length in 8-byte units minus 1, followed by TLV options.
+ * Returns true only if a well-formed Router Alert option with the given
+ * 16-bit value is present; any malformed/truncated layout returns false.
+ */
+static bool ip6_check_hopopts_ra(uint8_t *hopopts, size_t hopopt_len,
+				 uint16_t alert_type)
+{
+	uint8_t *hopopt_end;
+
+	/* minimum extension header size is one 8-byte unit */
+	if (hopopt_len < 8)
+		return false;
+	/* buffer must hold the full length the header claims */
+	if (hopopt_len < (hopopts[1] + 1U) * 8U)
+		return false;
+
+	hopopt_end = hopopts + (hopopts[1] + 1) * 8;
+	hopopts += 2;
+
+	while (hopopts < hopopt_end) {
+		/* Pad1 is a single zero byte with no length field */
+		if (hopopts[0] == IP6OPT_PAD1) {
+			hopopts++;
+			continue;
+		}
+
+		/* need at least type + length bytes ... */
+		if (hopopts > hopopt_end - 2)
+			break;
+		/* ... and the option data must fit before the end */
+		if (hopopts > hopopt_end - 2 - hopopts[1])
+			break;
+
+		if (hopopts[0] == IP6OPT_ROUTER_ALERT && hopopts[1] == 2) {
+			/* Router Alert value is a 16-bit big-endian field */
+			uint16_t have_type = (hopopts[2] << 8) | hopopts[3];
+
+			if (have_type == alert_type)
+				return true;
+		}
+
+		hopopts += 2 + hopopts[1];
+	}
+	return false;
+}
+
+/* Socket read event handler: receive one MLD packet from the per-VRF
+ * raw ICMPv6 socket, validate its metadata (interface, hop limit,
+ * Router Alert, source address) and hand it to gm_rx_process().
+ *
+ * Uses a two-phase recvmsg: first MSG_PEEK|MSG_TRUNC to learn the real
+ * packet size, then a heap buffer is allocated if the packet exceeds
+ * the 2 KiB stack buffer.
+ */
+static void gm_t_recv(struct thread *t)
+{
+	struct pim_instance *pim = THREAD_ARG(t);
+	/* ancillary data buffer, sized for the three cmsgs we request */
+	union {
+		char buf[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
+			 CMSG_SPACE(256) /* hop options */ +
+			 CMSG_SPACE(sizeof(int)) /* hopcount */];
+		struct cmsghdr align;
+	} cmsgbuf;
+	struct cmsghdr *cmsg;
+	struct in6_pktinfo *pktinfo = NULL;
+	uint8_t *hopopts = NULL;
+	size_t hopopt_len = 0;
+	int *hoplimit = NULL;
+	char rxbuf[2048];
+	struct msghdr mh[1] = {};
+	struct iovec iov[1];
+	struct sockaddr_in6 pkt_src[1];
+	ssize_t nread;
+	size_t pktlen;
+
+	/* re-arm the read event before doing anything else */
+	thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
+			&pim->t_gm_recv);
+
+	iov->iov_base = rxbuf;
+	iov->iov_len = sizeof(rxbuf);
+
+	mh->msg_name = pkt_src;
+	mh->msg_namelen = sizeof(pkt_src);
+	mh->msg_control = cmsgbuf.buf;
+	mh->msg_controllen = sizeof(cmsgbuf.buf);
+	mh->msg_iov = iov;
+	mh->msg_iovlen = array_size(iov);
+	mh->msg_flags = 0;
+
+	/* peek first to find out the true packet size (MSG_TRUNC makes
+	 * recvmsg return the full length even if the buffer is smaller)
+	 */
+	nread = recvmsg(pim->gm_socket, mh, MSG_PEEK | MSG_TRUNC);
+	if (nread <= 0) {
+		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
+		pim->gm_rx_drop_sys++;
+		return;
+	}
+
+	/* oversized packet: switch to a heap buffer, freed at out_free */
+	if ((size_t)nread > sizeof(rxbuf)) {
+		iov->iov_base = XMALLOC(MTYPE_GM_PACKET, nread);
+		iov->iov_len = nread;
+	}
+	nread = recvmsg(pim->gm_socket, mh, 0);
+	if (nread <= 0) {
+		zlog_err("(VRF %s) RX error: %m", pim->vrf->name);
+		pim->gm_rx_drop_sys++;
+		goto out_free;
+	}
+
+	struct interface *ifp;
+
+	/* link-local source => sin6_scope_id carries the ifindex */
+	ifp = if_lookup_by_index(pkt_src->sin6_scope_id, pim->vrf->vrf_id);
+	if (!ifp || !ifp->info)
+		goto out_free;
+
+	struct pim_interface *pim_ifp = ifp->info;
+	struct gm_if *gm_ifp = pim_ifp->mld;
+
+	if (!gm_ifp)
+		goto out_free;
+
+	/* collect IPV6_PKTINFO / IPV6_HOPOPTS / IPV6_HOPLIMIT cmsgs */
+	for (cmsg = CMSG_FIRSTHDR(mh); cmsg; cmsg = CMSG_NXTHDR(mh, cmsg)) {
+		if (cmsg->cmsg_level != SOL_IPV6)
+			continue;
+
+		switch (cmsg->cmsg_type) {
+		case IPV6_PKTINFO:
+			pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+			break;
+		case IPV6_HOPOPTS:
+			hopopts = CMSG_DATA(cmsg);
+			hopopt_len = cmsg->cmsg_len - sizeof(*cmsg);
+			break;
+		case IPV6_HOPLIMIT:
+			hoplimit = (int *)CMSG_DATA(cmsg);
+			break;
+		}
+	}
+
+	/* both were requested via setsockopt; missing => kernel bug */
+	if (!pktinfo || !hoplimit) {
+		zlog_err(log_ifp(
+			"BUG: packet without IPV6_PKTINFO or IPV6_HOPLIMIT"));
+		pim->gm_rx_drop_sys++;
+		goto out_free;
+	}
+
+	/* MLD packets must arrive with hop limit 1 (link-local only) */
+	if (*hoplimit != 1) {
+		zlog_err(log_pkt_src("packet with hop limit != 1"));
+		/* spoofing attempt => count on srcaddr counter */
+		gm_ifp->stats.rx_drop_srcaddr++;
+		goto out_free;
+	}
+
+	if (!ip6_check_hopopts_ra(hopopts, hopopt_len, IP6_ALERT_MLD)) {
+		zlog_err(log_pkt_src(
+			"packet without IPv6 Router Alert MLD option"));
+		gm_ifp->stats.rx_drop_ra++;
+		goto out_free;
+	}
+
+	if (IN6_IS_ADDR_UNSPECIFIED(&pkt_src->sin6_addr))
+		/* reports from :: happen in normal operation for DAD, so
+		 * don't spam log messages about this
+		 */
+		goto out_free;
+
+	if (!IN6_IS_ADDR_LINKLOCAL(&pkt_src->sin6_addr)) {
+		zlog_warn(log_pkt_src("packet from invalid source address"));
+		gm_ifp->stats.rx_drop_srcaddr++;
+		goto out_free;
+	}
+
+	pktlen = nread;
+	if (pktlen < sizeof(struct icmp6_plain_hdr)) {
+		zlog_warn(log_pkt_src("truncated packet"));
+		gm_ifp->stats.rx_drop_malformed++;
+		goto out_free;
+	}
+
+	gm_rx_process(gm_ifp, pkt_src, &pktinfo->ipi6_addr, iov->iov_base,
+		      pktlen);
+
+out_free:
+	/* only free if the oversized-packet path allocated a buffer */
+	if (iov->iov_base != rxbuf)
+		XFREE(MTYPE_GM_PACKET, iov->iov_base);
+}
+
+/* Build and transmit an MLD query on gm_ifp.
+ *
+ * grp == :: sends a general query (to ff02::1); otherwise a group (or
+ * group-and-source, when n_srcs > 0) specific query addressed to the
+ * group itself.  s_bit sets the "Suppress router-side processing" flag.
+ * The source list is only transmitted for MLDv2; in MLDv1 mode the
+ * packet is cut down to the v1 header size.
+ */
+static void gm_send_query(struct gm_if *gm_ifp, pim_addr grp,
+			  const pim_addr *srcs, size_t n_srcs, bool s_bit)
+{
+	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
+	struct sockaddr_in6 dstaddr = {
+		.sin6_family = AF_INET6,
+		.sin6_scope_id = gm_ifp->ifp->ifindex,
+	};
+	struct {
+		struct icmp6_plain_hdr hdr;
+		struct mld_v2_query_hdr v2_query;
+	} query = {
+		/* clang-format off */
+		.hdr = {
+			.icmp6_type = ICMP6_MLD_QUERY,
+			.icmp6_code = 0,
+		},
+		.v2_query = {
+			.grp = grp,
+		},
+		/* clang-format on */
+	};
+	/* pseudo-header for checksum calculation only, not transmitted */
+	struct ipv6_ph ph6 = {
+		.src = pim_ifp->ll_lowest,
+		.ulpl = htons(sizeof(query)),
+		.next_hdr = IPPROTO_ICMPV6,
+	};
+	union {
+		char buf[CMSG_SPACE(8) /* hop options */ +
+			 CMSG_SPACE(sizeof(struct in6_pktinfo))];
+		struct cmsghdr align;
+	} cmsg = {};
+	struct cmsghdr *cmh;
+	struct msghdr mh[1] = {};
+	struct iovec iov[3];
+	size_t iov_len;
+	ssize_t ret, expect_ret;
+	uint8_t *dp;
+	struct in6_pktinfo *pktinfo;
+
+	if (if_is_loopback(gm_ifp->ifp)) {
+		/* Linux is a bit odd with multicast on loopback */
+		ph6.src = in6addr_loopback;
+		dstaddr.sin6_addr = in6addr_loopback;
+	} else if (pim_addr_is_any(grp))
+		dstaddr.sin6_addr = gm_all_hosts;
+	else
+		dstaddr.sin6_addr = grp;
+
+	query.v2_query.max_resp_code =
+		mld_max_resp_encode(gm_ifp->cur_max_resp);
+	/* QRV field is 3 bits; 0 means "more than 7" per RFC encoding */
+	query.v2_query.flags = (gm_ifp->cur_qrv < 8) ? gm_ifp->cur_qrv : 0;
+	if (s_bit)
+		query.v2_query.flags |= 0x08;
+	query.v2_query.qqic =
+		igmp_msg_encode16to8(gm_ifp->cur_query_intv / 1000);
+	query.v2_query.n_src = htons(n_srcs);
+
+	ph6.dst = dstaddr.sin6_addr;
+
+	/* ph6 not included in sendmsg */
+	iov[0].iov_base = &ph6;
+	iov[0].iov_len = sizeof(ph6);
+	iov[1].iov_base = &query;
+	if (gm_ifp->cur_version == GM_MLDV1) {
+		/* v1 query = header only, truncated to the v1 size */
+		iov_len = 2;
+		iov[1].iov_len = sizeof(query.hdr) + sizeof(struct mld_v1_pkt);
+	} else if (!n_srcs) {
+		iov_len = 2;
+		iov[1].iov_len = sizeof(query);
+	} else {
+		iov[1].iov_len = sizeof(query);
+		iov[2].iov_base = (void *)srcs;
+		iov[2].iov_len = n_srcs * sizeof(srcs[0]);
+		iov_len = 3;
+	}
+
+	/* checksum covers pseudo-header + payload (all iov entries) */
+	query.hdr.icmp6_cksum = in_cksumv(iov, iov_len);
+
+	if (PIM_DEBUG_IGMP_PACKETS)
+		zlog_debug(
+			log_ifp("MLD query %pPA -> %pI6 (grp=%pPA, %zu srcs)"),
+			&pim_ifp->ll_lowest, &dstaddr.sin6_addr, &grp, n_srcs);
+
+	/* iov[0] (pseudo-header) is skipped for the actual send */
+	mh->msg_name = &dstaddr;
+	mh->msg_namelen = sizeof(dstaddr);
+	mh->msg_iov = iov + 1;
+	mh->msg_iovlen = iov_len - 1;
+	mh->msg_control = &cmsg;
+	mh->msg_controllen = sizeof(cmsg.buf);
+
+	/* hand-rolled Hop-by-Hop header with Router Alert (value 0 = MLD),
+	 * required on all MLD packets
+	 */
+	cmh = CMSG_FIRSTHDR(mh);
+	cmh->cmsg_level = IPPROTO_IPV6;
+	cmh->cmsg_type = IPV6_HOPOPTS;
+	cmh->cmsg_len = CMSG_LEN(8);
+	dp = CMSG_DATA(cmh);
+	*dp++ = 0;		     /* next header */
+	*dp++ = 0;		     /* length (8-byte blocks, minus 1) */
+	*dp++ = IP6OPT_ROUTER_ALERT; /* router alert */
+	*dp++ = 2;		     /* length */
+	*dp++ = 0;		     /* value (2 bytes) */
+	*dp++ = 0;		     /* value (2 bytes) (0 = MLD) */
+	*dp++ = 0;		     /* pad0 */
+	*dp++ = 0;		     /* pad0 */
+
+	/* pin outgoing interface and source address */
+	cmh = CMSG_NXTHDR(mh, cmh);
+	cmh->cmsg_level = IPPROTO_IPV6;
+	cmh->cmsg_type = IPV6_PKTINFO;
+	cmh->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo));
+	pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmh);
+	pktinfo->ipi6_ifindex = gm_ifp->ifp->ifindex;
+	pktinfo->ipi6_addr = gm_ifp->cur_ll_lowest;
+
+	expect_ret = iov[1].iov_len;
+	if (iov_len == 3)
+		expect_ret += iov[2].iov_len;
+
+	frr_with_privs (&pimd_privs) {
+		ret = sendmsg(gm_ifp->pim->gm_socket, mh, 0);
+	}
+
+	/* short write counts as failure too */
+	if (ret != expect_ret) {
+		zlog_warn(log_ifp("failed to send query: %m"));
+		gm_ifp->stats.tx_query_fail++;
+	} else {
+		if (gm_ifp->cur_version == GM_MLDV1) {
+			if (pim_addr_is_any(grp))
+				gm_ifp->stats.tx_query_old_general++;
+			else
+				gm_ifp->stats.tx_query_old_group++;
+		} else {
+			if (pim_addr_is_any(grp))
+				gm_ifp->stats.tx_query_new_general++;
+			else if (!n_srcs)
+				gm_ifp->stats.tx_query_new_group++;
+			else
+				gm_ifp->stats.tx_query_new_groupsrc++;
+		}
+	}
+}
+
+/* Periodic general query timer.  Re-arms itself with the configured
+ * query interval; during startup (n_startup > 0) queries are sent at a
+ * quarter of the interval to speed up initial state learning.
+ */
+static void gm_t_query(struct thread *t)
+{
+	struct gm_if *gm_ifp = THREAD_ARG(t);
+	unsigned int timer_ms = gm_ifp->cur_query_intv;
+
+	if (gm_ifp->n_startup) {
+		timer_ms /= 4;
+		gm_ifp->n_startup--;
+	}
+
+	thread_add_timer_msec(router->master, gm_t_query, gm_ifp, timer_ms,
+			      &gm_ifp->t_query);
+
+	gm_send_query(gm_ifp, PIMADDR_ANY, NULL, 0, false);
+}
+
+/* Timer wrapper for retransmitting an S,G specific query. */
+static void gm_t_sg_query(struct thread *t)
+{
+	struct gm_sg *sg = THREAD_ARG(t);
+
+	gm_trigger_specific(sg);
+}
+
+/* S,G specific queries (triggered by a member leaving) get a little slack
+ * time so we can bundle queries for [S1,S2,S3,...],G into the same query
+ */
+static void gm_send_specific(struct gm_gsq_pending *pend_gsq)
+{
+	struct gm_if *gm_ifp = pend_gsq->iface;
+
+	gm_send_query(gm_ifp, pend_gsq->grp, pend_gsq->srcs, pend_gsq->n_src,
+		      pend_gsq->s_bit);
+
+	/* pending bundle is consumed by sending; remove and free it */
+	gm_gsq_pends_del(gm_ifp->gsq_pends, pend_gsq);
+	XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
+}
+
+/* Slack timer expired: flush a pending bundled S,G query. */
+static void gm_t_gsq_pend(struct thread *t)
+{
+	struct gm_gsq_pending *pend_gsq = THREAD_ARG(t);
+
+	gm_send_specific(pend_gsq);
+}
+
+/* Trigger a group- or group-and-source-specific query for sg (e.g. after
+ * a leave).  Schedules up to sg->n_query retransmissions; only sends if
+ * we are the elected querier and the VRF socket is open.  S,G queries
+ * are batched per group via gm_ifp->gsq_pends and flushed either when
+ * the source array fills up or when the fuzz timer fires.
+ */
+static void gm_trigger_specific(struct gm_sg *sg)
+{
+	struct gm_if *gm_ifp = sg->iface;
+	struct pim_interface *pim_ifp = gm_ifp->ifp->info;
+	struct gm_gsq_pending *pend_gsq, ref = {};
+
+	/* schedule next retransmission while any remain */
+	sg->n_query--;
+	if (sg->n_query)
+		thread_add_timer_msec(router->master, gm_t_sg_query, sg,
+				      gm_ifp->cur_query_intv_trig,
+				      &sg->t_sg_query);
+
+	/* only the elected querier sends specific queries */
+	if (!IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest))
+		return;
+	if (gm_ifp->pim->gm_socket == -1)
+		return;
+
+	if (PIM_DEBUG_IGMP_TRACE)
+		zlog_debug(log_sg(sg, "triggered query"));
+
+	/* *,G query: no bundling needed, send immediately */
+	if (pim_addr_is_any(sg->sgaddr.src)) {
+		gm_send_query(gm_ifp, sg->sgaddr.grp, NULL, 0, sg->query_sbit);
+		return;
+	}
+
+	ref.grp = sg->sgaddr.grp;
+	ref.s_bit = sg->query_sbit;
+
+	pend_gsq = gm_gsq_pends_find(gm_ifp->gsq_pends, &ref);
+	if (!pend_gsq) {
+		pend_gsq = XCALLOC(MTYPE_GM_GSQ_PENDING, sizeof(*pend_gsq));
+		pend_gsq->grp = sg->sgaddr.grp;
+		pend_gsq->s_bit = sg->query_sbit;
+		pend_gsq->iface = gm_ifp;
+		gm_gsq_pends_add(gm_ifp->gsq_pends, pend_gsq);
+
+		/* short fuzz delay so multiple sources get bundled */
+		thread_add_timer_tv(router->master, gm_t_gsq_pend, pend_gsq,
+				    &gm_ifp->cfg_timing_fuzz,
+				    &pend_gsq->t_send);
+	}
+
+	assert(pend_gsq->n_src < array_size(pend_gsq->srcs));
+
+	pend_gsq->srcs[pend_gsq->n_src] = sg->sgaddr.src;
+	pend_gsq->n_src++;
+
+	/* bundle full: flush now (gm_send_specific frees pend_gsq) */
+	if (pend_gsq->n_src == array_size(pend_gsq->srcs)) {
+		THREAD_OFF(pend_gsq->t_send);
+		gm_send_specific(pend_gsq);
+		pend_gsq = NULL;
+	}
+}
+
+/* Reference-counted open of the per-VRF MLD raw socket.  Called once
+ * per interface starting MLD; the socket is created on the first call
+ * (or retried if a previous attempt failed and left gm_socket == -1)
+ * and configured with the ancillary-data options gm_t_recv() relies on.
+ */
+static void gm_vrf_socket_incref(struct pim_instance *pim)
+{
+	struct vrf *vrf = pim->vrf;
+	int ret, intval;
+	struct icmp6_filter filter[1];
+
+	/* already open and at least one previous user => nothing to do */
+	if (pim->gm_socket_if_count++ && pim->gm_socket != -1)
+		return;
+
+	/* only accept the four MLD message types on this socket */
+	ICMP6_FILTER_SETBLOCKALL(filter);
+	ICMP6_FILTER_SETPASS(ICMP6_MLD_QUERY, filter);
+	ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_REPORT, filter);
+	ICMP6_FILTER_SETPASS(ICMP6_MLD_V1_DONE, filter);
+	ICMP6_FILTER_SETPASS(ICMP6_MLD_V2_REPORT, filter);
+
+	frr_with_privs (&pimd_privs) {
+		pim->gm_socket = vrf_socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
+					    vrf->vrf_id, vrf->name);
+		if (pim->gm_socket < 0) {
+			zlog_err("(VRF %s) could not create MLD socket: %m",
+				 vrf->name);
+			return;
+		}
+
+		ret = setsockopt(pim->gm_socket, SOL_ICMPV6, ICMP6_FILTER,
+				 filter, sizeof(filter));
+		if (ret)
+			zlog_err("(VRF %s) failed to set ICMP6_FILTER: %m",
+				 vrf->name);
+
+		/* needed to learn the packet's destination address */
+		intval = 1;
+		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVPKTINFO,
+				 &intval, sizeof(intval));
+		if (ret)
+			zlog_err("(VRF %s) failed to set IPV6_RECVPKTINFO: %m",
+				 vrf->name);
+
+		/* needed to verify the Router Alert option on RX */
+		intval = 1;
+		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPOPTS,
+				 &intval, sizeof(intval));
+		if (ret)
+			zlog_err("(VRF %s) failed to set IPV6_HOPOPTS: %m",
+				 vrf->name);
+
+		/* needed to verify hop limit == 1 on RX */
+		intval = 1;
+		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_RECVHOPLIMIT,
+				 &intval, sizeof(intval));
+		if (ret)
+			zlog_err("(VRF %s) failed to set IPV6_HOPLIMIT: %m",
+				 vrf->name);
+
+		/* NOTE(review): intval = 1 *enables* multicast loopback
+		 * (presumably for the loopback-interface handling in
+		 * gm_send_query), but the original error message claimed
+		 * "failed to disable".  Message fixed to match the code;
+		 * confirm enabling is the intended behavior.
+		 */
+		intval = 1;
+		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_LOOP,
+				 &intval, sizeof(intval));
+		if (ret)
+			zlog_err(
+				"(VRF %s) failed to set IPV6_MULTICAST_LOOP: %m",
+				vrf->name);
+
+		intval = 1;
+		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_HOPS,
+				 &intval, sizeof(intval));
+		if (ret)
+			zlog_err(
+				"(VRF %s) failed to set IPV6_MULTICAST_HOPS: %m",
+				vrf->name);
+
+		/* NB: IPV6_MULTICAST_ALL does not completely bypass multicast
+		 * RX filtering in Linux. It only means "receive all groups
+		 * that something on the system has joined". To actually
+		 * receive *all* MLD packets - which is what we need -
+		 * multicast routing must be enabled on the interface. And
+		 * this only works for MLD packets specifically.
+		 *
+		 * For reference, check ip6_mc_input() in net/ipv6/ip6_input.c
+		 * and in particular the #ifdef CONFIG_IPV6_MROUTE block there.
+		 *
+		 * Also note that the code there explicitly checks for the IPv6
+		 * router alert MLD option (which is required by the RFC to be
+		 * on MLD packets.) That implies trying to support hosts which
+		 * erroneously don't add that option is just not possible.
+		 */
+		intval = 1;
+		ret = setsockopt(pim->gm_socket, SOL_IPV6, IPV6_MULTICAST_ALL,
+				 &intval, sizeof(intval));
+		if (ret)
+			zlog_info(
+				"(VRF %s) failed to set IPV6_MULTICAST_ALL: %m (OK on old kernels)",
+				vrf->name);
+	}
+
+	thread_add_read(router->master, gm_t_recv, pim, pim->gm_socket,
+			&pim->t_gm_recv);
+}
+
+/* Drop one interface reference on the per-VRF MLD socket; close it
+ * when the last user goes away.
+ */
+static void gm_vrf_socket_decref(struct pim_instance *pim)
+{
+	if (--pim->gm_socket_if_count)
+		return;
+
+	THREAD_OFF(pim->t_gm_recv);
+	close(pim->gm_socket);
+	/* -1 marks "closed" so incref knows to recreate it */
+	pim->gm_socket = -1;
+}
+
+/* Bring up MLD on an interface: allocate and initialize the gm_if
+ * state, take a reference on the VRF socket, and join ff02::16
+ * (all-MLDv2-routers).  Caller guarantees PIM is configured and no
+ * MLD state exists yet (see asserts).
+ */
+static void gm_start(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	struct gm_if *gm_ifp;
+
+	assert(pim_ifp);
+	assert(pim_ifp->pim);
+	assert(pim_ifp->mroute_vif_index >= 0);
+	assert(!pim_ifp->mld);
+
+	gm_vrf_socket_incref(pim_ifp->pim);
+
+	gm_ifp = XCALLOC(MTYPE_GM_IFACE, sizeof(*gm_ifp));
+	gm_ifp->ifp = ifp;
+	pim_ifp->mld = gm_ifp;
+	gm_ifp->pim = pim_ifp->pim;
+	monotime(&gm_ifp->started);
+
+	zlog_info(log_ifp("starting MLD"));
+
+	if (pim_ifp->mld_version == 1)
+		gm_ifp->cur_version = GM_MLDV1;
+	else
+		gm_ifp->cur_version = GM_MLDV2;
+
+	/* hardcoded for dev without CLI */
+	gm_ifp->cur_qrv = 2;
+	gm_ifp->cur_query_intv = pim_ifp->gm_default_query_interval * 1000;
+	gm_ifp->cur_query_intv_trig = gm_ifp->cur_query_intv;
+	gm_ifp->cur_max_resp = 250;
+
+	/* slack window for bundling S,G specific queries */
+	gm_ifp->cfg_timing_fuzz.tv_sec = 0;
+	gm_ifp->cfg_timing_fuzz.tv_usec = 10 * 1000;
+
+	gm_sgs_init(gm_ifp->sgs);
+	gm_subscribers_init(gm_ifp->subscribers);
+	gm_packet_expires_init(gm_ifp->expires);
+	gm_grp_pends_init(gm_ifp->grp_pends);
+	gm_gsq_pends_init(gm_ifp->gsq_pends);
+
+	frr_with_privs (&pimd_privs) {
+		struct ipv6_mreq mreq;
+		int ret;
+
+		/* all-MLDv2 group */
+		mreq.ipv6mr_multiaddr = gm_all_routers;
+		mreq.ipv6mr_interface = ifp->ifindex;
+		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
+				 IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
+		if (ret)
+			zlog_err("(%s) failed to join ff02::16 (all-MLDv2): %m",
+				 ifp->name);
+	}
+}
+
+/* Tear down all MLD state on an interface: stop timers, leave the
+ * all-MLDv2 group, release the VRF socket reference, and free every
+ * packet/pending/S,G/subscriber object.  No-op if MLD isn't running.
+ */
+void gm_ifp_teardown(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	struct gm_if *gm_ifp;
+	struct gm_packet_state *pkt;
+	struct gm_grp_pending *pend_grp;
+	struct gm_gsq_pending *pend_gsq;
+	struct gm_subscriber *subscriber;
+	struct gm_sg *sg;
+
+	if (!pim_ifp || !pim_ifp->mld)
+		return;
+
+	gm_ifp = pim_ifp->mld;
+	/* flag prevents re-entrant processing while we dismantle state */
+	gm_ifp->stopping = true;
+	if (PIM_DEBUG_IGMP_EVENTS)
+		zlog_debug(log_ifp("MLD stop"));
+
+	THREAD_OFF(gm_ifp->t_query);
+	THREAD_OFF(gm_ifp->t_other_querier);
+	THREAD_OFF(gm_ifp->t_expire);
+
+	frr_with_privs (&pimd_privs) {
+		struct ipv6_mreq mreq;
+		int ret;
+
+		/* all-MLDv2 group */
+		mreq.ipv6mr_multiaddr = gm_all_routers;
+		mreq.ipv6mr_interface = ifp->ifindex;
+		ret = setsockopt(gm_ifp->pim->gm_socket, SOL_IPV6,
+				 IPV6_LEAVE_GROUP, &mreq, sizeof(mreq));
+		if (ret)
+			zlog_err(
+				"(%s) failed to leave ff02::16 (all-MLDv2): %m",
+				ifp->name);
+	}
+
+	gm_vrf_socket_decref(gm_ifp->pim);
+
+	/* drop packets first so S,G subscriber counts reach zero */
+	while ((pkt = gm_packet_expires_first(gm_ifp->expires)))
+		gm_packet_drop(pkt, false);
+
+	while ((pend_grp = gm_grp_pends_pop(gm_ifp->grp_pends))) {
+		THREAD_OFF(pend_grp->t_expire);
+		XFREE(MTYPE_GM_GRP_PENDING, pend_grp);
+	}
+
+	while ((pend_gsq = gm_gsq_pends_pop(gm_ifp->gsq_pends))) {
+		THREAD_OFF(pend_gsq->t_send);
+		XFREE(MTYPE_GM_GSQ_PENDING, pend_gsq);
+	}
+
+	while ((sg = gm_sgs_pop(gm_ifp->sgs))) {
+		THREAD_OFF(sg->t_sg_expire);
+		/* all subscriptions must be gone after gm_packet_drop above */
+		assertf(!gm_packet_sg_subs_count(sg->subs_negative), "%pSG",
+			&sg->sgaddr);
+		assertf(!gm_packet_sg_subs_count(sg->subs_positive), "%pSG",
+			&sg->sgaddr);
+
+		gm_sg_free(sg);
+	}
+
+	while ((subscriber = gm_subscribers_pop(gm_ifp->subscribers))) {
+		assertf(!gm_packets_count(subscriber->packets), "%pPA",
+			&subscriber->addr);
+		XFREE(MTYPE_GM_SUBSCRIBER, subscriber);
+	}
+
+	gm_grp_pends_fini(gm_ifp->grp_pends);
+	gm_packet_expires_fini(gm_ifp->expires);
+	gm_subscribers_fini(gm_ifp->subscribers);
+	gm_sgs_fini(gm_ifp->sgs);
+
+	XFREE(MTYPE_GM_IFACE, gm_ifp);
+	pim_ifp->mld = NULL;
+}
+
+/* React to a change of the interface's lowest link-local address: track
+ * it as our MLD source address, keep querier status consistent, and
+ * (re)start querying when we become or remain the elected querier.
+ */
+static void gm_update_ll(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	struct gm_if *gm_ifp = pim_ifp ? pim_ifp->mld : NULL;
+	bool was_querier;
+
+	/* the ternary above admits a NULL gm_ifp; don't dereference it.
+	 * (callers currently only invoke this with MLD running, but the
+	 * guard keeps the NULL handling consistent.)
+	 */
+	if (!gm_ifp)
+		return;
+
+	/* were we the querier under the previous link-local address? */
+	was_querier =
+		!IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) &&
+		!pim_addr_is_any(gm_ifp->querier);
+
+	gm_ifp->cur_ll_lowest = pim_ifp->ll_lowest;
+	if (was_querier)
+		gm_ifp->querier = pim_ifp->ll_lowest;
+	THREAD_OFF(gm_ifp->t_query);
+
+	/* no link-local address left => cannot act as querier */
+	if (pim_addr_is_any(gm_ifp->cur_ll_lowest)) {
+		if (was_querier)
+			zlog_info(log_ifp(
+				"lost link-local address, stopping querier"));
+		return;
+	}
+
+	if (was_querier)
+		zlog_info(log_ifp("new link-local %pPA while querier"),
+			  &gm_ifp->cur_ll_lowest);
+	else if (IPV6_ADDR_CMP(&gm_ifp->cur_ll_lowest, &gm_ifp->querier) < 0 ||
+		 pim_addr_is_any(gm_ifp->querier)) {
+		/* lowest address wins querier election (RFC 2710/3810) */
+		zlog_info(log_ifp("new link-local %pPA, becoming querier"),
+			  &gm_ifp->cur_ll_lowest);
+		gm_ifp->querier = gm_ifp->cur_ll_lowest;
+	} else
+		return;
+
+	/* (re)enter startup mode and send the first query immediately */
+	gm_ifp->n_startup = gm_ifp->cur_qrv;
+	thread_execute(router->master, gm_t_query, gm_ifp, 0);
+}
+
+/* Reconcile an interface's MLD state with its current configuration and
+ * operational status: start/stop MLD as needed, pick up link-local
+ * address changes, and apply query-interval / version config changes
+ * (bumping the querier if anything changed).
+ */
+void gm_ifp_update(struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = ifp->info;
+	struct gm_if *gm_ifp;
+	bool changed = false;
+
+	if (!pim_ifp)
+		return;
+	/* not operational / not PIM-enabled => MLD must be off */
+	if (!if_is_operative(ifp) || !pim_ifp->pim ||
+	    pim_ifp->mroute_vif_index < 0) {
+		gm_ifp_teardown(ifp);
+		return;
+	}
+
+	if (!pim_ifp->mld)
+		gm_start(ifp);
+
+	gm_ifp = pim_ifp->mld;
+	if (IPV6_ADDR_CMP(&pim_ifp->ll_lowest, &gm_ifp->cur_ll_lowest))
+		gm_update_ll(ifp);
+
+	unsigned int cfg_query_intv = pim_ifp->gm_default_query_interval * 1000;
+
+	if (gm_ifp->cur_query_intv != cfg_query_intv) {
+		gm_ifp->cur_query_intv = cfg_query_intv;
+		gm_ifp->cur_query_intv_trig = cfg_query_intv;
+		changed = true;
+	}
+
+	enum gm_version cfg_version;
+
+	if (pim_ifp->mld_version == 1)
+		cfg_version = GM_MLDV1;
+	else
+		cfg_version = GM_MLDV2;
+	if (gm_ifp->cur_version != cfg_version) {
+		gm_ifp->cur_version = cfg_version;
+		changed = true;
+	}
+
+	if (changed) {
+		if (PIM_DEBUG_IGMP_TRACE)
+			zlog_debug(log_ifp(
+				"MLD querier config changed, querying"));
+		gm_bump_querier(gm_ifp);
+	}
+}
+
+/*
+ * CLI (show commands only)
+ */
+
+#include "lib/command.h"
+
+#ifndef VTYSH_EXTRACT_PL
+#include "pimd/pim6_mld_clippy.c"
+#endif
+
+#define MLD_STR "Multicast Listener Discovery\n"
+
+/* Resolve the VRF argument of a show command.
+ *
+ * Returns the default VRF when vrf_str is NULL, NULL (with *err
+ * untouched) for "all" meaning "iterate all VRFs", or NULL with *err
+ * set to CMD_WARNING when the named VRF does not exist.
+ */
+static struct vrf *gm_cmd_vrf_lookup(struct vty *vty, const char *vrf_str,
+				     int *err)
+{
+	struct vrf *ret;
+
+	if (!vrf_str)
+		return vrf_lookup_by_id(VRF_DEFAULT);
+	if (!strcmp(vrf_str, "all"))
+		return NULL;
+	ret = vrf_lookup_by_name(vrf_str);
+	if (ret)
+		return ret;
+
+	vty_out(vty, "%% VRF %pSQq does not exist\n", vrf_str);
+	*err = CMD_WARNING;
+	return NULL;
+}
+
+/* Print the detailed (non-JSON) per-interface MLD status block for
+ * "show ipv6 mld interface ... detail".
+ */
+static void gm_show_if_one_detail(struct vty *vty, struct interface *ifp)
+{
+	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
+	struct gm_if *gm_ifp;
+	bool querier;
+	size_t i;
+
+	if (!pim_ifp) {
+		vty_out(vty, "Interface %s: no PIM/MLD config\n\n", ifp->name);
+		return;
+	}
+
+	gm_ifp = pim_ifp->mld;
+	if (!gm_ifp) {
+		vty_out(vty, "Interface %s: MLD not running\n\n", ifp->name);
+		return;
+	}
+
+	/* we are the querier iff our lowest link-local won the election */
+	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
+
+	vty_out(vty, "Interface %s: MLD running\n", ifp->name);
+	vty_out(vty, "  Uptime:                  %pTVMs\n", &gm_ifp->started);
+	vty_out(vty, "  MLD version:             %d\n", gm_ifp->cur_version);
+	vty_out(vty, "  Querier:                 %pPA%s\n", &gm_ifp->querier,
+		querier ? " (this system)" : "");
+	vty_out(vty, "  Query timer:             %pTH\n", gm_ifp->t_query);
+	vty_out(vty, "  Other querier timer:     %pTH\n",
+		gm_ifp->t_other_querier);
+	vty_out(vty, "  Robustness value:        %u\n", gm_ifp->cur_qrv);
+	vty_out(vty, "  Query interval:          %ums\n",
+		gm_ifp->cur_query_intv);
+	vty_out(vty, "  Query response timer:    %ums\n", gm_ifp->cur_max_resp);
+	vty_out(vty, "  Last member query intv.: %ums\n",
+		gm_ifp->cur_query_intv_trig);
+	vty_out(vty, "  %u expiry timers from general queries:\n",
+		gm_ifp->n_pending);
+	for (i = 0; i < gm_ifp->n_pending; i++) {
+		struct gm_general_pending *p = &gm_ifp->pending[i];
+
+		vty_out(vty, "    %9pTVMs ago (query) -> %9pTVMu (expiry)\n",
+			&p->query, &p->expiry);
+	}
+	vty_out(vty, "  %zu expiry timers from *,G queries\n",
+		gm_grp_pends_count(gm_ifp->grp_pends));
+	vty_out(vty, "  %zu expiry timers from S,G queries\n",
+		gm_gsq_pends_count(gm_ifp->gsq_pends));
+	vty_out(vty, "  %zu total *,G/S,G from %zu hosts in %zu bundles\n",
+		gm_sgs_count(gm_ifp->sgs),
+		gm_subscribers_count(gm_ifp->subscribers),
+		gm_packet_expires_count(gm_ifp->expires));
+	vty_out(vty, "\n");
+}
+
+/* Emit one interface's summary line (text) or object (JSON) for
+ * "show ipv6 mld interface".  Caller guarantees ifp->info is set.
+ */
+static void gm_show_if_one(struct vty *vty, struct interface *ifp,
+			   json_object *js_if)
+{
+	struct pim_interface *pim_ifp = (struct pim_interface *)ifp->info;
+	struct gm_if *gm_ifp = pim_ifp->mld;
+	bool querier;
+
+	if (!gm_ifp) {
+		if (js_if)
+			json_object_string_add(js_if, "state", "down");
+		else
+			vty_out(vty, "%-16s  %5s\n", ifp->name, "down");
+		return;
+	}
+
+	querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
+
+	if (js_if) {
+		json_object_string_add(js_if, "state", "up");
+		json_object_string_addf(js_if, "version", "%d",
+					gm_ifp->cur_version);
+		json_object_string_addf(js_if, "upTime", "%pTVMs",
+					&gm_ifp->started);
+		json_object_boolean_add(js_if, "querier", querier);
+		json_object_string_addf(js_if, "querierIp", "%pPA",
+					&gm_ifp->querier);
+		/* only the timer relevant to our role is shown */
+		if (querier)
+			json_object_string_addf(js_if, "queryTimer", "%pTH",
+						gm_ifp->t_query);
+		else
+			json_object_string_addf(js_if, "otherQuerierTimer",
+						"%pTH",
+						gm_ifp->t_other_querier);
+	} else {
+		vty_out(vty, "%-16s  %-5s  %d  %-25pPA  %-5s %11pTH  %pTVMs\n",
+			ifp->name, "up", gm_ifp->cur_version, &gm_ifp->querier,
+			querier ? "query" : "other",
+			querier ? gm_ifp->t_query : gm_ifp->t_other_querier,
+			&gm_ifp->started);
+	}
+}
+
+/* Iterate one VRF's interfaces for "show ipv6 mld interface",
+ * optionally filtered by interface name; dispatches to the detail or
+ * summary renderer per interface.
+ */
+static void gm_show_if_vrf(struct vty *vty, struct vrf *vrf, const char *ifname,
+			   bool detail, json_object *js)
+{
+	struct interface *ifp;
+	json_object *js_vrf;
+
+	if (js) {
+		js_vrf = json_object_new_object();
+		json_object_object_add(js, vrf->name, js_vrf);
+	}
+
+	FOR_ALL_INTERFACES (vrf, ifp) {
+		json_object *js_if = NULL;
+
+		if (ifname && strcmp(ifp->name, ifname))
+			continue;
+		/* detail has no JSON form; handled entirely here */
+		if (detail && !js) {
+			gm_show_if_one_detail(vty, ifp);
+			continue;
+		}
+
+		if (!ifp->info)
+			continue;
+		if (js) {
+			js_if = json_object_new_object();
+			json_object_object_add(js_vrf, ifp->name, js_if);
+		}
+
+		gm_show_if_one(vty, ifp, js_if);
+	}
+}
+
+/* Top-level renderer for "show ipv6 mld interface": prints the table
+ * header (plain summary mode only), then walks one VRF or all of them
+ * (vrf == NULL means all).
+ */
+static void gm_show_if(struct vty *vty, struct vrf *vrf, const char *ifname,
+		       bool detail, json_object *js)
+{
+	if (!js && !detail)
+		vty_out(vty, "%-16s  %-5s  V  %-25s  %-18s  %s\n", "Interface",
+			"State", "Querier", "Timer", "Uptime");
+
+	if (vrf)
+		gm_show_if_vrf(vty, vrf, ifname, detail, js);
+	else
+		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
+			gm_show_if_vrf(vty, vrf, ifname, detail, js);
+}
+
+/* "show ipv6 mld [vrf ...] interface [IFNAME] [detail|json]"
+ *
+ * Help strings fixed: the spurious leading DEBUG_STR (there is no
+ * "debug" token in this command) is removed and the missing help line
+ * for IFNAME added, so each token lines up with exactly one help
+ * string, matching gm_show_interface_stats below.
+ */
+DEFPY(gm_show_interface,
+      gm_show_interface_cmd,
+      "show ipv6 mld [vrf <VRF|all>$vrf_str] interface [IFNAME] [detail$detail|json$json]",
+      SHOW_STR
+      IPV6_STR
+      MLD_STR
+      VRF_FULL_CMD_HELP_STR
+      "MLD interface information\n"
+      "Interface name\n"
+      "Detailed output\n"
+      JSON_STR)
+{
+	int ret = CMD_SUCCESS;
+	struct vrf *vrf;
+	json_object *js = NULL;
+
+	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
+	if (ret != CMD_SUCCESS)
+		return ret;
+
+	if (json)
+		js = json_object_new_object();
+	gm_show_if(vty, vrf, ifname, !!detail, js);
+	return vty_json(vty, js);
+}
+
+/* Render one interface's MLD statistics, either as vty text lines or
+ * into js_if.  The table below maps each counter in gm_if_stats to a
+ * human-readable label and a JSON key.
+ */
+static void gm_show_stats_one(struct vty *vty, struct gm_if *gm_ifp,
+			      json_object *js_if)
+{
+	struct gm_if_stats *stats = &gm_ifp->stats;
+	/* clang-format off */
+	struct {
+		const char *text;
+		const char *js_key;
+		uint64_t *val;
+	} *item, items[] = {
+		{ "v2 reports received", "rxV2Reports", &stats->rx_new_report },
+		{ "v1 reports received", "rxV1Reports", &stats->rx_old_report },
+		{ "v1 done received", "rxV1Done", &stats->rx_old_leave },
+
+		{ "v2 *,* queries received", "rxV2QueryGeneral", &stats->rx_query_new_general },
+		{ "v2 *,G queries received", "rxV2QueryGroup", &stats->rx_query_new_group },
+		{ "v2 S,G queries received", "rxV2QueryGroupSource", &stats->rx_query_new_groupsrc },
+		{ "v2 S-bit queries received", "rxV2QuerySBit", &stats->rx_query_new_sbit },
+		{ "v1 *,* queries received", "rxV1QueryGeneral", &stats->rx_query_old_general },
+		{ "v1 *,G queries received", "rxV1QueryGroup", &stats->rx_query_old_group },
+
+		{ "v2 *,* queries sent", "txV2QueryGeneral", &stats->tx_query_new_general },
+		{ "v2 *,G queries sent", "txV2QueryGroup", &stats->tx_query_new_group },
+		{ "v2 S,G queries sent", "txV2QueryGroupSource", &stats->tx_query_new_groupsrc },
+		{ "v1 *,* queries sent", "txV1QueryGeneral", &stats->tx_query_old_general },
+		{ "v1 *,G queries sent", "txV1QueryGroup", &stats->tx_query_old_group },
+		{ "TX errors", "txErrors", &stats->tx_query_fail },
+
+		{ "RX dropped (checksum error)", "rxDropChecksum", &stats->rx_drop_csum },
+		{ "RX dropped (invalid source)", "rxDropSrcAddr", &stats->rx_drop_srcaddr },
+		{ "RX dropped (invalid dest.)", "rxDropDstAddr", &stats->rx_drop_dstaddr },
+		{ "RX dropped (missing alert)", "rxDropRtrAlert", &stats->rx_drop_ra },
+		{ "RX dropped (malformed pkt.)", "rxDropMalformed", &stats->rx_drop_malformed },
+		{ "RX truncated reports", "rxTruncatedRep", &stats->rx_trunc_report },
+	};
+	/* clang-format on */
+
+	for (item = items; item < items + array_size(items); item++) {
+		if (js_if)
+			json_object_int_add(js_if, item->js_key, *item->val);
+		else
+			vty_out(vty, "  %-30s  %" PRIu64 "\n", item->text,
+				*item->val);
+	}
+}
+
+/* Walk one VRF's interfaces for "show ipv6 mld statistics", skipping
+ * interfaces without MLD running, and render each via
+ * gm_show_stats_one().
+ */
+static void gm_show_stats_vrf(struct vty *vty, struct vrf *vrf,
+			      const char *ifname, json_object *js)
+{
+	struct interface *ifp;
+	json_object *js_vrf;
+
+	if (js) {
+		js_vrf = json_object_new_object();
+		json_object_object_add(js, vrf->name, js_vrf);
+	}
+
+	FOR_ALL_INTERFACES (vrf, ifp) {
+		struct pim_interface *pim_ifp;
+		struct gm_if *gm_ifp;
+		json_object *js_if = NULL;
+
+		if (ifname && strcmp(ifp->name, ifname))
+			continue;
+
+		if (!ifp->info)
+			continue;
+		pim_ifp = ifp->info;
+		if (!pim_ifp->mld)
+			continue;
+		gm_ifp = pim_ifp->mld;
+
+		if (js) {
+			js_if = json_object_new_object();
+			json_object_object_add(js_vrf, ifp->name, js_if);
+		} else {
+			vty_out(vty, "Interface: %s\n", ifp->name);
+		}
+		gm_show_stats_one(vty, gm_ifp, js_if);
+		if (!js)
+			vty_out(vty, "\n");
+	}
+}
+
+/* "show ipv6 mld [vrf ...] statistics [interface IFNAME] [json]" —
+ * per-interface MLD RX/TX counters; vrf == NULL (from "all") iterates
+ * every VRF.
+ */
+DEFPY(gm_show_interface_stats,
+      gm_show_interface_stats_cmd,
+      "show ipv6 mld [vrf <VRF|all>$vrf_str] statistics [interface IFNAME] [json$json]",
+      SHOW_STR
+      IPV6_STR
+      MLD_STR
+      VRF_FULL_CMD_HELP_STR
+      "MLD statistics\n"
+      INTERFACE_STR
+      "Interface name\n"
+      JSON_STR)
+{
+	int ret = CMD_SUCCESS;
+	struct vrf *vrf;
+	json_object *js = NULL;
+
+	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
+	if (ret != CMD_SUCCESS)
+		return ret;
+
+	if (json)
+		js = json_object_new_object();
+
+	if (vrf)
+		gm_show_stats_vrf(vty, vrf, ifname, js);
+	else
+		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
+			gm_show_stats_vrf(vty, vrf, ifname, js);
+	return vty_json(vty, js);
+}
+
/* Dump MLD (S,G) join state for a single interface.
 *
 * vty     output target (plain-text mode)
 * gm_ifp  MLD interface state to dump
 * groups  optional group prefix filter (NULL = all groups)
 * sources optional source prefix filter (NULL = all sources)
 * detail  also list individual tracked subscribers per (S,G)
 * js_if   JSON container for this interface; NULL selects plain-text output
 */
static void gm_show_joins_one(struct vty *vty, struct gm_if *gm_ifp,
			      const struct prefix_ipv6 *groups,
			      const struct prefix_ipv6 *sources, bool detail,
			      json_object *js_if)
{
	struct gm_sg *sg, *sg_start;
	json_object *js_group = NULL;
	/* group currently open in js_group; S,Gs arrive grouped by address */
	pim_addr js_grpaddr = PIMADDR_ANY;
	struct gm_subscriber sub_ref = {}, *sub_untracked;

	if (groups) {
		struct gm_sg sg_ref = {};

		/* S,G entries are ordered by group; seek to the first entry
		 * at/after the filter prefix so iteration can stop early.
		 */
		sg_ref.sgaddr.grp = pim_addr_from_prefix(groups);
		sg_start = gm_sgs_find_gteq(gm_ifp->sgs, &sg_ref);
	} else
		sg_start = gm_sgs_first(gm_ifp->sgs);

	sub_ref.addr = gm_dummy_untracked;
	sub_untracked = gm_subscribers_find(gm_ifp->subscribers, &sub_ref);
	/* NB: sub_untracked may be NULL if no untracked joins exist */

	frr_each_from (gm_sgs, gm_ifp->sgs, sg, sg_start) {
		struct timeval *recent = NULL, *untracked = NULL;
		json_object *js_src;

		if (groups) {
			struct prefix grp_p;

			pim_addr_to_prefix(&grp_p, sg->sgaddr.grp);
			/* sorted iteration => first mismatch ends the walk */
			if (!prefix_match(groups, &grp_p))
				break;
		}

		if (sources) {
			struct prefix src_p;

			pim_addr_to_prefix(&src_p, sg->sgaddr.src);
			/* sources aren't contiguous in the tree; just skip */
			if (!prefix_match(sources, &src_p))
				continue;
		}

		/* timestamp of the newest report covering this S,G */
		if (sg->most_recent) {
			struct gm_packet_state *packet;

			packet = gm_packet_sg2state(sg->most_recent);
			recent = &packet->received;
		}

		/* separate timestamp for the pseudo-"untracked" subscriber
		 * (MLDv1 reports / hosts that can't be tracked individually)
		 */
		if (sub_untracked) {
			struct gm_packet_state *packet;
			struct gm_packet_sg *item;

			item = gm_packet_sg_find(sg, GM_SUB_POS, sub_untracked);
			if (item) {
				packet = gm_packet_sg2state(item);
				untracked = &packet->received;
			}
		}

		if (!js_if) {
			FMT_NSTD_BEGIN; /* %.0p */
			vty_out(vty,
				"%-30pPA %-30pPAs %-16s %10.0pTVMs %10.0pTVMs %10.0pTVMs\n",
				&sg->sgaddr.grp, &sg->sgaddr.src,
				gm_states[sg->state], recent, untracked,
				&sg->created);

			if (!detail)
				continue;

			struct gm_packet_sg *item;
			struct gm_packet_state *packet;

			/* untracked entry was already shown on the summary
			 * line above; don't repeat it per-subscriber
			 */
			frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
				packet = gm_packet_sg2state(item);

				if (packet->subscriber == sub_untracked)
					continue;
				vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
					&packet->subscriber->addr, "(JOIN)",
					&packet->received);
			}
			frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
				packet = gm_packet_sg2state(item);

				if (packet->subscriber == sub_untracked)
					continue;
				vty_out(vty, " %-58pPA %-16s %10.0pTVMs\n",
					&packet->subscriber->addr, "(PRUNE)",
					&packet->received);
			}
			FMT_NSTD_END; /* %.0p */
			continue;
		}
		/* if (js_if) */

		/* open a new JSON group object whenever the group changes */
		if (!js_group || pim_addr_cmp(js_grpaddr, sg->sgaddr.grp)) {
			js_group = json_object_new_object();
			json_object_object_addf(js_if, js_group, "%pPA",
						&sg->sgaddr.grp);
			js_grpaddr = sg->sgaddr.grp;
		}

		js_src = json_object_new_object();
		json_object_object_addf(js_group, js_src, "%pPA",
					&sg->sgaddr.src);

		json_object_string_add(js_src, "state", gm_states[sg->state]);
		json_object_string_addf(js_src, "created", "%pTVMs",
					&sg->created);
		json_object_string_addf(js_src, "lastSeen", "%pTVMs", recent);

		if (untracked)
			json_object_string_addf(js_src, "untrackedLastSeen",
						"%pTVMs", untracked);
		if (!detail)
			continue;

		json_object *js_subs;
		struct gm_packet_sg *item;
		struct gm_packet_state *packet;

		js_subs = json_object_new_object();
		json_object_object_add(js_src, "joinedBy", js_subs);
		frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
			packet = gm_packet_sg2state(item);
			if (packet->subscriber == sub_untracked)
				continue;

			json_object *js_sub;

			js_sub = json_object_new_object();
			json_object_object_addf(js_subs, js_sub, "%pPA",
						&packet->subscriber->addr);
			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
						&packet->received);
		}

		js_subs = json_object_new_object();
		json_object_object_add(js_src, "prunedBy", js_subs);
		frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
			packet = gm_packet_sg2state(item);
			if (packet->subscriber == sub_untracked)
				continue;

			json_object *js_sub;

			js_sub = json_object_new_object();
			json_object_object_addf(js_subs, js_sub, "%pPA",
						&packet->subscriber->addr);
			json_object_string_addf(js_sub, "lastSeen", "%pTVMs",
						&packet->received);
		}
	}
}
+
+static void gm_show_joins_vrf(struct vty *vty, struct vrf *vrf,
+ const char *ifname,
+ const struct prefix_ipv6 *groups,
+ const struct prefix_ipv6 *sources, bool detail,
+ json_object *js)
+{
+ struct interface *ifp;
+ json_object *js_vrf;
+
+ if (js) {
+ js_vrf = json_object_new_object();
+ json_object_object_add(js, vrf->name, js_vrf);
+ }
+
+ FOR_ALL_INTERFACES (vrf, ifp) {
+ struct pim_interface *pim_ifp;
+ struct gm_if *gm_ifp;
+ json_object *js_if = NULL;
+
+ if (ifname && strcmp(ifp->name, ifname))
+ continue;
+
+ if (!ifp->info)
+ continue;
+ pim_ifp = ifp->info;
+ if (!pim_ifp->mld)
+ continue;
+ gm_ifp = pim_ifp->mld;
+
+ if (js) {
+ js_if = json_object_new_object();
+ json_object_object_add(js_vrf, ifp->name, js_if);
+ }
+
+ if (!js && !ifname)
+ vty_out(vty, "\nOn interface %s:\n", ifp->name);
+
+ gm_show_joins_one(vty, gm_ifp, groups, sources, detail, js_if);
+ }
+}
+
DEFPY(gm_show_interface_joins,
      gm_show_interface_joins_cmd,
      "show ipv6 mld [vrf <VRF|all>$vrf_str] joins [{interface IFNAME|groups X:X::X:X/M|sources X:X::X:X/M|detail$detail}] [json$json]",
      SHOW_STR
      IPV6_STR
      MLD_STR
      VRF_FULL_CMD_HELP_STR
      "MLD joined groups & sources\n"
      INTERFACE_STR
      "Interface name\n"
      "Limit output to group range\n"
      "Show groups covered by this prefix\n"
      "Limit output to source range\n"
      "Show sources covered by this prefix\n"
      "Show details, including tracked receivers\n"
      JSON_STR)
{
	/* vrf_str/ifname/groups/sources/detail/json are variables generated
	 * by DEFPY from the command string above.
	 */
	int ret = CMD_SUCCESS;
	struct vrf *vrf;
	json_object *js = NULL;

	/* resolves "vrf NAME"/"vrf all"; NULL vrf = walk all VRFs below */
	vrf = gm_cmd_vrf_lookup(vty, vrf_str, &ret);
	if (ret != CMD_SUCCESS)
		return ret;

	if (json)
		js = json_object_new_object();
	else
		/* table header; data rows come from gm_show_joins_one() */
		vty_out(vty, "%-30s %-30s %-16s %10s %10s %10s\n", "Group",
			"Source", "State", "LastSeen", "NonTrkSeen", "Created");

	if (vrf)
		gm_show_joins_vrf(vty, vrf, ifname, groups, sources, !!detail,
				  js);
	else
		RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name)
			gm_show_joins_vrf(vty, vrf, ifname, groups, sources,
					  !!detail, js);
	/* vty_json consumes js (js is NULL in the plain-text case) */
	return vty_json(vty, js);
}
+
+DEFPY(gm_debug_show,
+ gm_debug_show_cmd,
+ "debug show mld interface IFNAME",
+ DEBUG_STR
+ SHOW_STR
+ "MLD"
+ INTERFACE_STR
+ "interface name")
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+ struct gm_if *gm_ifp;
+
+ ifp = if_lookup_by_name(ifname, VRF_DEFAULT);
+ if (!ifp) {
+ vty_out(vty, "%% no such interface: %pSQq\n", ifname);
+ return CMD_WARNING;
+ }
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ vty_out(vty, "%% no PIM state for interface %pSQq\n", ifname);
+ return CMD_WARNING;
+ }
+
+ gm_ifp = pim_ifp->mld;
+ if (!gm_ifp) {
+ vty_out(vty, "%% no MLD state for interface %pSQq\n", ifname);
+ return CMD_WARNING;
+ }
+
+ vty_out(vty, "querier: %pPA\n", &gm_ifp->querier);
+ vty_out(vty, "ll_lowest: %pPA\n\n", &pim_ifp->ll_lowest);
+ vty_out(vty, "t_query: %pTHD\n", gm_ifp->t_query);
+ vty_out(vty, "t_other_querier: %pTHD\n", gm_ifp->t_other_querier);
+ vty_out(vty, "t_expire: %pTHD\n", gm_ifp->t_expire);
+
+ vty_out(vty, "\nn_pending: %u\n", gm_ifp->n_pending);
+ for (size_t i = 0; i < gm_ifp->n_pending; i++) {
+ int64_t query, expiry;
+
+ query = monotime_since(&gm_ifp->pending[i].query, NULL);
+ expiry = monotime_until(&gm_ifp->pending[i].expiry, NULL);
+
+ vty_out(vty, "[%zu]: query %"PRId64"ms ago, expiry in %"PRId64"ms\n",
+ i, query / 1000, expiry / 1000);
+ }
+
+ struct gm_sg *sg;
+ struct gm_packet_state *pkt;
+ struct gm_packet_sg *item;
+ struct gm_subscriber *subscriber;
+
+ vty_out(vty, "\n%zu S,G entries:\n", gm_sgs_count(gm_ifp->sgs));
+ frr_each (gm_sgs, gm_ifp->sgs, sg) {
+ vty_out(vty, "\t%pSG t_expire=%pTHD\n", &sg->sgaddr,
+ sg->t_sg_expire);
+
+ vty_out(vty, "\t @pos:%zu\n",
+ gm_packet_sg_subs_count(sg->subs_positive));
+ frr_each (gm_packet_sg_subs, sg->subs_positive, item) {
+ pkt = gm_packet_sg2state(item);
+
+ vty_out(vty, "\t\t+%s%s [%pPAs %p] %p+%u\n",
+ item->is_src ? "S" : "",
+ item->is_excl ? "E" : "",
+ &pkt->subscriber->addr, pkt->subscriber, pkt,
+ item->offset);
+
+ assert(item->sg == sg);
+ }
+ vty_out(vty, "\t @neg:%zu\n",
+ gm_packet_sg_subs_count(sg->subs_negative));
+ frr_each (gm_packet_sg_subs, sg->subs_negative, item) {
+ pkt = gm_packet_sg2state(item);
+
+ vty_out(vty, "\t\t-%s%s [%pPAs %p] %p+%u\n",
+ item->is_src ? "S" : "",
+ item->is_excl ? "E" : "",
+ &pkt->subscriber->addr, pkt->subscriber, pkt,
+ item->offset);
+
+ assert(item->sg == sg);
+ }
+ }
+
+ vty_out(vty, "\n%zu subscribers:\n",
+ gm_subscribers_count(gm_ifp->subscribers));
+ frr_each (gm_subscribers, gm_ifp->subscribers, subscriber) {
+ vty_out(vty, "\t%pPA %p %zu packets\n", &subscriber->addr,
+ subscriber, gm_packets_count(subscriber->packets));
+
+ frr_each (gm_packets, subscriber->packets, pkt) {
+ vty_out(vty, "\t\t%p %.3fs ago %u of %u items active\n",
+ pkt,
+ monotime_since(&pkt->received, NULL) *
+ 0.000001f,
+ pkt->n_active, pkt->n_sg);
+
+ for (size_t i = 0; i < pkt->n_sg; i++) {
+ item = pkt->items + i;
+
+ vty_out(vty, "\t\t[%zu]", i);
+
+ if (!item->sg) {
+ vty_out(vty, " inactive\n");
+ continue;
+ }
+
+ vty_out(vty, " %s%s %pSG nE=%u\n",
+ item->is_src ? "S" : "",
+ item->is_excl ? "E" : "",
+ &item->sg->sgaddr, item->n_exclude);
+ }
+ }
+ }
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(gm_debug_iface_cfg,
+ gm_debug_iface_cfg_cmd,
+ "debug ipv6 mld {"
+ "robustness (0-7)|"
+ "query-max-response-time (1-8387584)"
+ "}",
+ DEBUG_STR
+ IPV6_STR
+ "Multicast Listener Discovery\n"
+ "QRV\nQRV\n"
+ "maxresp\nmaxresp\n")
+{
+ VTY_DECLVAR_CONTEXT(interface, ifp);
+ struct pim_interface *pim_ifp;
+ struct gm_if *gm_ifp;
+ bool changed = false;
+
+ pim_ifp = ifp->info;
+ if (!pim_ifp) {
+ vty_out(vty, "%% no PIM state for interface %pSQq\n",
+ ifp->name);
+ return CMD_WARNING;
+ }
+ gm_ifp = pim_ifp->mld;
+ if (!gm_ifp) {
+ vty_out(vty, "%% no MLD state for interface %pSQq\n",
+ ifp->name);
+ return CMD_WARNING;
+ }
+
+ if (robustness_str && gm_ifp->cur_qrv != robustness) {
+ gm_ifp->cur_qrv = robustness;
+ changed = true;
+ }
+ if (query_max_response_time_str &&
+ gm_ifp->cur_max_resp != (unsigned int)query_max_response_time) {
+ gm_ifp->cur_max_resp = query_max_response_time;
+ changed = true;
+ }
+
+ if (changed) {
+ vty_out(vty, "%% MLD querier config changed, bumping\n");
+ gm_bump_querier(gm_ifp);
+ }
+ return CMD_SUCCESS;
+}
+
+void gm_cli_init(void);
+
+void gm_cli_init(void)
+{
+ install_element(VIEW_NODE, &gm_show_interface_cmd);
+ install_element(VIEW_NODE, &gm_show_interface_stats_cmd);
+ install_element(VIEW_NODE, &gm_show_interface_joins_cmd);
+
+ install_element(VIEW_NODE, &gm_debug_show_cmd);
+ install_element(INTERFACE_NODE, &gm_debug_iface_cfg_cmd);
+}
--- /dev/null
+/*
+ * PIMv6 MLD querier
+ * Copyright (C) 2021-2022 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef PIM6_MLD_H
+#define PIM6_MLD_H
+
+#include "typesafe.h"
+#include "pim_addr.h"
+
+struct thread;
+struct pim_instance;
+struct gm_packet_sg;
+struct gm_if;
+struct channel_oil;
+
+#define MLD_DEFAULT_VERSION 2
+
/* see comment below on subs_negative/subs_positive */
enum gm_sub_sense {
	/* negative/pruning: S,G in EXCLUDE */
	GM_SUB_NEG = 0,
	/* positive/joining: *,G in EXCLUDE and S,G in INCLUDE */
	GM_SUB_POS = 1,
};

enum gm_sg_state {
	GM_SG_NOINFO = 0,
	GM_SG_JOIN,
	GM_SG_JOIN_EXPIRING,
	/* remaining 3 only valid for S,G when *,G in EXCLUDE */
	GM_SG_PRUNE,
	GM_SG_NOPRUNE,
	GM_SG_NOPRUNE_EXPIRING,
};

/* a join is wanted for every state except "no info" and an active PRUNE;
 * the *_EXPIRING states keep forwarding until they fully time out.
 */
static inline bool gm_sg_state_want_join(enum gm_sg_state state)
{
	switch (state) {
	case GM_SG_NOINFO:
	case GM_SG_PRUNE:
		return false;
	default:
		return true;
	}
}
+
+/* MLD (S,G) state (on an interface)
+ *
+ * group is always != ::, src is :: for (*,G) joins. sort order in RB tree is
+ * such that sources for a particular group can be iterated by starting at the
+ * group. For INCLUDE, no (*,G) entry exists, only (S,G).
+ */
+
+PREDECL_RBTREE_UNIQ(gm_packet_sg_subs);
+PREDECL_RBTREE_UNIQ(gm_sgs);
+struct gm_sg {
+ pim_sgaddr sgaddr;
+ struct gm_if *iface;
+ struct gm_sgs_item itm;
+
+ enum gm_sg_state state;
+ struct channel_oil *oil;
+ bool tib_joined;
+
+ struct timeval created;
+
+ /* if a group- or group-and-source specific query is running
+ * (implies we haven't received any report yet, since it's cancelled
+ * by that)
+ */
+ struct thread *t_sg_expire;
+
+ /* last-member-left triggered queries (group/group-source specific)
+ *
+ * this timer will be running even if we aren't the elected querier,
+ * in case the election result changes midway through.
+ */
+ struct thread *t_sg_query;
+
+ /* we must keep sending (QRV) queries even if we get a positive
+ * response, to make sure other routers are updated. query_sbit
+ * will be set in that case, since other routers need the *response*,
+ * not the *query*
+ */
+ uint8_t n_query;
+ bool query_sbit;
+
+ /* subs_positive tracks gm_packet_sg resulting in a JOIN, i.e. for
+ * (*,G) it has *EXCLUDE* items, for (S,G) it has *INCLUDE* items.
+ *
+ * subs_negative is always empty for (*,G) and tracks EXCLUDE items
+ * for (S,G). This means that an (S,G) entry is active as a PRUNE if
+ * len(src->subs_negative) == len(grp->subs_positive)
+ * && len(src->subs_positive) == 0
+ * (i.e. all receivers for the group opted to exclude this S,G and
+ * noone did an SSM join for the S,G)
+ */
+ union {
+ struct {
+ struct gm_packet_sg_subs_head subs_negative[1];
+ struct gm_packet_sg_subs_head subs_positive[1];
+ };
+ struct gm_packet_sg_subs_head subs[2];
+ };
+
+ /* If the elected querier is not ourselves, queries and reports might
+ * get reordered in rare circumstances, i.e. the report could arrive
+ * just a microsecond before the query kicks off the timer. This can
+ * then result in us thinking there are no more receivers since no
+ * report might be received during the query period.
+ *
+ * To avoid this, keep track of the most recent report for this (S,G)
+ * so we can do a quick check to add just a little bit of slack.
+ *
+ * EXCLUDE S,Gs are never in most_recent.
+ */
+ struct gm_packet_sg *most_recent;
+};
+
+/* host tracking entry. addr will be one of:
+ *
+ * :: - used by hosts during address acquisition
+ * ::1 - may show up on some OS for joins by the router itself
+ * link-local - regular operation by MLDv2 hosts
+ * ffff:..:ffff - MLDv1 entry (cannot be tracked due to report suppression)
+ *
+ * global scope IPv6 addresses can never show up here
+ */
+PREDECL_HASH(gm_subscribers);
+PREDECL_DLIST(gm_packets);
+struct gm_subscriber {
+ pim_addr addr;
+ struct gm_subscribers_item itm;
+
+ struct gm_if *iface;
+ size_t refcount;
+
+ struct gm_packets_head packets[1];
+
+ struct timeval created;
+};
+
+/*
+ * MLD join state is kept batched by packet. Since the timers for all items
+ * in a packet are the same, this reduces the number of timers we're keeping
+ * track of. It also eases tracking for EXCLUDE state groups because the
+ * excluded sources are in the same packet. (MLD does not support splitting
+ * that if it exceeds MTU, it's always a full replace for exclude.)
+ *
+ * Since packets may be partially superseded by newer packets, the "active"
+ * field is used to track this.
+ */
+
+/* gm_packet_sg is allocated as part of gm_packet_state, note the items[0]
+ * array at the end of that. gm_packet_sg is NEVER directly allocated with
+ * XMALLOC/XFREE.
+ */
+struct gm_packet_sg {
+ /* non-NULL as long as this gm_packet_sg is the most recent entry
+ * for (subscriber,S,G). Cleared to NULL when a newer packet by the
+ * subscriber replaces this item.
+ *
+ * (Old items are kept around so we don't need to realloc/resize
+ * gm_packet_state, which would mess up a whole lot of pointers)
+ */
+ struct gm_sg *sg;
+
+ /* gm_sg -> (subscriber, gm_packet_sg)
+ * only on RB-tree while sg != NULL, i.e. not superseded by newer.
+ */
+ struct gm_packet_sg_subs_item subs_itm;
+
+ bool is_src : 1; /* := (src != ::) */
+ bool is_excl : 1;
+
+ /* for getting back to struct gm_packet_state, cf.
+ * gm_packet_sg2state() below
+ */
+ uint16_t offset;
+
+ /* if this is a group entry in EXCLUDE state, n_exclude counts how
+ * many sources are on the exclude list here. They follow immediately
+ * after.
+ */
+ uint16_t n_exclude;
+};
+
+#define gm_packet_sg2state(sg) \
+ container_of(sg, struct gm_packet_state, items[sg->offset])
+
+PREDECL_DLIST(gm_packet_expires);
+struct gm_packet_state {
+ struct gm_if *iface;
+ struct gm_subscriber *subscriber;
+ struct gm_packets_item pkt_itm;
+
+ struct timeval received;
+ struct gm_packet_expires_item exp_itm;
+
+ /* n_active starts equal to n_sg; whenever active is set to false on
+ * an item it is decremented. When n_active == 0, the packet can be
+ * freed.
+ */
+ uint16_t n_sg, n_active;
+ struct gm_packet_sg items[0];
+};
+
+/* general queries are rather different from group/S,G specific queries; it's
+ * not particularly efficient or useful to try to shoehorn them into the S,G
+ * timers. Instead, we keep a history of recent queries and their implied
+ * expiries.
+ */
+struct gm_general_pending {
+ struct timeval query, expiry;
+};
+
+/* similarly, group queries also age out S,G entries for the group, but in
+ * this case we only keep one query for each group
+ *
+ * why is this not in the *,G gm_sg? There may not be one (for INCLUDE mode
+ * groups, or groups we don't know about.) Also, malicious clients could spam
+ * random group-specific queries to trigger resource exhaustion, so it makes
+ * sense to limit these.
+ */
+PREDECL_RBTREE_UNIQ(gm_grp_pends);
+struct gm_grp_pending {
+ struct gm_grp_pends_item itm;
+ struct gm_if *iface;
+ pim_addr grp;
+
+ struct timeval query;
+ struct thread *t_expire;
+};
+
+/* guaranteed MTU for IPv6 is 1280 bytes. IPv6 header is 40 bytes, MLDv2
+ * query header is 24 bytes, RA option is 8 bytes - leaves 1208 bytes for the
+ * source list, which is 151 IPv6 addresses. But we may have some more IPv6
+ * extension headers (e.g. IPsec AH), so just cap to 128
+ */
+#define MLD_V2Q_MTU_MAX_SOURCES 128
+
+/* group-and-source-specific queries are bundled together, if some host joins
+ * multiple sources it's likely to drop all at the same time.
+ *
+ * Unlike gm_grp_pending, this is only used for aggregation since the S,G
+ * state is kept directly in the gm_sg structure.
+ */
+PREDECL_HASH(gm_gsq_pends);
+struct gm_gsq_pending {
+ struct gm_gsq_pends_item itm;
+
+ struct gm_if *iface;
+ struct thread *t_send;
+
+ pim_addr grp;
+ bool s_bit;
+
+ size_t n_src;
+ pim_addr srcs[MLD_V2Q_MTU_MAX_SOURCES];
+};
+
+
+/* The size of this history is limited by QRV, i.e. there can't be more than
+ * 8 items here.
+ */
+#define GM_MAX_PENDING 8
+
+enum gm_version {
+ GM_NONE,
+ GM_MLDV1,
+ GM_MLDV2,
+};
+
+struct gm_if_stats {
+ uint64_t rx_drop_csum;
+ uint64_t rx_drop_srcaddr;
+ uint64_t rx_drop_dstaddr;
+ uint64_t rx_drop_ra;
+ uint64_t rx_drop_malformed;
+ uint64_t rx_trunc_report;
+
+ /* since the types are different, this is rx_old_* not of rx_*_old */
+ uint64_t rx_old_report;
+ uint64_t rx_old_leave;
+ uint64_t rx_new_report;
+
+ uint64_t rx_query_new_general;
+ uint64_t rx_query_new_group;
+ uint64_t rx_query_new_groupsrc;
+ uint64_t rx_query_new_sbit;
+ uint64_t rx_query_old_general;
+ uint64_t rx_query_old_group;
+
+ uint64_t tx_query_new_general;
+ uint64_t tx_query_new_group;
+ uint64_t tx_query_new_groupsrc;
+ uint64_t tx_query_old_general;
+ uint64_t tx_query_old_group;
+
+ uint64_t tx_query_fail;
+};
+
+struct gm_if {
+ struct interface *ifp;
+ struct pim_instance *pim;
+ struct thread *t_query, *t_other_querier, *t_expire;
+
+ bool stopping;
+
+ uint8_t n_startup;
+
+ uint8_t cur_qrv;
+ unsigned int cur_query_intv; /* ms */
+ unsigned int cur_query_intv_trig; /* ms */
+ unsigned int cur_max_resp; /* ms */
+ enum gm_version cur_version;
+
+ /* this value (positive, default 10ms) defines our "timing tolerance":
+ * - added to deadlines for expiring joins
+ * - used to look backwards in time for queries, in case a report was
+ * reordered before the query
+ */
+ struct timeval cfg_timing_fuzz;
+
+ /* items in pending[] are sorted by expiry, pending[0] is earliest */
+ struct gm_general_pending pending[GM_MAX_PENDING];
+ uint8_t n_pending;
+ struct gm_grp_pends_head grp_pends[1];
+ struct gm_gsq_pends_head gsq_pends[1];
+
+ pim_addr querier;
+ pim_addr cur_ll_lowest;
+
+ struct gm_sgs_head sgs[1];
+ struct gm_subscribers_head subscribers[1];
+ struct gm_packet_expires_head expires[1];
+
+ struct timeval started;
+ struct gm_if_stats stats;
+};
+
+#if PIM_IPV == 6
+extern void gm_ifp_update(struct interface *ifp);
+extern void gm_ifp_teardown(struct interface *ifp);
+#else
+static inline void gm_ifp_update(struct interface *ifp)
+{
+}
+
+static inline void gm_ifp_teardown(struct interface *ifp)
+{
+}
+#endif
+
+extern void gm_cli_init(void);
+
+#endif /* PIM6_MLD_H */
--- /dev/null
+/*
+ * MLD protocol definitions
+ * Copyright (C) 2022 David Lamparter for NetDEF, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; see the file COPYING; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _PIM6_MLD_PROTOCOL_H
+#define _PIM6_MLD_PROTOCOL_H
+
+#include <stdalign.h>
+#include <stdint.h>
+
+/* There is a struct icmp6_hdr provided by OS, but it includes 4 bytes of data.
+ * Not helpful for us if we want to put the MLD struct after it.
+ */
+
+struct icmp6_plain_hdr {
+ uint8_t icmp6_type;
+ uint8_t icmp6_code;
+ uint16_t icmp6_cksum;
+};
+static_assert(sizeof(struct icmp6_plain_hdr) == 4, "struct mismatch");
+static_assert(alignof(struct icmp6_plain_hdr) <= 4, "struct mismatch");
+
/* for MLDv1 query, report and leave all use the same packet format */
struct mld_v1_pkt {
	/* milliseconds, plain binary in MLDv1 (RFC 2710); queries only */
	uint16_t max_resp_code;
	uint16_t rsvd0;
	/* :: for general queries, otherwise the multicast group */
	struct in6_addr grp;
};
static_assert(sizeof(struct mld_v1_pkt) == 20, "struct mismatch");
static_assert(alignof(struct mld_v1_pkt) <= 4, "struct mismatch");


struct mld_v2_query_hdr {
	/* "float"-encoded in MLDv2, see mld_max_resp_decode() below */
	uint16_t max_resp_code;
	uint16_t rsvd0;
	struct in6_addr grp;
	/* S-flag (bit 3) + QRV (low 3 bits), RFC 3810 5.1.7/5.1.8 */
	uint8_t flags;
	/* Querier's Query Interval Code, RFC 3810 5.1.9 */
	uint8_t qqic;
	/* number of entries in srcs[], network byte order */
	uint16_t n_src;
	/* NOTE(review): [0] is the GNU zero-length array spelling; C99
	 * flexible array member "srcs[]" would be the standard form.
	 */
	struct in6_addr srcs[0];
};
static_assert(sizeof(struct mld_v2_query_hdr) == 24, "struct mismatch");
static_assert(alignof(struct mld_v2_query_hdr) <= 4, "struct mismatch");


struct mld_v2_report_hdr {
	uint16_t rsvd;
	/* number of mld_v2_rec_hdr records following this header */
	uint16_t n_records;
};
static_assert(sizeof(struct mld_v2_report_hdr) == 4, "struct mismatch");
static_assert(alignof(struct mld_v2_report_hdr) <= 4, "struct mismatch");


struct mld_v2_rec_hdr {
	/* enum mld_v2_rec_type below */
	uint8_t type;
	/* trailing auxiliary data length in 32-bit words (RFC 3810 5.2.6) */
	uint8_t aux_len;
	/* number of entries in srcs[], network byte order */
	uint16_t n_src;
	struct in6_addr grp;
	struct in6_addr srcs[0];
};
static_assert(sizeof(struct mld_v2_rec_hdr) == 20, "struct mismatch");
static_assert(alignof(struct mld_v2_rec_hdr) <= 4, "struct mismatch");
+
+/* clang-format off */
+enum icmp6_mld_type {
+ ICMP6_MLD_QUERY = 130,
+ ICMP6_MLD_V1_REPORT = 131,
+ ICMP6_MLD_V1_DONE = 132,
+ ICMP6_MLD_V2_REPORT = 143,
+};
+
+enum mld_v2_rec_type {
+ MLD_RECTYPE_IS_INCLUDE = 1,
+ MLD_RECTYPE_IS_EXCLUDE = 2,
+ MLD_RECTYPE_CHANGE_TO_INCLUDE = 3,
+ MLD_RECTYPE_CHANGE_TO_EXCLUDE = 4,
+ MLD_RECTYPE_ALLOW_NEW_SOURCES = 5,
+ MLD_RECTYPE_BLOCK_OLD_SOURCES = 6,
+};
+/* clang-format on */
+
+/* helper functions */
+
/* Decode an MLDv2 Maximum Response Code (network byte order) into
 * milliseconds, RFC 3810 5.1.3: values < 0x8000 are verbatim, otherwise
 * a float-ish | 1 | exp(3) | mant(12) | layout decodes to
 * (mant | 0x1000) << (exp + 3).
 */
static inline unsigned int mld_max_resp_decode(uint16_t wire)
{
	uint16_t host = ntohs(wire);
	uint8_t exponent;

	/* small values are encoded directly */
	if (!(host & 0x8000))
		return host;

	exponent = (host >> 12) & 0x7;
	return ((host & 0x0fff) | 0x1000) << (exponent + 3);
}
+
/* Encode milliseconds into an MLDv2 Maximum Response Code in network byte
 * order (inverse of mld_max_resp_decode, lossy: mantissa bits below the
 * top 13 are truncated).
 *
 * fix: clamp to the largest representable value (exp = 7, mant = 0xfff
 * => 8387584 ms).  Previously a larger input produced exp > 7, which
 * overflowed the exponent field into the marker bit and yielded a
 * nonsensical code.
 */
static inline uint16_t mld_max_resp_encode(uint32_t value)
{
	uint16_t code;
	uint8_t exp;

	/* (0xfff | 0x1000) << (7 + 3) == 8387584 */
	if (value > 8387584)
		value = 8387584;

	if (value < 0x8000)
		code = value;
	else {
		exp = 16 - __builtin_clz(value);
		code = (value >> (exp + 3)) & 0xfff;
		code |= 0x8000 | (exp << 12);
	}
	return htons(code);
}
+
+#endif /* _PIM6_MLD_PROTOCOL_H */
#include "lib/network.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_mroute.h"
#include "pim_oil.h"
#include "pim_str.h"
int err;
int opt, data;
socklen_t data_len = sizeof(data);
- static const struct sock_filter filter[] = {
- BPF_STMT(BPF_LD+BPF_B+BPF_ABS, 0),
- BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0, 0, 1),
- BPF_STMT(BPF_RET | BPF_K, 0xffff),
- BPF_STMT(BPF_RET | BPF_K, 0),
- };
-
- static const struct sock_fprog bpf = {
- .len = array_size(filter),
- .filter = (struct sock_filter *)filter,
- };
/*
* We need to create the VRF table for the pim mroute_socket
zlog_warn(
"PIM-SM will not work properly on this platform, until the ability to receive the WHOLEPKT upcall");
#endif
- if (setsockopt(pim->mroute_socket, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf))) {
- zlog_warn("Failure to attach SO_ATTACH_FILTER on fd %d: %d %s",
- pim->mroute_socket, errno, safe_strerror(errno));
- }
}
return 0;
msg);
case MRT6MSG_WHOLEPKT:
return pim_mroute_msg_wholepkt(pim->mroute_socket, ifp,
- (const char *)msg);
+ (const char *)msg,
+ buf_size);
case MRT6MSG_WRMIFWHOLE:
- return pim_mroute_msg_wrvifwhole(
- pim->mroute_socket, ifp, (const char *)msg);
+ return pim_mroute_msg_wrvifwhole(pim->mroute_socket,
+ ifp, (const char *)msg,
+ buf_size);
default:
break;
}
#include "pim_pim.h"
#include "pim_register.h"
#include "pim_cmd.h"
+#include "pim_bsm.h"
/*
* NH lookup / NHT
{
}
-/*
- * PIM register
- */
-void pim_register_join(struct pim_upstream *up)
-{
-}
-
-void pim_null_register_send(struct pim_upstream *up)
-{
-}
-
-void pim_reg_del_on_couldreg_fail(struct interface *ifp)
-{
-}
-
bool pim_bsm_new_nbr_fwd(struct pim_neighbor *neigh, struct interface *ifp)
{
return false;
{
return 0;
}
-
-void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
- struct pim_rpf *rpg, int null_register,
- struct pim_upstream *up)
-{
-}
-
-void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg, pim_addr src,
- pim_addr originator)
-{
-}
-
-int pim_register_recv(struct interface *ifp, pim_addr dest_addr,
- pim_addr src_addr, uint8_t *tlv_buf, int tlv_buf_size)
-{
- return 0;
-}
-
-int pim_register_stop_recv(struct interface *ifp, uint8_t *buf, int buf_size)
-{
- return 0;
-}
#include "if.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_str.h"
#include "pim_tlv.h"
#include "pim_msg.h"
pim_ifp = ifp->info;
assert(pim_ifp);
+
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip receiving PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
+
++pim_ifp->pim_ifstat_assert_recv;
return dispatch_assert(ifp, msg_source_addr, sg.grp, msg_metric);
metric.metric_preference, metric.route_metric,
PIM_FORCE_BOOLEAN(metric.rpt_bit_flag));
}
- ++pim_ifp->pim_ifstat_assert_send;
+ if (!pim_ifp->pim_passive_enable)
+ ++pim_ifp->pim_ifstat_assert_send;
if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
- ifp->name)) {
+ ifp)) {
zlog_warn("%s: could not send PIM message on interface %s",
__func__, ifp->name);
return -3;
}
if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
- dst_addr, buf, len, ifp->name)) {
+ dst_addr, buf, len, ifp)) {
zlog_warn("%s: Could not send BSM message on interface: %s",
__func__, ifp->name);
return false;
}
- pim_ifp->pim_ifstat_bsm_tx++;
+ if (!pim_ifp->pim_passive_enable)
+ pim_ifp->pim_ifstat_bsm_tx++;
+
pim_ifp->pim->bsm_sent++;
return true;
}
return -1;
}
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip receiving PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
+
pim_ifp->pim_ifstat_bsm_rx++;
pim = pim_ifp->pim;
pim->bsm_rcvd++;
json_object_int_add(json_row, "registerRx",
pim_ifp->pim_ifstat_reg_recv);
json_object_int_add(json_row, "registerTx",
- pim_ifp->pim_ifstat_reg_recv);
+ pim_ifp->pim_ifstat_reg_send);
json_object_int_add(json_row, "registerStopRx",
pim_ifp->pim_ifstat_reg_stop_recv);
json_object_int_add(json_row, "registerStopTx",
json_object_int_add(json_row, "registerRx",
pim_ifp->pim_ifstat_reg_recv);
json_object_int_add(json_row, "registerTx",
- pim_ifp->pim_ifstat_reg_recv);
+ pim_ifp->pim_ifstat_reg_send);
json_object_int_add(json_row, "registerStopRx",
pim_ifp->pim_ifstat_reg_stop_recv);
json_object_int_add(json_row, "registerStopTx",
vty_json(vty, json);
}
-static void clear_pim_statistics(struct pim_instance *pim)
-{
- struct interface *ifp;
-
- pim->bsm_rcvd = 0;
- pim->bsm_sent = 0;
- pim->bsm_dropped = 0;
-
- /* scan interfaces */
- FOR_ALL_INTERFACES (pim->vrf, ifp) {
- struct pim_interface *pim_ifp = ifp->info;
-
- if (!pim_ifp)
- continue;
-
- pim_ifp->pim_ifstat_bsm_cfg_miss = 0;
- pim_ifp->pim_ifstat_ucast_bsm_cfg_miss = 0;
- pim_ifp->pim_ifstat_bsm_invalid_sz = 0;
- }
-}
-
static void igmp_show_groups(struct pim_instance *pim, struct vty *vty, bool uj)
{
struct interface *ifp;
return CMD_SUCCESS;
}
-DEFUN (clear_ip_pim_statistics,
+DEFPY (clear_ip_pim_statistics,
clear_ip_pim_statistics_cmd,
- "clear ip pim statistics [vrf NAME]",
+ "clear ip pim statistics [vrf NAME]$name",
CLEAR_STR
IP_STR
CLEAR_IP_PIM_STR
VRF_CMD_HELP_STR
"Reset PIM statistics\n")
{
- int idx = 2;
- struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ struct vrf *v = pim_cmd_lookup(vty, name);
- if (!vrf)
+ if (!v)
return CMD_WARNING;
- clear_pim_statistics(vrf->info);
- return CMD_SUCCESS;
-}
-
-static void clear_mroute(struct pim_instance *pim)
-{
- struct pim_upstream *up;
- struct interface *ifp;
-
- /* scan interfaces */
- FOR_ALL_INTERFACES (pim->vrf, ifp) {
- struct pim_interface *pim_ifp = ifp->info;
- struct pim_ifchannel *ch;
-
- if (!pim_ifp)
- continue;
-
- /* deleting all ifchannels */
- while (!RB_EMPTY(pim_ifchannel_rb, &pim_ifp->ifchannel_rb)) {
- ch = RB_ROOT(pim_ifchannel_rb, &pim_ifp->ifchannel_rb);
-
- pim_ifchannel_delete(ch);
- }
-
-#if PIM_IPV == 4
- /* clean up all igmp groups */
- struct gm_group *grp;
-
- if (pim_ifp->gm_group_list) {
- while (pim_ifp->gm_group_list->count) {
- grp = listnode_head(pim_ifp->gm_group_list);
- igmp_group_delete(grp);
- }
- }
-#endif
- }
-
- /* clean up all upstreams*/
- while ((up = rb_pim_upstream_first(&pim->upstream_head)))
- pim_upstream_del(pim, up, __func__);
+ clear_pim_statistics(v->info);
+ return CMD_SUCCESS;
}
-DEFUN (clear_ip_mroute,
+DEFPY (clear_ip_mroute,
clear_ip_mroute_cmd,
- "clear ip mroute [vrf NAME]",
+ "clear ip mroute [vrf NAME]$name",
CLEAR_STR
IP_STR
"Reset multicast routes\n"
VRF_CMD_HELP_STR)
{
- int idx = 2;
- struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ struct vrf *v = pim_cmd_lookup(vty, name);
- if (!vrf)
+ if (!v)
return CMD_WARNING;
- clear_mroute(vrf->info);
+ clear_mroute(v->info);
return CMD_SUCCESS;
}
return CMD_SUCCESS;
}
-DEFUN (clear_ip_pim_oil,
+DEFPY (clear_ip_pim_oil,
clear_ip_pim_oil_cmd,
- "clear ip pim [vrf NAME] oil",
+ "clear ip pim [vrf NAME]$name oil",
CLEAR_STR
IP_STR
CLEAR_IP_PIM_STR
VRF_CMD_HELP_STR
"Rescan PIM OIL (output interface list)\n")
{
- int idx = 2;
- struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ struct vrf *v = pim_cmd_lookup(vty, name);
- if (!vrf)
+ if (!v)
return CMD_WARNING;
- pim_scan_oil(vrf->info);
+ pim_scan_oil(v->info);
return CMD_SUCCESS;
}
return CMD_SUCCESS;
}
-static void show_mroute(struct pim_instance *pim, struct vty *vty,
- pim_sgaddr *sg, bool fill, bool uj)
-{
- struct listnode *node;
- struct channel_oil *c_oil;
- struct static_route *s_route;
- time_t now;
- json_object *json = NULL;
- json_object *json_group = NULL;
- json_object *json_source = NULL;
- json_object *json_oil = NULL;
- json_object *json_ifp_out = NULL;
- int found_oif;
- int first;
- char grp_str[INET_ADDRSTRLEN];
- char src_str[INET_ADDRSTRLEN];
- char in_ifname[INTERFACE_NAMSIZ + 1];
- char out_ifname[INTERFACE_NAMSIZ + 1];
- int oif_vif_index;
- struct interface *ifp_in;
- char proto[100];
- char state_str[PIM_REG_STATE_STR_LEN];
- char mroute_uptime[10];
-
- if (uj) {
- json = json_object_new_object();
- } else {
- vty_out(vty, "IP Multicast Routing Table\n");
- vty_out(vty, "Flags: S - Sparse, C - Connected, P - Pruned\n");
- vty_out(vty,
- " R - SGRpt Pruned, F - Register flag, T - SPT-bit set\n");
- vty_out(vty,
- "\nSource Group Flags Proto Input Output TTL Uptime\n");
- }
-
- now = pim_time_monotonic_sec();
-
- /* print list of PIM and IGMP routes */
- frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) {
- found_oif = 0;
- first = 1;
- if (!c_oil->installed)
- continue;
-
- if (!pim_addr_is_any(sg->grp) &&
- pim_addr_cmp(sg->grp, c_oil->oil.mfcc_mcastgrp))
- continue;
- if (!pim_addr_is_any(sg->src) &&
- pim_addr_cmp(sg->src, c_oil->oil.mfcc_origin))
- continue;
-
- pim_inet4_dump("<group?>", c_oil->oil.mfcc_mcastgrp, grp_str,
- sizeof(grp_str));
- pim_inet4_dump("<source?>", c_oil->oil.mfcc_origin, src_str,
- sizeof(src_str));
-
- strlcpy(state_str, "S", sizeof(state_str));
- /* When a non DR receives a igmp join, it creates a (*,G)
- * channel_oil without any upstream creation */
- if (c_oil->up) {
- if (PIM_UPSTREAM_FLAG_TEST_SRC_IGMP(c_oil->up->flags))
- strlcat(state_str, "C", sizeof(state_str));
- if (pim_upstream_is_sg_rpt(c_oil->up))
- strlcat(state_str, "R", sizeof(state_str));
- if (PIM_UPSTREAM_FLAG_TEST_FHR(c_oil->up->flags))
- strlcat(state_str, "F", sizeof(state_str));
- if (c_oil->up->sptbit == PIM_UPSTREAM_SPTBIT_TRUE)
- strlcat(state_str, "T", sizeof(state_str));
- }
- if (pim_channel_oil_empty(c_oil))
- strlcat(state_str, "P", sizeof(state_str));
-
- ifp_in = pim_if_find_by_vif_index(pim, c_oil->oil.mfcc_parent);
-
- if (ifp_in)
- strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname));
- else
- strlcpy(in_ifname, "<iif?>", sizeof(in_ifname));
-
-
- pim_time_uptime(mroute_uptime, sizeof(mroute_uptime),
- now - c_oil->mroute_creation);
-
- if (uj) {
-
- /* Find the group, create it if it doesn't exist */
- json_object_object_get_ex(json, grp_str, &json_group);
-
- if (!json_group) {
- json_group = json_object_new_object();
- json_object_object_add(json, grp_str,
- json_group);
- }
-
- /* Find the source nested under the group, create it if
- * it doesn't exist
- */
- json_object_object_get_ex(json_group, src_str,
- &json_source);
-
- if (!json_source) {
- json_source = json_object_new_object();
- json_object_object_add(json_group, src_str,
- json_source);
- }
-
- /* Find the inbound interface nested under the source,
- * create it if it doesn't exist */
- json_object_string_add(json_source, "source",
- src_str);
- json_object_string_add(json_source, "group",
- grp_str);
- json_object_int_add(json_source, "installed",
- c_oil->installed);
- json_object_int_add(json_source, "refCount",
- c_oil->oil_ref_count);
- json_object_int_add(json_source, "oilSize",
- c_oil->oil_size);
- json_object_int_add(json_source, "OilInheritedRescan",
- c_oil->oil_inherited_rescan);
- json_object_int_add(json_source, "oilInheritedRescan",
- c_oil->oil_inherited_rescan);
- json_object_string_add(json_source, "iif", in_ifname);
- json_object_string_add(json_source, "upTime",
- mroute_uptime);
- json_oil = NULL;
- }
-
- for (oif_vif_index = 0; oif_vif_index < MAXVIFS;
- ++oif_vif_index) {
- struct interface *ifp_out;
- int ttl;
-
- ttl = c_oil->oil.mfcc_ttls[oif_vif_index];
- if (ttl < 1)
- continue;
-
- /* do not display muted OIFs */
- if (c_oil->oif_flags[oif_vif_index]
- & PIM_OIF_FLAG_MUTE)
- continue;
-
- if (c_oil->oil.mfcc_parent == oif_vif_index &&
- !pim_mroute_allow_iif_in_oil(c_oil,
- oif_vif_index))
- continue;
-
- ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index);
- found_oif = 1;
-
- if (ifp_out)
- strlcpy(out_ifname, ifp_out->name, sizeof(out_ifname));
- else
- strlcpy(out_ifname, "<oif?>", sizeof(out_ifname));
-
- if (uj) {
- json_ifp_out = json_object_new_object();
- json_object_string_add(json_ifp_out, "source",
- src_str);
- json_object_string_add(json_ifp_out, "group",
- grp_str);
-
- if (c_oil->oif_flags[oif_vif_index]
- & PIM_OIF_FLAG_PROTO_PIM)
- json_object_boolean_true_add(
- json_ifp_out, "protocolPim");
-
- if (c_oil->oif_flags[oif_vif_index] &
- PIM_OIF_FLAG_PROTO_GM)
- json_object_boolean_true_add(
- json_ifp_out, "protocolIgmp");
-
- if (c_oil->oif_flags[oif_vif_index]
- & PIM_OIF_FLAG_PROTO_VXLAN)
- json_object_boolean_true_add(
- json_ifp_out, "protocolVxlan");
-
- if (c_oil->oif_flags[oif_vif_index]
- & PIM_OIF_FLAG_PROTO_STAR)
- json_object_boolean_true_add(
- json_ifp_out,
- "protocolInherited");
-
- json_object_string_add(json_ifp_out,
- "inboundInterface",
- in_ifname);
- json_object_int_add(json_ifp_out, "iVifI",
- c_oil->oil.mfcc_parent);
- json_object_string_add(json_ifp_out,
- "outboundInterface",
- out_ifname);
- json_object_int_add(json_ifp_out, "oVifI",
- oif_vif_index);
- json_object_int_add(json_ifp_out, "ttl", ttl);
- json_object_string_add(json_ifp_out, "upTime",
- mroute_uptime);
- json_object_string_add(json_source, "flags",
- state_str);
- if (!json_oil) {
- json_oil = json_object_new_object();
- json_object_object_add(json_source,
- "oil", json_oil);
- }
- json_object_object_add(json_oil, out_ifname,
- json_ifp_out);
- } else {
- proto[0] = '\0';
- if (c_oil->oif_flags[oif_vif_index]
- & PIM_OIF_FLAG_PROTO_PIM) {
- strlcpy(proto, "PIM", sizeof(proto));
- }
-
- if (c_oil->oif_flags[oif_vif_index] &
- PIM_OIF_FLAG_PROTO_GM) {
- strlcpy(proto, "IGMP", sizeof(proto));
- }
-
- if (c_oil->oif_flags[oif_vif_index]
- & PIM_OIF_FLAG_PROTO_VXLAN) {
- strlcpy(proto, "VxLAN", sizeof(proto));
- }
-
- if (c_oil->oif_flags[oif_vif_index]
- & PIM_OIF_FLAG_PROTO_STAR) {
- strlcpy(proto, "STAR", sizeof(proto));
- }
-
- vty_out(vty,
- "%-15s %-15s %-8s %-6s %-16s %-16s %-3d %8s\n",
- src_str, grp_str, state_str, proto,
- in_ifname, out_ifname, ttl,
- mroute_uptime);
-
- if (first) {
- src_str[0] = '\0';
- grp_str[0] = '\0';
- in_ifname[0] = '\0';
- state_str[0] = '\0';
- mroute_uptime[0] = '\0';
- first = 0;
- }
- }
- }
-
- if (!uj && !found_oif) {
- vty_out(vty,
- "%-15s %-15s %-8s %-6s %-16s %-16s %-3d %8s\n",
- src_str, grp_str, state_str, "none", in_ifname,
- "none", 0, "--:--:--");
- }
- }
-
- /* Print list of static routes */
- for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) {
- first = 1;
-
- if (!s_route->c_oil.installed)
- continue;
-
- pim_inet4_dump("<group?>", s_route->group, grp_str,
- sizeof(grp_str));
- pim_inet4_dump("<source?>", s_route->source, src_str,
- sizeof(src_str));
- ifp_in = pim_if_find_by_vif_index(pim, s_route->iif);
- found_oif = 0;
-
- if (ifp_in)
- strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname));
- else
- strlcpy(in_ifname, "<iif?>", sizeof(in_ifname));
-
- if (uj) {
-
- /* Find the group, create it if it doesn't exist */
- json_object_object_get_ex(json, grp_str, &json_group);
-
- if (!json_group) {
- json_group = json_object_new_object();
- json_object_object_add(json, grp_str,
- json_group);
- }
-
- /* Find the source nested under the group, create it if
- * it doesn't exist */
- json_object_object_get_ex(json_group, src_str,
- &json_source);
-
- if (!json_source) {
- json_source = json_object_new_object();
- json_object_object_add(json_group, src_str,
- json_source);
- }
-
- json_object_string_add(json_source, "iif", in_ifname);
- json_oil = NULL;
- } else {
- strlcpy(proto, "STATIC", sizeof(proto));
- }
-
- for (oif_vif_index = 0; oif_vif_index < MAXVIFS;
- ++oif_vif_index) {
- struct interface *ifp_out;
- char oif_uptime[10];
- int ttl;
-
- ttl = s_route->oif_ttls[oif_vif_index];
- if (ttl < 1)
- continue;
-
- ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index);
- pim_time_uptime(
- oif_uptime, sizeof(oif_uptime),
- now
- - s_route->c_oil
- .oif_creation[oif_vif_index]);
- found_oif = 1;
-
- if (ifp_out)
- strlcpy(out_ifname, ifp_out->name, sizeof(out_ifname));
- else
- strlcpy(out_ifname, "<oif?>", sizeof(out_ifname));
-
- if (uj) {
- json_ifp_out = json_object_new_object();
- json_object_string_add(json_ifp_out, "source",
- src_str);
- json_object_string_add(json_ifp_out, "group",
- grp_str);
- json_object_boolean_true_add(json_ifp_out,
- "protocolStatic");
- json_object_string_add(json_ifp_out,
- "inboundInterface",
- in_ifname);
- json_object_int_add(
- json_ifp_out, "iVifI",
- s_route->c_oil.oil.mfcc_parent);
- json_object_string_add(json_ifp_out,
- "outboundInterface",
- out_ifname);
- json_object_int_add(json_ifp_out, "oVifI",
- oif_vif_index);
- json_object_int_add(json_ifp_out, "ttl", ttl);
- json_object_string_add(json_ifp_out, "upTime",
- oif_uptime);
- if (!json_oil) {
- json_oil = json_object_new_object();
- json_object_object_add(json_source,
- "oil", json_oil);
- }
- json_object_object_add(json_oil, out_ifname,
- json_ifp_out);
- } else {
- vty_out(vty,
- "%-15s %-15s %-8s %-6s %-16s %-16s %-3d %8s\n",
- src_str, grp_str, "-", proto, in_ifname,
- out_ifname, ttl, oif_uptime);
- if (first && !fill) {
- src_str[0] = '\0';
- grp_str[0] = '\0';
- in_ifname[0] = '\0';
- first = 0;
- }
- }
- }
-
- if (!uj && !found_oif) {
- vty_out(vty,
- "%-15s %-15s %-8s %-6s %-16s %-16s %-3d %8s\n",
- src_str, grp_str, "-", proto, in_ifname, "none",
- 0, "--:--:--");
- }
- }
-
- if (uj)
- vty_json(vty, json);
-}
-
DEFPY (show_ip_mroute,
show_ip_mroute_cmd,
"show ip mroute [vrf NAME] [A.B.C.D$s_or_g [A.B.C.D$g]] [fill$fill] [json$json]",
pim_sgaddr sg = {0};
struct pim_instance *pim;
struct vrf *v;
+ json_object *json_parent = NULL;
v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
- if (!v) {
- vty_out(vty, "%% Vrf specified: %s does not exist\n", vrf);
+ if (!v)
return CMD_WARNING;
- }
+
pim = pim_get_pim_instance(v->vrf_id);
if (!pim) {
return CMD_WARNING;
}
+ if (json)
+ json_parent = json_object_new_object();
+
if (s_or_g.s_addr != INADDR_ANY) {
if (g.s_addr != INADDR_ANY) {
sg.src = s_or_g;
} else
sg.grp = s_or_g;
}
- show_mroute(pim, vty, &sg, !!fill, !!json);
+
+ show_mroute(pim, vty, &sg, !!fill, json_parent);
+
+ if (json)
+ vty_json(vty, json_parent);
+
return CMD_SUCCESS;
}
-DEFUN (show_ip_mroute_vrf_all,
+DEFPY (show_ip_mroute_vrf_all,
show_ip_mroute_vrf_all_cmd,
- "show ip mroute vrf all [fill] [json]",
+ "show ip mroute vrf all [fill$fill] [json$json]",
SHOW_STR
IP_STR
MROUTE_STR
JSON_STR)
{
pim_sgaddr sg = {0};
- bool uj = use_json(argc, argv);
- int idx = 4;
struct vrf *vrf;
- bool first = true;
- bool fill = false;
+ json_object *json_parent = NULL;
+ json_object *json_vrf = NULL;
- if (argv_find(argv, argc, "fill", &idx))
- fill = true;
+ if (json)
+ json_parent = json_object_new_object();
- if (uj)
- vty_out(vty, "{ ");
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- if (uj) {
- if (!first)
- vty_out(vty, ", ");
- vty_out(vty, " \"%s\": ", vrf->name);
- first = false;
- } else
+ if (!json)
vty_out(vty, "VRF: %s\n", vrf->name);
- show_mroute(vrf->info, vty, &sg, fill, uj);
+ else
+ json_vrf = json_object_new_object();
+ show_mroute(vrf->info, vty, &sg, !!fill, json_vrf);
+ if (json)
+ json_object_object_add(json_parent, vrf->name,
+ json_vrf);
}
- if (uj)
- vty_out(vty, "}\n");
+ if (json)
+ vty_json(vty, json_parent);
return CMD_SUCCESS;
}
-DEFUN (clear_ip_mroute_count,
+DEFPY (clear_ip_mroute_count,
clear_ip_mroute_count_cmd,
- "clear ip mroute [vrf NAME] count",
+ "clear ip mroute [vrf NAME]$name count",
CLEAR_STR
IP_STR
MROUTE_STR
VRF_CMD_HELP_STR
"Route and packet count data\n")
{
- int idx = 2;
- struct listnode *node;
- struct channel_oil *c_oil;
- struct static_route *sr;
- struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
+ return clear_ip_mroute_count_command(vty, name);
+}
+
+DEFPY (show_ip_mroute_count,
+ show_ip_mroute_count_cmd,
+ "show ip mroute [vrf NAME] count [json$json]",
+ SHOW_STR
+ IP_STR
+ MROUTE_STR
+ VRF_CMD_HELP_STR
+ "Route and packet count data\n"
+ JSON_STR)
+{
struct pim_instance *pim;
+ struct vrf *v;
+ json_object *json_parent = NULL;
- if (!vrf)
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
+
+ if (!v)
return CMD_WARNING;
- pim = vrf->info;
- frr_each(rb_pim_oil, &pim->channel_oil_head, c_oil) {
- if (!c_oil->installed)
- continue;
+ pim = pim_get_pim_instance(v->vrf_id);
- pim_mroute_update_counters(c_oil);
- c_oil->cc.origpktcnt = c_oil->cc.pktcnt;
- c_oil->cc.origbytecnt = c_oil->cc.bytecnt;
- c_oil->cc.origwrong_if = c_oil->cc.wrong_if;
+ if (!pim) {
+ vty_out(vty, "%% Unable to find pim instance\n");
+ return CMD_WARNING;
}
- for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, sr)) {
- if (!sr->c_oil.installed)
- continue;
+ if (json)
+ json_parent = json_object_new_object();
- pim_mroute_update_counters(&sr->c_oil);
+ show_mroute_count(pim, vty, json_parent);
+
+ if (json)
+ vty_json(vty, json_parent);
- sr->c_oil.cc.origpktcnt = sr->c_oil.cc.pktcnt;
- sr->c_oil.cc.origbytecnt = sr->c_oil.cc.bytecnt;
- sr->c_oil.cc.origwrong_if = sr->c_oil.cc.wrong_if;
- }
return CMD_SUCCESS;
}
-static void show_mroute_count_per_channel_oil(struct channel_oil *c_oil,
- json_object *json,
- struct vty *vty)
-{
- char group_str[INET_ADDRSTRLEN];
- char source_str[INET_ADDRSTRLEN];
- json_object *json_group = NULL;
- json_object *json_source = NULL;
-
- if (!c_oil->installed)
- return;
-
- pim_mroute_update_counters(c_oil);
-
- pim_inet4_dump("<group?>", c_oil->oil.mfcc_mcastgrp, group_str,
- sizeof(group_str));
- pim_inet4_dump("<source?>", c_oil->oil.mfcc_origin, source_str,
- sizeof(source_str));
-
- if (json) {
- json_object_object_get_ex(json, group_str, &json_group);
-
- if (!json_group) {
- json_group = json_object_new_object();
- json_object_object_add(json, group_str, json_group);
- }
-
- json_source = json_object_new_object();
- json_object_object_add(json_group, source_str, json_source);
- json_object_int_add(json_source, "lastUsed",
- c_oil->cc.lastused / 100);
- json_object_int_add(json_source, "packets", c_oil->cc.pktcnt);
- json_object_int_add(json_source, "bytes", c_oil->cc.bytecnt);
- json_object_int_add(json_source, "wrongIf", c_oil->cc.wrong_if);
-
- } else {
- vty_out(vty, "%-15s %-15s %-8llu %-7ld %-10ld %-7ld\n",
- source_str, group_str, c_oil->cc.lastused / 100,
- c_oil->cc.pktcnt - c_oil->cc.origpktcnt,
- c_oil->cc.bytecnt - c_oil->cc.origbytecnt,
- c_oil->cc.wrong_if - c_oil->cc.origwrong_if);
- }
-}
-
-static void show_mroute_count(struct pim_instance *pim, struct vty *vty,
- bool uj)
-{
- struct listnode *node;
- struct channel_oil *c_oil;
- struct static_route *sr;
- json_object *json = NULL;
-
- if (uj)
- json = json_object_new_object();
- else {
- vty_out(vty, "\n");
-
- vty_out(vty,
- "Source Group LastUsed Packets Bytes WrongIf \n");
- }
-
- /* Print PIM and IGMP route counts */
- frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil)
- show_mroute_count_per_channel_oil(c_oil, json, vty);
-
- for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, sr))
- show_mroute_count_per_channel_oil(&sr->c_oil, json, vty);
-
- if (uj)
- vty_json(vty, json);
-}
-
-DEFUN (show_ip_mroute_count,
- show_ip_mroute_count_cmd,
- "show ip mroute [vrf NAME] count [json]",
- SHOW_STR
- IP_STR
- MROUTE_STR
- VRF_CMD_HELP_STR
- "Route and packet count data\n"
- JSON_STR)
-{
- int idx = 2;
- bool uj = use_json(argc, argv);
- struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
-
- if (!vrf)
- return CMD_WARNING;
-
- show_mroute_count(vrf->info, vty, uj);
- return CMD_SUCCESS;
-}
-
-DEFUN (show_ip_mroute_count_vrf_all,
+DEFPY (show_ip_mroute_count_vrf_all,
show_ip_mroute_count_vrf_all_cmd,
- "show ip mroute vrf all count [json]",
+ "show ip mroute vrf all count [json$json]",
SHOW_STR
IP_STR
MROUTE_STR
"Route and packet count data\n"
JSON_STR)
{
- bool uj = use_json(argc, argv);
struct vrf *vrf;
- bool first = true;
+ json_object *json_parent = NULL;
+ json_object *json_vrf = NULL;
+
+ if (json)
+ json_parent = json_object_new_object();
- if (uj)
- vty_out(vty, "{ ");
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- if (uj) {
- if (!first)
- vty_out(vty, ", ");
- vty_out(vty, " \"%s\": ", vrf->name);
- first = false;
- } else
+ if (!json)
vty_out(vty, "VRF: %s\n", vrf->name);
- show_mroute_count(vrf->info, vty, uj);
- }
- if (uj)
- vty_out(vty, "}\n");
+ else
+ json_vrf = json_object_new_object();
- return CMD_SUCCESS;
-}
+ show_mroute_count(vrf->info, vty, json_vrf);
-static void show_mroute_summary(struct pim_instance *pim, struct vty *vty,
- json_object *json)
-{
- struct listnode *node;
- struct channel_oil *c_oil;
- struct static_route *s_route;
- uint32_t starg_sw_mroute_cnt = 0;
- uint32_t sg_sw_mroute_cnt = 0;
- uint32_t starg_hw_mroute_cnt = 0;
- uint32_t sg_hw_mroute_cnt = 0;
- json_object *json_starg = NULL;
- json_object *json_sg = NULL;
-
- if (!json)
- vty_out(vty, "Mroute Type Installed/Total\n");
-
- frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) {
- if (!c_oil->installed) {
- if (c_oil->oil.mfcc_origin.s_addr == INADDR_ANY)
- starg_sw_mroute_cnt++;
- else
- sg_sw_mroute_cnt++;
- } else {
- if (c_oil->oil.mfcc_origin.s_addr == INADDR_ANY)
- starg_hw_mroute_cnt++;
- else
- sg_hw_mroute_cnt++;
- }
- }
-
- for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) {
- if (!s_route->c_oil.installed) {
- if (s_route->c_oil.oil.mfcc_origin.s_addr == INADDR_ANY)
- starg_sw_mroute_cnt++;
- else
- sg_sw_mroute_cnt++;
- } else {
- if (s_route->c_oil.oil.mfcc_origin.s_addr == INADDR_ANY)
- starg_hw_mroute_cnt++;
- else
- sg_hw_mroute_cnt++;
- }
+ if (json)
+ json_object_object_add(json_parent, vrf->name,
+ json_vrf);
}
+ if (json)
+ vty_json(vty, json_parent);
- if (!json) {
- vty_out(vty, "%-20s %u/%u\n", "(*, G)", starg_hw_mroute_cnt,
- starg_sw_mroute_cnt + starg_hw_mroute_cnt);
- vty_out(vty, "%-20s %u/%u\n", "(S, G)", sg_hw_mroute_cnt,
- sg_sw_mroute_cnt + sg_hw_mroute_cnt);
- vty_out(vty, "------\n");
- vty_out(vty, "%-20s %u/%u\n", "Total",
- (starg_hw_mroute_cnt + sg_hw_mroute_cnt),
- (starg_sw_mroute_cnt + starg_hw_mroute_cnt
- + sg_sw_mroute_cnt + sg_hw_mroute_cnt));
- } else {
- /* (*,G) route details */
- json_starg = json_object_new_object();
- json_object_object_add(json, "wildcardGroup", json_starg);
-
- json_object_int_add(json_starg, "installed",
- starg_hw_mroute_cnt);
- json_object_int_add(json_starg, "total",
- starg_sw_mroute_cnt + starg_hw_mroute_cnt);
-
- /* (S, G) route details */
- json_sg = json_object_new_object();
- json_object_object_add(json, "sourceGroup", json_sg);
-
- json_object_int_add(json_sg, "installed", sg_hw_mroute_cnt);
- json_object_int_add(json_sg, "total",
- sg_sw_mroute_cnt + sg_hw_mroute_cnt);
-
- json_object_int_add(json, "totalNumOfInstalledMroutes",
- starg_hw_mroute_cnt + sg_hw_mroute_cnt);
- json_object_int_add(json, "totalNumOfMroutes",
- starg_sw_mroute_cnt + starg_hw_mroute_cnt
- + sg_sw_mroute_cnt
- + sg_hw_mroute_cnt);
- }
+ return CMD_SUCCESS;
}
-DEFUN (show_ip_mroute_summary,
+DEFPY (show_ip_mroute_summary,
show_ip_mroute_summary_cmd,
- "show ip mroute [vrf NAME] summary [json]",
+ "show ip mroute [vrf NAME] summary [json$json]",
SHOW_STR
IP_STR
MROUTE_STR
"Summary of all mroutes\n"
JSON_STR)
{
- int idx = 2;
- bool uj = use_json(argc, argv);
- struct vrf *vrf = pim_cmd_lookup_vrf(vty, argv, argc, &idx);
- json_object *json = NULL;
+ struct pim_instance *pim;
+ struct vrf *v;
+ json_object *json_parent = NULL;
- if (uj)
- json = json_object_new_object();
+ v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME);
- if (!vrf)
+ if (!v)
return CMD_WARNING;
- show_mroute_summary(vrf->info, vty, json);
+ pim = pim_get_pim_instance(v->vrf_id);
+
+ if (!pim) {
+ vty_out(vty, "%% Unable to find pim instance\n");
+ return CMD_WARNING;
+ }
+
+ if (json)
+ json_parent = json_object_new_object();
+
+ show_mroute_summary(pim, vty, json_parent);
+
+ if (json)
+ vty_json(vty, json_parent);
- if (uj)
- vty_json(vty, json);
return CMD_SUCCESS;
}
-DEFUN (show_ip_mroute_summary_vrf_all,
+DEFPY (show_ip_mroute_summary_vrf_all,
show_ip_mroute_summary_vrf_all_cmd,
- "show ip mroute vrf all summary [json]",
+ "show ip mroute vrf all summary [json$json]",
SHOW_STR
IP_STR
MROUTE_STR
JSON_STR)
{
struct vrf *vrf;
- bool uj = use_json(argc, argv);
- json_object *json = NULL;
+ json_object *json_parent = NULL;
json_object *json_vrf = NULL;
- if (uj)
- json = json_object_new_object();
+ if (json)
+ json_parent = json_object_new_object();
RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- if (uj)
- json_vrf = json_object_new_object();
- else
+ if (!json)
vty_out(vty, "VRF: %s\n", vrf->name);
+ else
+ json_vrf = json_object_new_object();
show_mroute_summary(vrf->info, vty, json_vrf);
- if (uj)
- json_object_object_add(json, vrf->name, json_vrf);
+ if (json)
+ json_object_object_add(json_parent, vrf->name,
+ json_vrf);
}
- if (uj)
- vty_json(vty, json);
+ if (json)
+ vty_json(vty, json_parent);
return CMD_SUCCESS;
}
"Configure group limit for watermark warning\n"
"Group count to generate watermark warning\n")
{
- PIM_DECLVAR_CONTEXT(vrf, pim);
+ PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
pim->igmp_watermark_limit = limit;
return CMD_SUCCESS;
"Unconfigure group limit for watermark warning\n"
IGNORED_IN_NO_STR)
{
- PIM_DECLVAR_CONTEXT(vrf, pim);
+ PIM_DECLVAR_CONTEXT_VRF(vrf, pim);
pim->igmp_watermark_limit = 0;
return CMD_SUCCESS;
return pim_process_ip_pim_cmd(vty);
}
-DEFUN (interface_ip_pim,
+DEFPY (interface_ip_pim,
interface_ip_pim_cmd,
- "ip pim",
+ "ip pim [passive$passive]",
IP_STR
- PIM_STR)
+ PIM_STR
+ "Disable exchange of protocol packets\n")
{
- return pim_process_ip_pim_cmd(vty);
+ int ret;
+
+ ret = pim_process_ip_pim_cmd(vty);
+
+ if (ret != NB_OK)
+ return ret;
+
+ if (passive)
+ return pim_process_ip_pim_passive_cmd(vty, true);
+
+ return CMD_SUCCESS;
}
DEFUN_HIDDEN (interface_no_ip_pim_ssm,
return pim_process_no_ip_pim_cmd(vty);
}
-DEFUN (interface_no_ip_pim,
+DEFPY (interface_no_ip_pim,
interface_no_ip_pim_cmd,
- "no ip pim",
+ "no ip pim [passive$passive]",
NO_STR
IP_STR
- PIM_STR)
+ PIM_STR
+ "Disable exchange of protocol packets\n")
{
+ if (passive)
+ return pim_process_ip_pim_passive_cmd(vty, false);
+
return pim_process_no_ip_pim_cmd(vty);
}
}
-DEFUN (debug_pim,
+DEFPY (debug_pim,
debug_pim_cmd,
- "debug pim",
- DEBUG_STR
- DEBUG_PIM_STR)
-{
- PIM_DO_DEBUG_PIM_EVENTS;
- PIM_DO_DEBUG_PIM_PACKETS;
- PIM_DO_DEBUG_PIM_TRACE;
- PIM_DO_DEBUG_MSDP_EVENTS;
- PIM_DO_DEBUG_MSDP_PACKETS;
- PIM_DO_DEBUG_BSM;
- PIM_DO_DEBUG_VXLAN;
- return CMD_SUCCESS;
-}
-
-DEFUN (no_debug_pim,
- no_debug_pim_cmd,
- "no debug pim",
+ "[no] debug pim",
NO_STR
DEBUG_STR
DEBUG_PIM_STR)
{
- PIM_DONT_DEBUG_PIM_EVENTS;
- PIM_DONT_DEBUG_PIM_PACKETS;
- PIM_DONT_DEBUG_PIM_TRACE;
- PIM_DONT_DEBUG_MSDP_EVENTS;
- PIM_DONT_DEBUG_MSDP_PACKETS;
-
- PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
- PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
- PIM_DONT_DEBUG_BSM;
- PIM_DONT_DEBUG_VXLAN;
-
- return CMD_SUCCESS;
+ if (!no)
+ return pim_debug_pim_cmd();
+ else
+ return pim_no_debug_pim_cmd();
}
-DEFUN (debug_pim_nht,
+DEFPY (debug_pim_nht,
debug_pim_nht_cmd,
- "debug pim nht",
- DEBUG_STR
- DEBUG_PIM_STR
- "Nexthop Tracking\n")
-{
- PIM_DO_DEBUG_PIM_NHT;
- return CMD_SUCCESS;
-}
-
-DEFUN (no_debug_pim_nht,
- no_debug_pim_nht_cmd,
- "no debug pim nht",
+ "[no] debug pim nht",
NO_STR
DEBUG_STR
DEBUG_PIM_STR
"Nexthop Tracking\n")
{
- PIM_DONT_DEBUG_PIM_NHT;
+ if (!no)
+ PIM_DO_DEBUG_PIM_NHT;
+ else
+ PIM_DONT_DEBUG_PIM_NHT;
return CMD_SUCCESS;
}
-DEFUN (debug_pim_nht_det,
+DEFPY (debug_pim_nht_det,
debug_pim_nht_det_cmd,
- "debug pim nht detail",
- DEBUG_STR
- DEBUG_PIM_STR
- "Nexthop Tracking\n"
- "Detailed Information\n")
-{
- PIM_DO_DEBUG_PIM_NHT_DETAIL;
- return CMD_SUCCESS;
-}
-
-DEFUN (no_debug_pim_nht_det,
- no_debug_pim_nht_det_cmd,
- "no debug pim nht detail",
+ "[no] debug pim nht detail",
NO_STR
DEBUG_STR
DEBUG_PIM_STR
"Nexthop Tracking\n"
"Detailed Information\n")
{
- PIM_DONT_DEBUG_PIM_NHT_DETAIL;
+ if (!no)
+ PIM_DO_DEBUG_PIM_NHT_DETAIL;
+ else
+ PIM_DONT_DEBUG_PIM_NHT_DETAIL;
return CMD_SUCCESS;
}
return CMD_SUCCESS;
}
-DEFUN (debug_pim_events,
+DEFPY (debug_pim_events,
debug_pim_events_cmd,
- "debug pim events",
- DEBUG_STR
- DEBUG_PIM_STR
- DEBUG_PIM_EVENTS_STR)
-{
- PIM_DO_DEBUG_PIM_EVENTS;
- return CMD_SUCCESS;
-}
-
-DEFUN (no_debug_pim_events,
- no_debug_pim_events_cmd,
- "no debug pim events",
+ "[no] debug pim events",
NO_STR
DEBUG_STR
DEBUG_PIM_STR
DEBUG_PIM_EVENTS_STR)
{
- PIM_DONT_DEBUG_PIM_EVENTS;
+ if (!no)
+ PIM_DO_DEBUG_PIM_EVENTS;
+ else
+ PIM_DONT_DEBUG_PIM_EVENTS;
return CMD_SUCCESS;
}
-DEFUN (debug_pim_packets,
+DEFPY (debug_pim_packets,
debug_pim_packets_cmd,
- "debug pim packets [<hello|joins|register>]",
- DEBUG_STR
+ "[no] debug pim packets [<hello$hello|joins$joins|register$registers>]",
+ NO_STR DEBUG_STR
DEBUG_PIM_STR
DEBUG_PIM_PACKETS_STR
DEBUG_PIM_HELLO_PACKETS_STR
DEBUG_PIM_J_P_PACKETS_STR
DEBUG_PIM_PIM_REG_PACKETS_STR)
{
- int idx = 0;
- if (argv_find(argv, argc, "hello", &idx)) {
- PIM_DO_DEBUG_PIM_HELLO;
- vty_out(vty, "PIM Hello debugging is on\n");
- } else if (argv_find(argv, argc, "joins", &idx)) {
- PIM_DO_DEBUG_PIM_J_P;
- vty_out(vty, "PIM Join/Prune debugging is on\n");
- } else if (argv_find(argv, argc, "register", &idx)) {
- PIM_DO_DEBUG_PIM_REG;
- vty_out(vty, "PIM Register debugging is on\n");
- } else {
- PIM_DO_DEBUG_PIM_PACKETS;
- vty_out(vty, "PIM Packet debugging is on \n");
- }
- return CMD_SUCCESS;
-}
-
-DEFUN (no_debug_pim_packets,
- no_debug_pim_packets_cmd,
- "no debug pim packets [<hello|joins|register>]",
- NO_STR
- DEBUG_STR
- DEBUG_PIM_STR
- DEBUG_PIM_PACKETS_STR
- DEBUG_PIM_HELLO_PACKETS_STR
- DEBUG_PIM_J_P_PACKETS_STR
- DEBUG_PIM_PIM_REG_PACKETS_STR)
-{
- int idx = 0;
- if (argv_find(argv, argc, "hello", &idx)) {
- PIM_DONT_DEBUG_PIM_HELLO;
- vty_out(vty, "PIM Hello debugging is off \n");
- } else if (argv_find(argv, argc, "joins", &idx)) {
- PIM_DONT_DEBUG_PIM_J_P;
- vty_out(vty, "PIM Join/Prune debugging is off \n");
- } else if (argv_find(argv, argc, "register", &idx)) {
- PIM_DONT_DEBUG_PIM_REG;
- vty_out(vty, "PIM Register debugging is off\n");
- } else
- PIM_DONT_DEBUG_PIM_PACKETS;
-
- return CMD_SUCCESS;
+ if (!no)
+ return pim_debug_pim_packets_cmd(hello, joins, registers, vty);
+ else
+ return pim_no_debug_pim_packets_cmd(hello, joins, registers,
+ vty);
}
-
-DEFUN (debug_pim_packetdump_send,
+DEFPY (debug_pim_packetdump_send,
debug_pim_packetdump_send_cmd,
- "debug pim packet-dump send",
- DEBUG_STR
- DEBUG_PIM_STR
- DEBUG_PIM_PACKETDUMP_STR
- DEBUG_PIM_PACKETDUMP_SEND_STR)
-{
- PIM_DO_DEBUG_PIM_PACKETDUMP_SEND;
- return CMD_SUCCESS;
-}
-
-DEFUN (no_debug_pim_packetdump_send,
- no_debug_pim_packetdump_send_cmd,
- "no debug pim packet-dump send",
+ "[no] debug pim packet-dump send",
NO_STR
DEBUG_STR
DEBUG_PIM_STR
DEBUG_PIM_PACKETDUMP_STR
DEBUG_PIM_PACKETDUMP_SEND_STR)
{
- PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
+ if (!no)
+ PIM_DO_DEBUG_PIM_PACKETDUMP_SEND;
+ else
+ PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
return CMD_SUCCESS;
}
-DEFUN (debug_pim_packetdump_recv,
+DEFPY (debug_pim_packetdump_recv,
debug_pim_packetdump_recv_cmd,
- "debug pim packet-dump receive",
- DEBUG_STR
- DEBUG_PIM_STR
- DEBUG_PIM_PACKETDUMP_STR
- DEBUG_PIM_PACKETDUMP_RECV_STR)
-{
- PIM_DO_DEBUG_PIM_PACKETDUMP_RECV;
- return CMD_SUCCESS;
-}
-
-DEFUN (no_debug_pim_packetdump_recv,
- no_debug_pim_packetdump_recv_cmd,
- "no debug pim packet-dump receive",
+ "[no] debug pim packet-dump receive",
NO_STR
DEBUG_STR
DEBUG_PIM_STR
DEBUG_PIM_PACKETDUMP_STR
DEBUG_PIM_PACKETDUMP_RECV_STR)
{
- PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
+ if (!no)
+ PIM_DO_DEBUG_PIM_PACKETDUMP_RECV;
+ else
+ PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
return CMD_SUCCESS;
}
-DEFUN (debug_pim_trace,
+DEFPY (debug_pim_trace,
debug_pim_trace_cmd,
- "debug pim trace",
- DEBUG_STR
- DEBUG_PIM_STR
- DEBUG_PIM_TRACE_STR)
-{
- PIM_DO_DEBUG_PIM_TRACE;
- return CMD_SUCCESS;
-}
-
-DEFUN (debug_pim_trace_detail,
- debug_pim_trace_detail_cmd,
- "debug pim trace detail",
- DEBUG_STR
- DEBUG_PIM_STR
- DEBUG_PIM_TRACE_STR
- "Detailed Information\n")
-{
- PIM_DO_DEBUG_PIM_TRACE_DETAIL;
- return CMD_SUCCESS;
-}
-
-DEFUN (no_debug_pim_trace,
- no_debug_pim_trace_cmd,
- "no debug pim trace",
+ "[no] debug pim trace",
NO_STR
DEBUG_STR
DEBUG_PIM_STR
DEBUG_PIM_TRACE_STR)
{
- PIM_DONT_DEBUG_PIM_TRACE;
+ if (!no)
+ PIM_DO_DEBUG_PIM_TRACE;
+ else
+ PIM_DONT_DEBUG_PIM_TRACE;
return CMD_SUCCESS;
}
-DEFUN (no_debug_pim_trace_detail,
- no_debug_pim_trace_detail_cmd,
- "no debug pim trace detail",
+DEFPY (debug_pim_trace_detail,
+ debug_pim_trace_detail_cmd,
+ "[no] debug pim trace detail",
NO_STR
DEBUG_STR
DEBUG_PIM_STR
DEBUG_PIM_TRACE_STR
"Detailed Information\n")
{
- PIM_DONT_DEBUG_PIM_TRACE_DETAIL;
+ if (!no)
+ PIM_DO_DEBUG_PIM_TRACE_DETAIL;
+ else
+ PIM_DONT_DEBUG_PIM_TRACE_DETAIL;
return CMD_SUCCESS;
}
return CMD_SUCCESS;
}
-DEFUN (debug_pim_zebra,
+DEFPY (debug_pim_zebra,
debug_pim_zebra_cmd,
- "debug pim zebra",
- DEBUG_STR
- DEBUG_PIM_STR
- DEBUG_PIM_ZEBRA_STR)
-{
- PIM_DO_DEBUG_ZEBRA;
- return CMD_SUCCESS;
-}
-
-DEFUN (no_debug_pim_zebra,
- no_debug_pim_zebra_cmd,
- "no debug pim zebra",
+ "[no] debug pim zebra",
NO_STR
DEBUG_STR
DEBUG_PIM_STR
DEBUG_PIM_ZEBRA_STR)
{
- PIM_DONT_DEBUG_ZEBRA;
+ if (!no)
+ PIM_DO_DEBUG_ZEBRA;
+ else
+ PIM_DONT_DEBUG_ZEBRA;
return CMD_SUCCESS;
}
install_element(ENABLE_NODE, &debug_pim_static_cmd);
install_element(ENABLE_NODE, &no_debug_pim_static_cmd);
install_element(ENABLE_NODE, &debug_pim_cmd);
- install_element(ENABLE_NODE, &no_debug_pim_cmd);
install_element(ENABLE_NODE, &debug_pim_nht_cmd);
- install_element(ENABLE_NODE, &no_debug_pim_nht_cmd);
install_element(ENABLE_NODE, &debug_pim_nht_det_cmd);
- install_element(ENABLE_NODE, &no_debug_pim_nht_det_cmd);
install_element(ENABLE_NODE, &debug_pim_nht_rp_cmd);
install_element(ENABLE_NODE, &no_debug_pim_nht_rp_cmd);
install_element(ENABLE_NODE, &debug_pim_events_cmd);
- install_element(ENABLE_NODE, &no_debug_pim_events_cmd);
install_element(ENABLE_NODE, &debug_pim_packets_cmd);
- install_element(ENABLE_NODE, &no_debug_pim_packets_cmd);
install_element(ENABLE_NODE, &debug_pim_packetdump_send_cmd);
- install_element(ENABLE_NODE, &no_debug_pim_packetdump_send_cmd);
install_element(ENABLE_NODE, &debug_pim_packetdump_recv_cmd);
- install_element(ENABLE_NODE, &no_debug_pim_packetdump_recv_cmd);
install_element(ENABLE_NODE, &debug_pim_trace_cmd);
- install_element(ENABLE_NODE, &no_debug_pim_trace_cmd);
install_element(ENABLE_NODE, &debug_pim_trace_detail_cmd);
- install_element(ENABLE_NODE, &no_debug_pim_trace_detail_cmd);
install_element(ENABLE_NODE, &debug_ssmpingd_cmd);
install_element(ENABLE_NODE, &no_debug_ssmpingd_cmd);
install_element(ENABLE_NODE, &debug_pim_zebra_cmd);
- install_element(ENABLE_NODE, &no_debug_pim_zebra_cmd);
install_element(ENABLE_NODE, &debug_pim_mlag_cmd);
install_element(ENABLE_NODE, &no_debug_pim_mlag_cmd);
install_element(ENABLE_NODE, &debug_pim_vxlan_cmd);
install_element(CONFIG_NODE, &debug_pim_static_cmd);
install_element(CONFIG_NODE, &no_debug_pim_static_cmd);
install_element(CONFIG_NODE, &debug_pim_cmd);
- install_element(CONFIG_NODE, &no_debug_pim_cmd);
install_element(CONFIG_NODE, &debug_pim_nht_cmd);
- install_element(CONFIG_NODE, &no_debug_pim_nht_cmd);
install_element(CONFIG_NODE, &debug_pim_nht_det_cmd);
- install_element(CONFIG_NODE, &no_debug_pim_nht_det_cmd);
install_element(CONFIG_NODE, &debug_pim_nht_rp_cmd);
install_element(CONFIG_NODE, &no_debug_pim_nht_rp_cmd);
install_element(CONFIG_NODE, &debug_pim_events_cmd);
- install_element(CONFIG_NODE, &no_debug_pim_events_cmd);
install_element(CONFIG_NODE, &debug_pim_packets_cmd);
- install_element(CONFIG_NODE, &no_debug_pim_packets_cmd);
install_element(CONFIG_NODE, &debug_pim_packetdump_send_cmd);
- install_element(CONFIG_NODE, &no_debug_pim_packetdump_send_cmd);
install_element(CONFIG_NODE, &debug_pim_packetdump_recv_cmd);
- install_element(CONFIG_NODE, &no_debug_pim_packetdump_recv_cmd);
install_element(CONFIG_NODE, &debug_pim_trace_cmd);
- install_element(CONFIG_NODE, &no_debug_pim_trace_cmd);
install_element(CONFIG_NODE, &debug_pim_trace_detail_cmd);
- install_element(CONFIG_NODE, &no_debug_pim_trace_detail_cmd);
install_element(CONFIG_NODE, &debug_ssmpingd_cmd);
install_element(CONFIG_NODE, &no_debug_ssmpingd_cmd);
install_element(CONFIG_NODE, &debug_pim_zebra_cmd);
- install_element(CONFIG_NODE, &no_debug_pim_zebra_cmd);
install_element(CONFIG_NODE, &debug_pim_mlag_cmd);
install_element(CONFIG_NODE, &no_debug_pim_mlag_cmd);
install_element(CONFIG_NODE, &debug_pim_vxlan_cmd);
#include "lib/linklist.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_vty.h"
#include "lib/northbound_cli.h"
#include "pim_errors.h"
#include "pim_nht.h"
#include "pim_sock.h"
#include "pim_ssm.h"
+#include "pim_static.h"
#include "pim_addr.h"
+#include "pim_static.h"
/**
* Get current node VRF name.
FRR_PIM_AF_XPATH_VAL);
}
+int pim_process_ip_pim_passive_cmd(struct vty *vty, bool enable)
+{
+ if (enable)
+ nb_cli_enqueue_change(vty, "./pim-passive-enable", NB_OP_MODIFY,
+ "true");
+ else
+ nb_cli_enqueue_change(vty, "./pim-passive-enable", NB_OP_MODIFY,
+ "false");
+
+ return nb_cli_apply_changes(vty, FRR_PIM_INTERFACE_XPATH,
+ FRR_PIM_AF_XPATH_VAL);
+}
+
int pim_process_no_ip_pim_cmd(struct vty *vty)
{
const struct lyd_node *mld_enable_dnode;
sec_list);
}
+ if (pim_ifp->pim_passive_enable)
+ json_object_boolean_true_add(json_row,
+ "passive");
+
/* PIM neighbors */
if (pim_ifp->pim_neighbor_list->count) {
json_pim_neighbors = json_object_new_object();
} else {
vty_out(vty, "Address : %pPAs\n", &ifaddr);
}
+
+ if (pim_ifp->pim_passive_enable)
+ vty_out(vty, "Passive : %s\n",
+ (pim_ifp->pim_passive_enable) ? "yes"
+ : "no");
+
vty_out(vty, "\n");
/* PIM neighbors */
show_multicast_interfaces(pim, vty, NULL);
}
+
+void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
+ bool fill, json_object *json)
+{
+ struct listnode *node;
+ struct channel_oil *c_oil;
+ struct static_route *s_route;
+ time_t now;
+ json_object *json_group = NULL;
+ json_object *json_source = NULL;
+ json_object *json_oil = NULL;
+ json_object *json_ifp_out = NULL;
+ int found_oif;
+ int first;
+ char grp_str[PIM_ADDRSTRLEN];
+ char src_str[PIM_ADDRSTRLEN];
+ char in_ifname[INTERFACE_NAMSIZ + 1];
+ char out_ifname[INTERFACE_NAMSIZ + 1];
+ int oif_vif_index;
+ struct interface *ifp_in;
+ char proto[100];
+ char state_str[PIM_REG_STATE_STR_LEN];
+ char mroute_uptime[10];
+
+ if (!json) {
+ vty_out(vty, "IP Multicast Routing Table\n");
+ vty_out(vty, "Flags: S - Sparse, C - Connected, P - Pruned\n");
+ vty_out(vty,
+ " R - SGRpt Pruned, F - Register flag, T - SPT-bit set\n");
+ vty_out(vty,
+ "\nSource Group Flags Proto Input Output TTL Uptime\n");
+ }
+
+ now = pim_time_monotonic_sec();
+
+ /* print list of PIM and IGMP routes */
+ frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) {
+ found_oif = 0;
+ first = 1;
+ if (!c_oil->installed)
+ continue;
+
+ if (!pim_addr_is_any(sg->grp) &&
+ pim_addr_cmp(sg->grp, *oil_mcastgrp(c_oil)))
+ continue;
+ if (!pim_addr_is_any(sg->src) &&
+ pim_addr_cmp(sg->src, *oil_origin(c_oil)))
+ continue;
+
+ snprintfrr(grp_str, sizeof(grp_str), "%pPAs",
+ oil_mcastgrp(c_oil));
+ snprintfrr(src_str, sizeof(src_str), "%pPAs",
+ oil_origin(c_oil));
+
+ strlcpy(state_str, "S", sizeof(state_str));
+ /* When a non DR receives a igmp join, it creates a (*,G)
+ * channel_oil without any upstream creation
+ */
+ if (c_oil->up) {
+ if (PIM_UPSTREAM_FLAG_TEST_SRC_IGMP(c_oil->up->flags))
+ strlcat(state_str, "C", sizeof(state_str));
+ if (pim_upstream_is_sg_rpt(c_oil->up))
+ strlcat(state_str, "R", sizeof(state_str));
+ if (PIM_UPSTREAM_FLAG_TEST_FHR(c_oil->up->flags))
+ strlcat(state_str, "F", sizeof(state_str));
+ if (c_oil->up->sptbit == PIM_UPSTREAM_SPTBIT_TRUE)
+ strlcat(state_str, "T", sizeof(state_str));
+ }
+ if (pim_channel_oil_empty(c_oil))
+ strlcat(state_str, "P", sizeof(state_str));
+
+ ifp_in = pim_if_find_by_vif_index(pim, *oil_parent(c_oil));
+
+ if (ifp_in)
+ strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname));
+ else
+ strlcpy(in_ifname, "<iif?>", sizeof(in_ifname));
+
+
+ pim_time_uptime(mroute_uptime, sizeof(mroute_uptime),
+ now - c_oil->mroute_creation);
+
+ if (json) {
+
+ /* Find the group, create it if it doesn't exist */
+ json_object_object_get_ex(json, grp_str, &json_group);
+
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, grp_str,
+ json_group);
+ }
+
+ /* Find the source nested under the group, create it if
+ * it doesn't exist
+ */
+ json_object_object_get_ex(json_group, src_str,
+ &json_source);
+
+ if (!json_source) {
+ json_source = json_object_new_object();
+ json_object_object_add(json_group, src_str,
+ json_source);
+ }
+
+ /* Find the inbound interface nested under the source,
+ * create it if it doesn't exist
+ */
+ json_object_string_add(json_source, "source", src_str);
+ json_object_string_add(json_source, "group", grp_str);
+ json_object_int_add(json_source, "installed",
+ c_oil->installed);
+ json_object_int_add(json_source, "refCount",
+ c_oil->oil_ref_count);
+ json_object_int_add(json_source, "oilSize",
+ c_oil->oil_size);
+ json_object_int_add(json_source, "OilInheritedRescan",
+ c_oil->oil_inherited_rescan);
+ json_object_int_add(json_source, "oilInheritedRescan",
+ c_oil->oil_inherited_rescan);
+ json_object_string_add(json_source, "iif", in_ifname);
+ json_object_string_add(json_source, "upTime",
+ mroute_uptime);
+ json_oil = NULL;
+ }
+
+ for (oif_vif_index = 0; oif_vif_index < MAXVIFS;
+ ++oif_vif_index) {
+ struct interface *ifp_out;
+ int ttl;
+
+ ttl = oil_if_has(c_oil, oif_vif_index);
+ if (ttl < 1)
+ continue;
+
+ /* do not display muted OIFs */
+ if (c_oil->oif_flags[oif_vif_index] & PIM_OIF_FLAG_MUTE)
+ continue;
+
+ if (*oil_parent(c_oil) == oif_vif_index &&
+ !pim_mroute_allow_iif_in_oil(c_oil, oif_vif_index))
+ continue;
+
+ ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index);
+ found_oif = 1;
+
+ if (ifp_out)
+ strlcpy(out_ifname, ifp_out->name,
+ sizeof(out_ifname));
+ else
+ strlcpy(out_ifname, "<oif?>",
+ sizeof(out_ifname));
+
+ if (json) {
+ json_ifp_out = json_object_new_object();
+ json_object_string_add(json_ifp_out, "source",
+ src_str);
+ json_object_string_add(json_ifp_out, "group",
+ grp_str);
+
+ if (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_PIM)
+ json_object_boolean_true_add(
+ json_ifp_out, "protocolPim");
+
+ if (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_GM)
+#if PIM_IPV == 4
+ json_object_boolean_true_add(
+ json_ifp_out, "protocolIgmp");
+#else
+ json_object_boolean_true_add(
+ json_ifp_out, "protocolMld");
+#endif
+
+ if (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_VXLAN)
+ json_object_boolean_true_add(
+ json_ifp_out, "protocolVxlan");
+
+ if (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_STAR)
+ json_object_boolean_true_add(
+ json_ifp_out,
+ "protocolInherited");
+
+ json_object_string_add(json_ifp_out,
+ "inboundInterface",
+ in_ifname);
+ json_object_int_add(json_ifp_out, "iVifI",
+ *oil_parent(c_oil));
+ json_object_string_add(json_ifp_out,
+ "outboundInterface",
+ out_ifname);
+ json_object_int_add(json_ifp_out, "oVifI",
+ oif_vif_index);
+ json_object_int_add(json_ifp_out, "ttl", ttl);
+ json_object_string_add(json_ifp_out, "upTime",
+ mroute_uptime);
+ json_object_string_add(json_source, "flags",
+ state_str);
+ if (!json_oil) {
+ json_oil = json_object_new_object();
+ json_object_object_add(json_source,
+ "oil", json_oil);
+ }
+ json_object_object_add(json_oil, out_ifname,
+ json_ifp_out);
+ } else {
+ proto[0] = '\0';
+ if (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_PIM) {
+ strlcpy(proto, "PIM", sizeof(proto));
+ }
+
+ if (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_GM) {
+#if PIM_IPV == 4
+ strlcpy(proto, "IGMP", sizeof(proto));
+#else
+ strlcpy(proto, "MLD", sizeof(proto));
+#endif
+ }
+
+ if (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_VXLAN) {
+ strlcpy(proto, "VxLAN", sizeof(proto));
+ }
+
+ if (c_oil->oif_flags[oif_vif_index] &
+ PIM_OIF_FLAG_PROTO_STAR) {
+ strlcpy(proto, "STAR", sizeof(proto));
+ }
+
+ vty_out(vty,
+ "%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d %8s\n",
+ oil_origin(c_oil), oil_mcastgrp(c_oil),
+ state_str, proto, in_ifname, out_ifname,
+ ttl, mroute_uptime);
+
+ if (first) {
+ src_str[0] = '\0';
+ grp_str[0] = '\0';
+ in_ifname[0] = '\0';
+ state_str[0] = '\0';
+ mroute_uptime[0] = '\0';
+ first = 0;
+ }
+ }
+ }
+
+ if (!json && !found_oif) {
+ vty_out(vty,
+ "%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d %8s\n",
+ oil_origin(c_oil), oil_mcastgrp(c_oil),
+ state_str, "none", in_ifname, "none", 0,
+ "--:--:--");
+ }
+ }
+
+ /* Print list of static routes */
+ for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) {
+ first = 1;
+
+ if (!s_route->c_oil.installed)
+ continue;
+
+ snprintfrr(grp_str, sizeof(grp_str), "%pPAs", &s_route->group);
+ snprintfrr(src_str, sizeof(src_str), "%pPAs", &s_route->source);
+ ifp_in = pim_if_find_by_vif_index(pim, s_route->iif);
+ found_oif = 0;
+
+ if (ifp_in)
+ strlcpy(in_ifname, ifp_in->name, sizeof(in_ifname));
+ else
+ strlcpy(in_ifname, "<iif?>", sizeof(in_ifname));
+
+ if (json) {
+
+ /* Find the group, create it if it doesn't exist */
+ json_object_object_get_ex(json, grp_str, &json_group);
+
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, grp_str,
+ json_group);
+ }
+
+ /* Find the source nested under the group, create it if
+ * it doesn't exist
+ */
+ json_object_object_get_ex(json_group, src_str,
+ &json_source);
+
+ if (!json_source) {
+ json_source = json_object_new_object();
+ json_object_object_add(json_group, src_str,
+ json_source);
+ }
+
+ json_object_string_add(json_source, "iif", in_ifname);
+ json_oil = NULL;
+ } else {
+ strlcpy(proto, "STATIC", sizeof(proto));
+ }
+
+ for (oif_vif_index = 0; oif_vif_index < MAXVIFS;
+ ++oif_vif_index) {
+ struct interface *ifp_out;
+ char oif_uptime[10];
+ int ttl;
+
+ ttl = s_route->oif_ttls[oif_vif_index];
+ if (ttl < 1)
+ continue;
+
+ ifp_out = pim_if_find_by_vif_index(pim, oif_vif_index);
+ pim_time_uptime(
+ oif_uptime, sizeof(oif_uptime),
+ now - s_route->c_oil
+ .oif_creation[oif_vif_index]);
+ found_oif = 1;
+
+ if (ifp_out)
+ strlcpy(out_ifname, ifp_out->name,
+ sizeof(out_ifname));
+ else
+ strlcpy(out_ifname, "<oif?>",
+ sizeof(out_ifname));
+
+ if (json) {
+ json_ifp_out = json_object_new_object();
+ json_object_string_add(json_ifp_out, "source",
+ src_str);
+ json_object_string_add(json_ifp_out, "group",
+ grp_str);
+ json_object_boolean_true_add(json_ifp_out,
+ "protocolStatic");
+ json_object_string_add(json_ifp_out,
+ "inboundInterface",
+ in_ifname);
+ json_object_int_add(
+ json_ifp_out, "iVifI",
+ *oil_parent(&s_route->c_oil));
+ json_object_string_add(json_ifp_out,
+ "outboundInterface",
+ out_ifname);
+ json_object_int_add(json_ifp_out, "oVifI",
+ oif_vif_index);
+ json_object_int_add(json_ifp_out, "ttl", ttl);
+ json_object_string_add(json_ifp_out, "upTime",
+ oif_uptime);
+ if (!json_oil) {
+ json_oil = json_object_new_object();
+ json_object_object_add(json_source,
+ "oil", json_oil);
+ }
+ json_object_object_add(json_oil, out_ifname,
+ json_ifp_out);
+ } else {
+ vty_out(vty,
+ "%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d %8s\n",
+ &s_route->source, &s_route->group, "-",
+ proto, in_ifname, out_ifname, ttl,
+ oif_uptime);
+ if (first && !fill) {
+ src_str[0] = '\0';
+ grp_str[0] = '\0';
+ in_ifname[0] = '\0';
+ first = 0;
+ }
+ }
+ }
+
+ if (!json && !found_oif) {
+ vty_out(vty,
+ "%-15pPAs %-15pPAs %-8s %-6s %-16s %-16s %-3d %8s\n",
+ &s_route->source, &s_route->group, "-", proto,
+ in_ifname, "none", 0, "--:--:--");
+ }
+ }
+}
+
+static void show_mroute_count_per_channel_oil(struct channel_oil *c_oil,
+ json_object *json,
+ struct vty *vty)
+{
+ json_object *json_group = NULL;
+ json_object *json_source = NULL;
+
+ if (!c_oil->installed)
+ return;
+
+ pim_mroute_update_counters(c_oil);
+
+ if (json) {
+ char group_str[PIM_ADDRSTRLEN];
+ char source_str[PIM_ADDRSTRLEN];
+
+ snprintfrr(group_str, sizeof(group_str), "%pPAs",
+ oil_mcastgrp(c_oil));
+ snprintfrr(source_str, sizeof(source_str), "%pPAs",
+ oil_origin(c_oil));
+
+ json_object_object_get_ex(json, group_str, &json_group);
+
+ if (!json_group) {
+ json_group = json_object_new_object();
+ json_object_object_add(json, group_str, json_group);
+ }
+
+ json_source = json_object_new_object();
+ json_object_object_add(json_group, source_str, json_source);
+ json_object_int_add(json_source, "lastUsed",
+ c_oil->cc.lastused / 100);
+ json_object_int_add(json_source, "packets", c_oil->cc.pktcnt);
+ json_object_int_add(json_source, "bytes", c_oil->cc.bytecnt);
+ json_object_int_add(json_source, "wrongIf", c_oil->cc.wrong_if);
+
+ } else {
+ vty_out(vty, "%-15pPAs %-15pPAs %-8llu %-7ld %-10ld %-7ld\n",
+ oil_origin(c_oil), oil_mcastgrp(c_oil),
+ c_oil->cc.lastused / 100,
+ c_oil->cc.pktcnt - c_oil->cc.origpktcnt,
+ c_oil->cc.bytecnt - c_oil->cc.origbytecnt,
+ c_oil->cc.wrong_if - c_oil->cc.origwrong_if);
+ }
+}
+
+void show_mroute_count(struct pim_instance *pim, struct vty *vty,
+ json_object *json)
+{
+ struct listnode *node;
+ struct channel_oil *c_oil;
+ struct static_route *sr;
+
+ if (!json) {
+ vty_out(vty, "\n");
+
+ vty_out(vty,
+ "Source Group LastUsed Packets Bytes WrongIf \n");
+ }
+
+ /* Print PIM and IGMP route counts */
+ frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil)
+ show_mroute_count_per_channel_oil(c_oil, json, vty);
+
+ for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, sr))
+ show_mroute_count_per_channel_oil(&sr->c_oil, json, vty);
+}
+
+void show_mroute_summary(struct pim_instance *pim, struct vty *vty,
+ json_object *json)
+{
+ struct listnode *node;
+ struct channel_oil *c_oil;
+ struct static_route *s_route;
+ uint32_t starg_sw_mroute_cnt = 0;
+ uint32_t sg_sw_mroute_cnt = 0;
+ uint32_t starg_hw_mroute_cnt = 0;
+ uint32_t sg_hw_mroute_cnt = 0;
+ json_object *json_starg = NULL;
+ json_object *json_sg = NULL;
+
+ if (!json)
+ vty_out(vty, "Mroute Type Installed/Total\n");
+
+ frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) {
+ if (!c_oil->installed) {
+ if (pim_addr_is_any(*oil_origin(c_oil)))
+ starg_sw_mroute_cnt++;
+ else
+ sg_sw_mroute_cnt++;
+ } else {
+ if (pim_addr_is_any(*oil_origin(c_oil)))
+ starg_hw_mroute_cnt++;
+ else
+ sg_hw_mroute_cnt++;
+ }
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, s_route)) {
+ if (!s_route->c_oil.installed) {
+ if (pim_addr_is_any(*oil_origin(&s_route->c_oil)))
+ starg_sw_mroute_cnt++;
+ else
+ sg_sw_mroute_cnt++;
+ } else {
+ if (pim_addr_is_any(*oil_origin(&s_route->c_oil)))
+ starg_hw_mroute_cnt++;
+ else
+ sg_hw_mroute_cnt++;
+ }
+ }
+
+ if (!json) {
+ vty_out(vty, "%-20s %u/%u\n", "(*, G)", starg_hw_mroute_cnt,
+ starg_sw_mroute_cnt + starg_hw_mroute_cnt);
+ vty_out(vty, "%-20s %u/%u\n", "(S, G)", sg_hw_mroute_cnt,
+ sg_sw_mroute_cnt + sg_hw_mroute_cnt);
+ vty_out(vty, "------\n");
+ vty_out(vty, "%-20s %u/%u\n", "Total",
+ (starg_hw_mroute_cnt + sg_hw_mroute_cnt),
+ (starg_sw_mroute_cnt + starg_hw_mroute_cnt +
+ sg_sw_mroute_cnt + sg_hw_mroute_cnt));
+ } else {
+ /* (*,G) route details */
+ json_starg = json_object_new_object();
+ json_object_object_add(json, "wildcardGroup", json_starg);
+
+ json_object_int_add(json_starg, "installed",
+ starg_hw_mroute_cnt);
+ json_object_int_add(json_starg, "total",
+ starg_sw_mroute_cnt + starg_hw_mroute_cnt);
+
+ /* (S, G) route details */
+ json_sg = json_object_new_object();
+ json_object_object_add(json, "sourceGroup", json_sg);
+
+ json_object_int_add(json_sg, "installed", sg_hw_mroute_cnt);
+ json_object_int_add(json_sg, "total",
+ sg_sw_mroute_cnt + sg_hw_mroute_cnt);
+
+ json_object_int_add(json, "totalNumOfInstalledMroutes",
+ starg_hw_mroute_cnt + sg_hw_mroute_cnt);
+ json_object_int_add(json, "totalNumOfMroutes",
+ starg_sw_mroute_cnt + starg_hw_mroute_cnt +
+ sg_sw_mroute_cnt +
+ sg_hw_mroute_cnt);
+ }
+}
+
+int clear_ip_mroute_count_command(struct vty *vty, const char *name)
+{
+ struct listnode *node;
+ struct channel_oil *c_oil;
+ struct static_route *sr;
+ struct vrf *v = pim_cmd_lookup(vty, name);
+ struct pim_instance *pim;
+
+ if (!v)
+ return CMD_WARNING;
+
+ pim = v->info;
+ frr_each (rb_pim_oil, &pim->channel_oil_head, c_oil) {
+ if (!c_oil->installed)
+ continue;
+
+ pim_mroute_update_counters(c_oil);
+ c_oil->cc.origpktcnt = c_oil->cc.pktcnt;
+ c_oil->cc.origbytecnt = c_oil->cc.bytecnt;
+ c_oil->cc.origwrong_if = c_oil->cc.wrong_if;
+ }
+
+ for (ALL_LIST_ELEMENTS_RO(pim->static_routes, node, sr)) {
+ if (!sr->c_oil.installed)
+ continue;
+
+ pim_mroute_update_counters(&sr->c_oil);
+
+ sr->c_oil.cc.origpktcnt = sr->c_oil.cc.pktcnt;
+ sr->c_oil.cc.origbytecnt = sr->c_oil.cc.bytecnt;
+ sr->c_oil.cc.origwrong_if = sr->c_oil.cc.wrong_if;
+ }
+ return CMD_SUCCESS;
+}
+
+struct vrf *pim_cmd_lookup(struct vty *vty, const char *name)
+{
+ struct vrf *vrf;
+
+ if (name)
+ vrf = vrf_lookup_by_name(name);
+ else
+ vrf = vrf_lookup_by_id(VRF_DEFAULT);
+
+ if (!vrf)
+ vty_out(vty, "Specified VRF: %s does not exist\n", name);
+
+ return vrf;
+}
+
/*
 * Flush all multicast routing state for a pim instance: every per-interface
 * ifchannel, (IPv4 builds only) every tracked IGMP group, and finally every
 * upstream entry.
 */
void clear_mroute(struct pim_instance *pim)
{
	struct pim_upstream *up;
	struct interface *ifp;

	/* scan interfaces */
	FOR_ALL_INTERFACES (pim->vrf, ifp) {
		struct pim_interface *pim_ifp = ifp->info;
		struct pim_ifchannel *ch;

		if (!pim_ifp)
			continue;

		/* deleting all ifchannels; always delete the current RB root
		 * so the tree stays consistent while it shrinks
		 */
		while (!RB_EMPTY(pim_ifchannel_rb, &pim_ifp->ifchannel_rb)) {
			ch = RB_ROOT(pim_ifchannel_rb, &pim_ifp->ifchannel_rb);

			pim_ifchannel_delete(ch);
		}

#if PIM_IPV == 4
		/* clean up all igmp groups */
		struct gm_group *grp;

		if (pim_ifp->gm_group_list) {
			while (pim_ifp->gm_group_list->count) {
				grp = listnode_head(pim_ifp->gm_group_list);
				igmp_group_delete(grp);
			}
		}
#endif
	}

	/* clean up all upstreams; pim_upstream_del unlinks the entry, so
	 * re-fetch the first element each iteration
	 */
	while ((up = rb_pim_upstream_first(&pim->upstream_head)))
		pim_upstream_del(pim, up, __func__);
}
+
+void clear_pim_statistics(struct pim_instance *pim)
+{
+ struct interface *ifp;
+
+ pim->bsm_rcvd = 0;
+ pim->bsm_sent = 0;
+ pim->bsm_dropped = 0;
+
+ /* scan interfaces */
+ FOR_ALL_INTERFACES (pim->vrf, ifp) {
+ struct pim_interface *pim_ifp = ifp->info;
+
+ if (!pim_ifp)
+ continue;
+
+ pim_ifp->pim_ifstat_bsm_cfg_miss = 0;
+ pim_ifp->pim_ifstat_ucast_bsm_cfg_miss = 0;
+ pim_ifp->pim_ifstat_bsm_invalid_sz = 0;
+ }
+}
+
/*
 * Handler for "debug pim": turn on the baseline set of PIM debug
 * categories (events, packets, trace, MSDP, BSM, VXLAN).
 */
int pim_debug_pim_cmd(void)
{
	PIM_DO_DEBUG_PIM_EVENTS;
	PIM_DO_DEBUG_PIM_PACKETS;
	PIM_DO_DEBUG_PIM_TRACE;
	PIM_DO_DEBUG_MSDP_EVENTS;
	PIM_DO_DEBUG_MSDP_PACKETS;
	PIM_DO_DEBUG_BSM;
	PIM_DO_DEBUG_VXLAN;
	return CMD_SUCCESS;
}
+
/*
 * Handler for "no debug pim": turn off the PIM debug categories.
 *
 * NOTE(review): this is not the exact inverse of pim_debug_pim_cmd() - BSM
 * and VXLAN debugging (enabled there) are not cleared here, while the
 * packetdump send/recv flags (not enabled there) are.  Looks historical;
 * confirm before "fixing".
 */
int pim_no_debug_pim_cmd(void)
{
	PIM_DONT_DEBUG_PIM_EVENTS;
	PIM_DONT_DEBUG_PIM_PACKETS;
	PIM_DONT_DEBUG_PIM_TRACE;
	PIM_DONT_DEBUG_MSDP_EVENTS;
	PIM_DONT_DEBUG_MSDP_PACKETS;

	PIM_DONT_DEBUG_PIM_PACKETDUMP_SEND;
	PIM_DONT_DEBUG_PIM_PACKETDUMP_RECV;
	return CMD_SUCCESS;
}
+
+int pim_debug_pim_packets_cmd(const char *hello, const char *joins,
+ const char *registers, struct vty *vty)
+{
+ if (hello) {
+ PIM_DO_DEBUG_PIM_HELLO;
+ vty_out(vty, "PIM Hello debugging is on\n");
+ } else if (joins) {
+ PIM_DO_DEBUG_PIM_J_P;
+ vty_out(vty, "PIM Join/Prune debugging is on\n");
+ } else if (registers) {
+ PIM_DO_DEBUG_PIM_REG;
+ vty_out(vty, "PIM Register debugging is on\n");
+ } else {
+ PIM_DO_DEBUG_PIM_PACKETS;
+ vty_out(vty, "PIM Packet debugging is on\n");
+ }
+ return CMD_SUCCESS;
+}
+
+int pim_no_debug_pim_packets_cmd(const char *hello, const char *joins,
+ const char *registers, struct vty *vty)
+{
+ if (hello) {
+ PIM_DONT_DEBUG_PIM_HELLO;
+ vty_out(vty, "PIM Hello debugging is off\n");
+ } else if (joins) {
+ PIM_DONT_DEBUG_PIM_J_P;
+ vty_out(vty, "PIM Join/Prune debugging is off\n");
+ } else if (registers) {
+ PIM_DONT_DEBUG_PIM_REG;
+ vty_out(vty, "PIM Register debugging is off\n");
+ } else {
+ PIM_DONT_DEBUG_PIM_PACKETS;
+ vty_out(vty, "PIM Packet debugging is off\n");
+ }
+
+ return CMD_SUCCESS;
+}
#ifndef PIM_CMD_COMMON_H
#define PIM_CMD_COMMON_H
+struct pim_upstream;
+struct pim_instance;
+
+/* duplicated from pim_instance.h - needed to avoid dependency mess */
+struct pim_instance *pim_get_pim_instance(vrf_id_t vrf_id);
+
const char *pim_cli_get_vrf_name(struct vty *vty);
int pim_process_join_prune_cmd(struct vty *vty, const char *jpi_str);
int pim_process_no_join_prune_cmd(struct vty *vty);
int pim_process_ip_pim_cmd(struct vty *vty);
int pim_process_no_ip_pim_cmd(struct vty *vty);
+int pim_process_ip_pim_passive_cmd(struct vty *vty, bool enable);
int pim_process_ip_pim_drprio_cmd(struct vty *vty, const char *drpriority_str);
int pim_process_no_ip_pim_drprio_cmd(struct vty *vty);
int pim_process_ip_pim_hello_cmd(struct vty *vty, const char *hello_str,
struct vty *vty);
void show_multicast_interfaces(struct pim_instance *pim, struct vty *vty,
json_object *json);
+void show_mroute(struct pim_instance *pim, struct vty *vty, pim_sgaddr *sg,
+ bool fill, json_object *json);
+void show_mroute_count(struct pim_instance *pim, struct vty *vty,
+ json_object *json);
+void show_mroute_summary(struct pim_instance *pim, struct vty *vty,
+ json_object *json);
+int clear_ip_mroute_count_command(struct vty *vty, const char *name);
+struct vrf *pim_cmd_lookup(struct vty *vty, const char *name);
+void clear_mroute(struct pim_instance *pim);
+void clear_pim_statistics(struct pim_instance *pim);
+int pim_debug_pim_cmd(void);
+int pim_no_debug_pim_cmd(void);
+int pim_debug_pim_packets_cmd(const char *hello, const char *joins,
+ const char *registers, struct vty *vty);
+int pim_no_debug_pim_packets_cmd(const char *hello, const char *joins,
+ const char *registers, struct vty *vty);
/*
* Special Macro to allow us to get the correct pim_instance;
*/
-#define PIM_DECLVAR_CONTEXT(A, B) \
- struct vrf *A = VTY_GET_CONTEXT(vrf); \
- struct pim_instance *B = \
- (vrf) ? vrf->info : pim_get_pim_instance(VRF_DEFAULT); \
- vrf = (vrf) ? vrf : pim->vrf
+#define PIM_DECLVAR_CONTEXT_VRF(vrfptr, pimptr) \
+ VTY_DECLVAR_CONTEXT_VRF(vrfptr); \
+ struct pim_instance *pimptr = vrfptr->info; \
+ MACRO_REQUIRE_SEMICOLON() /* end */
+
#endif /* PIM_CMD_COMMON_H */
#include "if.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_pim.h"
#include "pim_str.h"
#include "pim_tlv.h"
pim_ifp = ifp->info;
assert(pim_ifp);
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip receiving PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
+
++pim_ifp->pim_ifstat_hello_recv;
/*
#include "pim_igmp_join.h"
#include "pim_vxlan.h"
+#include "pim6_mld.h"
+
#if PIM_IPV == 4
static void pim_if_igmp_join_del_all(struct interface *ifp);
static int igmp_join_sock(const char *ifname, ifindex_t ifindex,
pim_ifp->mroute_vif_index = -1;
pim_ifp->igmp_version = IGMP_DEFAULT_VERSION;
+ pim_ifp->mld_version = MLD_DEFAULT_VERSION;
pim_ifp->gm_default_robustness_variable =
IGMP_DEFAULT_ROBUSTNESS_VARIABLE;
pim_ifp->gm_default_query_interval = IGMP_GENERAL_QUERY_INTERVAL;
pim_ifp->gm_default_query_interval);
pim_ifp->pim_enable = pim;
+ pim_ifp->pim_passive_enable = false;
#if PIM_IPV == 4
pim_ifp->igmp_enable = igmp;
#endif
vxlan_term = pim_vxlan_is_term_dev_cfg(pim_ifp->pim, ifp);
pim_if_add_vif(ifp, false, vxlan_term);
}
+ gm_ifp_update(ifp);
pim_ifchannel_scan_forward_start(ifp);
}
"%s: removed link-local %pI6, lowest now %pI6, highest %pI6",
ifc->ifp->name, &ifc->address->u.prefix6,
&pim_ifp->ll_lowest, &pim_ifp->ll_highest);
+
+ gm_ifp_update(ifp);
}
#endif
vxlan_term = pim_vxlan_is_term_dev_cfg(pim_ifp->pim, ifp);
pim_if_add_vif(ifp, false, vxlan_term);
}
+ gm_ifp_update(ifp);
pim_ifchannel_scan_forward_start(ifp);
pim_rp_setup(pim_ifp->pim);
}
ifaddr = pim_ifp->primary_address;
+#if PIM_IPV != 6
+ /* IPv6 API is always by interface index */
if (!ispimreg && !is_vxlan_term && pim_addr_is_any(ifaddr)) {
zlog_warn(
"%s: could not get address for interface %s ifindex=%d",
__func__, ifp->name, ifp->ifindex);
return -4;
}
+#endif
pim_ifp->mroute_vif_index = pim_iface_next_vif_index(ifp);
pim_ifp->pim->iface_vif_index[pim_ifp->mroute_vif_index] = 1;
+ gm_ifp_update(ifp);
+
/* if the device qualifies as pim_vxlan iif/oif update vxlan entries */
pim_vxlan_add_vif(ifp);
-
return 0;
}
/* if the device was a pim_vxlan iif/oif update vxlan mroute entries */
pim_vxlan_del_vif(ifp);
+ gm_ifp_teardown(ifp);
+
pim_mroute_del_vif(ifp);
/*
pim_ifp->pim->iface_vif_index[pim_ifp->mroute_vif_index] = 0;
pim_ifp->mroute_vif_index = -1;
-
return 0;
}
#include "pim_igmp.h"
#include "pim_upstream.h"
-#include "pim_instance.h"
#include "bfd.h"
#include "pim_str.h"
enum pim_secondary_addr_flags flags;
};
+struct gm_if;
+
struct pim_interface {
bool pim_enable : 1;
bool pim_can_disable_join_suppression : 1;
+ bool pim_passive_enable : 1;
bool igmp_enable : 1;
* address of the interface */
int igmp_version; /* IGMP version */
+ int mld_version;
int gm_default_robustness_variable; /* IGMP or MLD QRV */
int gm_default_query_interval; /* IGMP or MLD secs between general
queries */
struct list *gm_group_list; /* list of struct IGMP or MLD group */
struct hash *gm_group_hash;
+ struct gm_if *mld;
+
int pim_sock_fd; /* PIM socket file descriptor */
struct thread *t_pim_sock_read; /* thread for reading PIM socket */
int64_t pim_sock_creation; /* timestamp of PIM socket creation */
#include "prefix.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_str.h"
#include "pim_iface.h"
#include "pim_ifchannel.h"
#include "lib_errors.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_igmp.h"
#include "pim_igmpv2.h"
#include "pim_igmpv3.h"
#include <zebra.h>
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_util.h"
#include "pim_sock.h"
#include "pim_rp.h"
#include "zebra.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_igmp.h"
#include "pim_igmpv2.h"
#include "pim_igmpv3.h"
#include "lib_errors.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_iface.h"
#include "pim_igmp.h"
#include "pim_igmpv3.h"
#include "lib_errors.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_ssm.h"
#include "pim_rpf.h"
#include "pim_rp.h"
pim->send_v6_secondary = 1;
+ pim->gm_socket = -1;
+
pim_rp_init(pim);
pim_bsm_proc_init(pim);
struct list *ssmpingd_list;
pim_addr ssmpingd_group_addr;
+ unsigned int gm_socket_if_count;
+ int gm_socket;
+ struct thread *t_gm_recv;
+
unsigned int igmp_group_count;
unsigned int igmp_watermark_limit;
unsigned int keep_alive_time;
int64_t nexthop_lookups;
int64_t nexthop_lookups_avoided;
int64_t last_route_change_time;
+
+ uint64_t gm_rx_drop_sys;
};
void pim_vrf_init(void);
#include "plist.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_str.h"
#include "pim_tlv.h"
#include "pim_msg.h"
pastend = tlv_buf + tlv_buf_size;
pim_ifp = ifp->info;
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip receiving PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
+
/*
Parse ucast addr
*/
pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg,
packet_size,
- rpf->source_nexthop.interface->name)) {
+ rpf->source_nexthop.interface)) {
zlog_warn(
"%s: could not send PIM message on interface %s",
__func__,
packet_size += group_size;
pim_msg_build_jp_groups(grp, group, group_size);
- pim_ifp->pim_ifstat_join_send += ntohs(grp->joins);
- pim_ifp->pim_ifstat_prune_send += ntohs(grp->prunes);
+ if (!pim_ifp->pim_passive_enable) {
+ pim_ifp->pim_ifstat_join_send += ntohs(grp->joins);
+ pim_ifp->pim_ifstat_prune_send += ntohs(grp->prunes);
+ }
if (PIM_DEBUG_PIM_TRACE)
zlog_debug(
pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg,
packet_size,
- rpf->source_nexthop.interface->name)) {
+ rpf->source_nexthop.interface)) {
zlog_warn(
"%s: could not send PIM message on interface %s",
__func__,
pim_msg, packet_size, PIM_MSG_TYPE_JOIN_PRUNE, false);
if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg,
- packet_size,
- rpf->source_nexthop.interface->name)) {
+ packet_size, rpf->source_nexthop.interface)) {
zlog_warn(
"%s: could not send PIM message on interface %s",
__func__, rpf->source_nexthop.interface->name);
#include "if.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_msg.h"
#include "pim_jp_agg.h"
#include "pim_join.h"
#ifndef __PIM_JP_AGG_H__
#define __PIM_JP_AGG_H__
+#include "pim_rpf.h"
+
struct pim_jp_sources {
struct pim_upstream *up;
int is_join;
#include "plist.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_macro.h"
#include "pim_iface.h"
#include "pim_ifchannel.h"
return 0;
}
-int pim_mroute_msg_wholepkt(int fd, struct interface *ifp, const char *buf)
+int pim_mroute_msg_wholepkt(int fd, struct interface *ifp, const char *buf,
+ size_t len)
{
struct pim_interface *pim_ifp;
pim_sgaddr sg;
}
pim_register_send((uint8_t *)buf + sizeof(ipv_hdr),
- ntohs(IPV_LEN(ip_hdr)) - sizeof(ipv_hdr),
+ len - sizeof(ipv_hdr),
pim_ifp->primary_address, rpg, 0, up);
}
return 0;
return 0;
}
-int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf)
+int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf,
+ size_t len)
{
const ipv_hdr *ip_hdr = (const ipv_hdr *)buf;
struct pim_interface *pim_ifp;
pim_upstream_keep_alive_timer_start(
up, pim_ifp->pim->keep_alive_time);
pim_upstream_inherited_olist(pim_ifp->pim, up);
- pim_mroute_msg_wholepkt(fd, ifp, buf);
+ pim_mroute_msg_wholepkt(fd, ifp, buf, len);
}
return 0;
}
pim_upstream_mroute_add(up->channel_oil, __func__);
// Send the packet to the RP
- pim_mroute_msg_wholepkt(fd, ifp, buf);
+ pim_mroute_msg_wholepkt(fd, ifp, buf, len);
} else {
up = pim_upstream_add(pim_ifp->pim, &sg, ifp,
PIM_UPSTREAM_FLAG_MASK_SRC_NOCACHE,
return -2;
}
+#if PIM_IPV == 6
+ struct icmp6_filter filter[1];
+ int ret;
+
+ /* Unlike IPv4, this socket is not used for MLD, so just drop
+ * everything with an empty ICMP6 filter. Otherwise we get
+ * all kinds of garbage here, possibly even non-multicast
+ * related ICMPv6 traffic (e.g. ping)
+ *
+ * (mroute kernel upcall "packets" are injected directly on the
+ * socket, this sockopt -or any other- has no effect on them)
+ */
+ ICMP6_FILTER_SETBLOCKALL(filter);
+ ret = setsockopt(fd, SOL_ICMPV6, ICMP6_FILTER, filter,
+ sizeof(filter));
+ if (ret)
+ zlog_err(
+ "(VRF %s) failed to set mroute control filter: %m",
+ pim->vrf->name);
+#endif
+
#ifdef SO_BINDTODEVICE
if (pim->vrf->vrf_id != VRF_DEFAULT
&& setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
memset(&sgreq, 0, sizeof(sgreq));
+ pim_zlookup_sg_statistics(c_oil);
+
#if PIM_IPV == 4
sgreq.src = *oil_origin(c_oil);
sgreq.grp = *oil_mcastgrp(c_oil);
- pim_zlookup_sg_statistics(c_oil);
#else
sgreq.src = c_oil->oil.mf6cc_origin;
sgreq.grp = c_oil->oil.mf6cc_mcastgrp;
- /* TODO Zlookup_sg_statistics for V6 to be added */
#endif
if (ioctl(pim->mroute_socket, PIM_SIOCGETSGCNT, &sgreq)) {
pim_sgaddr sg;
*/
struct channel_oil;
+struct pim_instance;
int pim_mroute_socket_enable(struct pim_instance *pim);
int pim_mroute_socket_disable(struct pim_instance *pim);
int pim_mroute_msg(struct pim_instance *pim, const char *buf, size_t buf_size,
ifindex_t ifindex);
int pim_mroute_msg_nocache(int fd, struct interface *ifp, const kernmsg *msg);
-int pim_mroute_msg_wholepkt(int fd, struct interface *ifp, const char *buf);
+int pim_mroute_msg_wholepkt(int fd, struct interface *ifp, const char *buf,
+ size_t len);
int pim_mroute_msg_wrongvif(int fd, struct interface *ifp, const kernmsg *msg);
-int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf);
+int pim_mroute_msg_wrvifwhole(int fd, struct interface *ifp, const char *buf,
+ size_t len);
int pim_mroute_set(struct pim_instance *pim, int enable);
#endif /* PIM_MROUTE_H */
#include "lib/network.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_mroute.h"
#include "pim_oil.h"
#include "pim_str.h"
msg);
case IGMPMSG_WHOLEPKT:
return pim_mroute_msg_wholepkt(pim->mroute_socket, ifp,
- (const char *)msg);
+ (const char *)msg,
+ buf_size);
case IGMPMSG_WRVIFWHOLE:
- return pim_mroute_msg_wrvifwhole(
- pim->mroute_socket, ifp, (const char *)msg);
+ return pim_mroute_msg_wrvifwhole(pim->mroute_socket,
+ ifp, (const char *)msg,
+ buf_size);
default:
break;
}
#include "pimd.h"
#include "pim_memory.h"
+#include "pim_instance.h"
#include "pim_iface.h"
#include "pim_rp.h"
#include "pim_str.h"
#include <lib/lib_errors.h>
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_str.h"
#include "pim_errors.h"
#include <lib/lib_errors.h>
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_sock.h"
#include "pim_errors.h"
#include "plist.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_vty.h"
#include "pim_pim.h"
#include "pim_msg.h"
.modify = lib_interface_pim_address_family_pim_enable_modify,
}
},
+ {
+ .xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/pim-passive-enable",
+ .cbs = {
+ .modify = lib_interface_pim_address_family_pim_passive_enable_modify,
+ }
+ },
{
.xpath = "/frr-interface:lib/interface/frr-pim:pim/address-family/dr-priority",
.cbs = {
int lib_interface_pim_address_family_destroy(struct nb_cb_destroy_args *args);
int lib_interface_pim_address_family_pim_enable_modify(
struct nb_cb_modify_args *args);
+int lib_interface_pim_address_family_pim_passive_enable_modify(
+ struct nb_cb_modify_args *args);
int lib_interface_pim_address_family_hello_interval_modify(
struct nb_cb_modify_args *args);
int lib_interface_pim_address_family_hello_holdtime_modify(
#include "log.h"
#include "lib_errors.h"
#include "pim_util.h"
+#include "pim6_mld.h"
#if PIM_IPV == 6
#define pim6_msdp_err(funcname, argtype) \
return NB_OK;
}
+/*
+ * XPath:
+ * /frr-interface:lib/interface/frr-pim:pim/address-family/pim-passive-enable
+ */
+int lib_interface_pim_address_family_pim_passive_enable_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_ABORT:
+ case NB_EV_PREPARE:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ pim_ifp->pim_passive_enable =
+ yang_dnode_get_bool(args->dnode, NULL);
+ break;
+ }
+
+ return NB_OK;
+}
+
/*
* XPath: /frr-interface:lib/interface/frr-pim:pim/address-family/hello-interval
*/
int lib_interface_gmp_address_family_mld_version_modify(
struct nb_cb_modify_args *args)
{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
switch (args->event) {
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
+ break;
case NB_EV_APPLY:
- /* TBD depends on MLD data structure changes */
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ return NB_ERR_INCONSISTENCY;
+
+ pim_ifp->mld_version = yang_dnode_get_uint8(args->dnode, NULL);
+ gm_ifp_update(ifp);
break;
}
int lib_interface_gmp_address_family_mld_version_destroy(
struct nb_cb_destroy_args *args)
{
+ struct interface *ifp;
+ struct pim_interface *pim_ifp;
+
switch (args->event) {
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
case NB_EV_ABORT:
+ break;
case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ return NB_ERR_INCONSISTENCY;
+
+ pim_ifp->mld_version = 2;
+ gm_ifp_update(ifp);
break;
}
int lib_interface_gmp_address_family_query_interval_modify(
struct nb_cb_modify_args *args)
{
-#if PIM_IPV == 4
struct interface *ifp;
int query_interval;
+#if PIM_IPV == 4
switch (args->event) {
case NB_EV_VALIDATE:
case NB_EV_PREPARE:
change_query_interval(ifp->info, query_interval);
}
#else
- /* TBD Depends on MLD data structure changes */
+ struct pim_interface *pim_ifp;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ ifp = nb_running_get_entry(args->dnode, NULL, true);
+ pim_ifp = ifp->info;
+ if (!pim_ifp)
+ return NB_ERR_INCONSISTENCY;
+
+ query_interval = yang_dnode_get_uint16(args->dnode, NULL);
+ pim_ifp->gm_default_query_interval = query_interval;
+ gm_ifp_update(ifp);
+ }
#endif
return NB_OK;
}
#include "lib_errors.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_neighbor.h"
#include "pim_time.h"
#include "pim_str.h"
#include "pimd.h"
#include "pimd/pim_nht.h"
+#include "pim_instance.h"
#include "log.h"
#include "pim_time.h"
#include "pim_oil.h"
struct zclient *zclient = NULL;
zclient = pim_zebra_zclient_get();
- memset(&rpf, 0, sizeof(struct pim_rpf));
+ memset(&rpf, 0, sizeof(rpf));
rpf.rpf_addr = *addr;
pnc = pim_nexthop_cache_find(pim, &rpf);
}
if (up != NULL)
- hash_get(pnc->upstream_hash, up, hash_alloc_intern);
+ (void)hash_get(pnc->upstream_hash, up, hash_alloc_intern);
if (CHECK_FLAG(pnc->flags, PIM_NEXTHOP_VALID)) {
if (out_pnc)
#include "network.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_pim.h"
#include "pim_time.h"
#include "pim_iface.h"
}
int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
- int pim_msg_size, const char *ifname)
+ int pim_msg_size, struct interface *ifp)
{
socklen_t tolen;
unsigned char buffer[10000];
unsigned char *msg_start;
uint8_t ttl;
struct pim_msg_header *header;
+ struct pim_interface *pim_ifp;
+
+ pim_ifp = ifp->info;
+
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip sending PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
memset(buffer, 0, 10000);
if (PIM_DEBUG_PIM_PACKETS)
zlog_debug("%s: to %pPA on %s: msg_size=%d checksum=%x",
- __func__, &dst, ifname, pim_msg_size,
+ __func__, &dst, ifp->name, pim_msg_size,
header->checksum);
if (PIM_DEBUG_PIM_PACKETDUMP_SEND) {
}
pim_msg_send_frame(fd, (char *)buffer, sendlen, (struct sockaddr *)&to,
- tolen, ifname);
+ tolen, ifp->name);
return 0;
}
if (pim_msg_send(pim_ifp->pim_sock_fd, pim_ifp->primary_address,
qpim_all_pim_routers_addr, pim_msg, pim_msg_size,
- ifp->name)) {
+ ifp)) {
if (PIM_DEBUG_PIM_HELLO) {
zlog_debug(
"%s: could not send PIM message on interface %s",
return -1;
}
- ++pim_ifp->pim_ifstat_hello_sent;
- PIM_IF_FLAG_SET_HELLO_SENT(pim_ifp->flags);
+ if (!pim_ifp->pim_passive_enable) {
+ ++pim_ifp->pim_ifstat_hello_sent;
+ PIM_IF_FLAG_SET_HELLO_SENT(pim_ifp->flags);
+ }
return 0;
}
pim_sgaddr sg);
int pim_msg_send(int fd, pim_addr src, pim_addr dst, uint8_t *pim_msg,
- int pim_msg_size, const char *ifname);
+ int pim_msg_size, struct interface *ifp);
int pim_hello_send(struct interface *ifp, uint16_t holdtime);
#endif /* PIM_PIM_H */
#include "pim_util.h"
#include "pim_ssm.h"
#include "pim_vxlan.h"
+#include "pim_addr.h"
struct thread *send_test_packet_timer = NULL;
pim_vxlan_update_sg_reg_state(pim, up, true);
}
-void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg,
- struct in_addr src, struct in_addr originator)
+void pim_register_stop_send(struct interface *ifp, pim_sgaddr *sg, pim_addr src,
+ pim_addr originator)
{
struct pim_interface *pinfo;
unsigned char buffer[10000];
uint8_t *b1;
if (PIM_DEBUG_PIM_REG) {
- zlog_debug("Sending Register stop for %pSG to %pI4 on %s", sg,
+ zlog_debug("Sending Register stop for %pSG to %pPA on %s", sg,
&originator, ifp->name);
}
return;
}
if (pim_msg_send(pinfo->pim_sock_fd, src, originator, buffer,
- b1length + PIM_MSG_REGISTER_STOP_LEN, ifp->name)) {
+ b1length + PIM_MSG_REGISTER_STOP_LEN, ifp)) {
if (PIM_DEBUG_PIM_TRACE) {
zlog_debug(
"%s: could not send PIM register stop message on interface %s",
__func__, ifp->name);
}
}
- ++pinfo->pim_ifstat_reg_stop_send;
+
+ if (!pinfo->pim_passive_enable)
+ ++pinfo->pim_ifstat_reg_stop_send;
}
static void pim_reg_stop_upstream(struct pim_instance *pim,
bool handling_star = false;
int l;
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip receiving PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
+
++pim_ifp->pim_ifstat_reg_stop_recv;
memset(&sg, 0, sizeof(sg));
return 0;
}
-void pim_register_send(const uint8_t *buf, int buf_size, struct in_addr src,
+void pim_register_send(const uint8_t *buf, int buf_size, pim_addr src,
struct pim_rpf *rpg, int null_register,
struct pim_upstream *up)
{
unsigned char *b1;
struct pim_interface *pinfo;
struct interface *ifp;
+ pim_addr dst = pim_addr_from_prefix(&rpg->rpf_addr);
if (PIM_DEBUG_PIM_REG) {
- zlog_debug("Sending %s %sRegister Packet to %pI4", up->sg_str,
- null_register ? "NULL " : "",
- &rpg->rpf_addr.u.prefix4);
+ zlog_debug("Sending %s %sRegister Packet to %pPA", up->sg_str,
+ null_register ? "NULL " : "", &dst);
}
ifp = rpg->source_nexthop.interface;
}
if (PIM_DEBUG_PIM_REG) {
- zlog_debug("%s: Sending %s %sRegister Packet to %pI4 on %s",
+ zlog_debug("%s: Sending %s %sRegister Packet to %pPA on %s",
__func__, up->sg_str, null_register ? "NULL " : "",
- &rpg->rpf_addr.u.prefix4, ifp->name);
+ &dst, ifp->name);
}
memset(buffer, 0, 10000);
memcpy(b1, (const unsigned char *)buf, buf_size);
- pim_msg_build_header(src, rpg->rpf_addr.u.prefix4, buffer,
- buf_size + PIM_MSG_REGISTER_LEN,
+ pim_msg_build_header(src, dst, buffer, buf_size + PIM_MSG_REGISTER_LEN,
PIM_MSG_TYPE_REGISTER, false);
- ++pinfo->pim_ifstat_reg_send;
+ if (!pinfo->pim_passive_enable)
+ ++pinfo->pim_ifstat_reg_send;
- if (pim_msg_send(pinfo->pim_sock_fd, src, rpg->rpf_addr.u.prefix4,
- buffer, buf_size + PIM_MSG_REGISTER_LEN, ifp->name)) {
+ if (pim_msg_send(pinfo->pim_sock_fd, src, dst, buffer,
+ buf_size + PIM_MSG_REGISTER_LEN, ifp)) {
if (PIM_DEBUG_PIM_TRACE) {
zlog_debug(
"%s: could not send PIM register message on interface %s",
}
}
+#if PIM_IPV == 4
void pim_null_register_send(struct pim_upstream *up)
{
struct ip ip_hdr;
struct pim_interface *pim_ifp;
struct pim_rpf *rpg;
- struct in_addr src;
+ pim_addr src;
pim_ifp = up->rpf.source_nexthop.interface->info;
if (!pim_ifp) {
return;
}
- memset(&ip_hdr, 0, sizeof(struct ip));
+ memset(&ip_hdr, 0, sizeof(ip_hdr));
ip_hdr.ip_p = PIM_IP_PROTO_PIM;
ip_hdr.ip_hl = 5;
ip_hdr.ip_v = 4;
return;
}
}
- pim_register_send((uint8_t *)&ip_hdr, sizeof(struct ip),
- src, rpg, 1, up);
+ pim_register_send((uint8_t *)&ip_hdr, sizeof(struct ip), src, rpg, 1,
+ up);
+}
+#else
+void pim_null_register_send(struct pim_upstream *up)
+{
+ struct ip6_hdr ip6_hdr;
+ struct pim_msg_header pim_msg_header;
+ struct pim_interface *pim_ifp;
+ struct pim_rpf *rpg;
+ pim_addr src;
+ unsigned char buffer[sizeof(ip6_hdr) + sizeof(pim_msg_header)];
+ struct ipv6_ph ph;
+
+ pim_ifp = up->rpf.source_nexthop.interface->info;
+ if (!pim_ifp) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "Cannot send null-register for %s no valid iif",
+ up->sg_str);
+ return;
+ }
+
+ rpg = RP(pim_ifp->pim, up->sg.grp);
+ if (!rpg) {
+ if (PIM_DEBUG_PIM_TRACE)
+ zlog_debug(
+ "Cannot send null-register for %s no RPF to the RP",
+ up->sg_str);
+ return;
+ }
+
+ memset(&ip6_hdr, 0, sizeof(ip6_hdr));
+ ip6_hdr.ip6_nxt = PIM_IP_PROTO_PIM;
+ ip6_hdr.ip6_plen = PIM_MSG_HEADER_LEN;
+ ip6_hdr.ip6_vfc = 6 << 4;
+ ip6_hdr.ip6_hlim = MAXTTL;
+ ip6_hdr.ip6_src = up->sg.src;
+ ip6_hdr.ip6_dst = up->sg.grp;
+
+ memset(buffer, 0, (sizeof(ip6_hdr) + sizeof(pim_msg_header)));
+ memcpy(buffer, &ip6_hdr, sizeof(ip6_hdr));
+
+ pim_msg_header.ver = 0;
+ pim_msg_header.type = 0;
+ pim_msg_header.reserved = 0;
+
+ pim_msg_header.checksum = 0;
+
+ ph.src = up->sg.src;
+ ph.dst = up->sg.grp;
+ ph.ulpl = htonl(PIM_MSG_HEADER_LEN);
+ ph.next_hdr = IPPROTO_PIM;
+ pim_msg_header.checksum =
+ in_cksum_with_ph6(&ph, &pim_msg_header, PIM_MSG_HEADER_LEN);
+
+ memcpy(buffer + sizeof(ip6_hdr), &pim_msg_header, PIM_MSG_HEADER_LEN);
+
+
+ src = pim_ifp->primary_address;
+ pim_register_send((uint8_t *)buffer,
+ sizeof(ip6_hdr) + PIM_MSG_HEADER_LEN, src, rpg, 1,
+ up);
}
+#endif
/*
* 4.4.2 Receiving Register Messages at the RP
struct pim_instance *pim = pim_ifp->pim;
pim_addr rp_addr;
+ if (pim_ifp->pim_passive_enable) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "skip receiving PIM message on passive interface %s",
+ ifp->name);
+ return 0;
+ }
+
#define PIM_MSG_REGISTER_BIT_RESERVED_LEN 4
ip_hdr = (tlv_buf + PIM_MSG_REGISTER_BIT_RESERVED_LEN);
memset(&sg, 0, sizeof(sg));
sg = pim_sgaddr_from_iphdr(ip_hdr);
+#if PIM_IPV == 6
+ /*
+ * According to RFC section 4.9.3, If Dummy PIM Header is included
+ * in NULL Register as a payload there would be two PIM headers.
+ * The inner PIM Header's checksum field should also be validated
+ * in addition to the outer PIM Header's checksum. Validation of
+ * inner PIM header checksum is done here.
+ */
+ if ((*bits & PIM_REGISTER_NR_BIT) &&
+ ((tlv_buf_size - PIM_MSG_REGISTER_BIT_RESERVED_LEN) >
+ (int)sizeof(struct ip6_hdr))) {
+ uint16_t computed_checksum;
+ uint16_t received_checksum;
+ struct ipv6_ph ph;
+ struct pim_msg_header *header;
+
+ header = (struct pim_msg_header
+ *)(tlv_buf +
+ PIM_MSG_REGISTER_BIT_RESERVED_LEN +
+ sizeof(struct ip6_hdr));
+ ph.src = sg.src;
+ ph.dst = sg.grp;
+ ph.ulpl = htonl(PIM_MSG_HEADER_LEN);
+ ph.next_hdr = IPPROTO_PIM;
+
+ received_checksum = header->checksum;
+
+ header->checksum = 0;
+ computed_checksum = in_cksum_with_ph6(
+ &ph, header, htonl(PIM_MSG_HEADER_LEN));
+
+ if (computed_checksum != received_checksum) {
+ if (PIM_DEBUG_PIM_PACKETS)
+ zlog_debug(
+ "Ignoring Null Register message%pSG from %pPA due to bad checksum in Encapsulated dummy PIM header",
+ &sg, &src_addr);
+ return 0;
+ }
+ }
+#endif
i_am_rp = I_am_RP(pim, sg.grp);
if (PIM_DEBUG_PIM_REG)
#include "lib_errors.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_vty.h"
#include "pim_str.h"
#include "pim_iface.h"
#include "jhash.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_rpf.h"
#include "pim_pim.h"
#include "pim_str.h"
#include <zebra.h>
#include "pim_str.h"
+struct pim_instance;
+
/*
RFC 4601:
#include "network.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_mroute.h"
#include "pim_iface.h"
#include "pim_sock.h"
*tolen = sizeof(*to);
}
- memset(&msgh, 0, sizeof(struct msghdr));
+ memset(&msgh, 0, sizeof(msgh));
iov.iov_base = buf;
iov.iov_len = len;
msgh.msg_control = cbuf;
#define PIM_SOCK_ERR_NAME (-10) /* Socket name (getsockname) */
#define PIM_SOCK_ERR_BIND (-11) /* Can't bind to interface */
+struct pim_instance;
+
int pim_socket_bind(int fd, struct interface *ifp);
void pim_socket_ip_hdr(int fd);
int pim_socket_raw(int protocol);
#include <lib/lib_errors.h>
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_ssm.h"
#include "pim_igmp.h"
#define PIM_SSM_STANDARD_RANGE "232.0.0.0/8"
+struct pim_instance;
+
/* SSM error codes */
enum pim_ssm_err {
PIM_SSM_ERR_NONE = 0,
#include "lib_errors.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_ssmpingd.h"
#include "pim_time.h"
#include "pim_sock.h"
#include "linklist.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_oil.h"
#include "pim_static.h"
#include "pim_time.h"
#include <zebra.h>
#include "pim_mroute.h"
+#include "pim_oil.h"
#include "if.h"
struct static_route {
#include "pim_tib.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_iface.h"
#include "pim_upstream.h"
#include "pim_oil.h"
#include "if.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_int.h"
#include "pim_tlv.h"
#include "pim_str.h"
#include "pim_bfd.h"
#include "pim_bsm.h"
#include "pim_vxlan.h"
+#include "pim6_mld.h"
int pim_debug_config_write(struct vty *vty)
{
}
#if PIM_IPV == 4
-static int pim_igmp_config_write(struct vty *vty, int writes,
- struct pim_interface *pim_ifp)
+static int gm_config_write(struct vty *vty, int writes,
+ struct pim_interface *pim_ifp)
{
/* IF ip igmp */
if (pim_ifp->igmp_enable) {
return writes;
}
+#else
+static int gm_config_write(struct vty *vty, int writes,
+ struct pim_interface *pim_ifp)
+{
+ if (pim_ifp->mld_version != MLD_DEFAULT_VERSION)
+ vty_out(vty, " ipv6 mld version %d\n", pim_ifp->mld_version);
+ if (pim_ifp->gm_default_query_interval != IGMP_GENERAL_QUERY_INTERVAL)
+ vty_out(vty, " ipv6 mld query-interval %d\n",
+ pim_ifp->gm_default_query_interval);
+ return 0;
+}
#endif
int pim_config_write(struct vty *vty, int writes, struct interface *ifp,
++writes;
}
-#if PIM_IPV == 4
- writes += pim_igmp_config_write(vty, writes, pim_ifp);
-#endif
+ writes += gm_config_write(vty, writes, pim_ifp);
/* update source */
if (!pim_addr_is_any(pim_ifp->update_source)) {
++writes;
}
+ if (pim_ifp->pim_passive_enable) {
+ vty_out(vty, " " PIM_AF_NAME " pim passive\n");
+ ++writes;
+ }
+
writes += pim_static_write_mroute(pim, vty, ifp);
pim_bsm_write_config(vty, ifp);
++writes;
#include "vty.h"
+struct pim_instance;
+
int pim_debug_config_write(struct vty *vty);
int pim_global_config_write_worker(struct pim_instance *pim, struct vty *vty);
int pim_interface_config_write(struct vty *vty);
#ifndef PIM_VXLAN_H
#define PIM_VXLAN_H
+#include "pim_instance.h"
+
/* global timer used for miscellaneous staggered processing */
#define PIM_VXLAN_WORK_TIME 1
/* number of SG entries processed at one shot */
#include "lib_errors.h"
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_iface.h"
#include "pim_neighbor.h"
#include "pim_pim.h"
}
}
-#if PIM_IPV == 4
int pim_zlookup_sg_statistics(struct channel_oil *c_oil)
{
struct stream *s = zlookup->obuf;
pim_sgaddr sg;
int count = 0;
int ret;
+ pim_sgaddr more = {};
struct interface *ifp =
- pim_if_find_by_vif_index(c_oil->pim, c_oil->oil.mfcc_parent);
+ pim_if_find_by_vif_index(c_oil->pim, *oil_parent(c_oil));
if (PIM_DEBUG_ZEBRA) {
- pim_sgaddr more;
-
- more.src = c_oil->oil.mfcc_origin;
- more.grp = c_oil->oil.mfcc_mcastgrp;
- zlog_debug("Sending Request for New Channel Oil Information%pSG VIIF %d(%s)",
- &more, c_oil->oil.mfcc_parent,
- c_oil->pim->vrf->name);
+ more.src = *oil_origin(c_oil);
+ more.grp = *oil_mcastgrp(c_oil);
+ zlog_debug(
+ "Sending Request for New Channel Oil Information%pSG VIIF %d(%s)",
+ &more, *oil_parent(c_oil), c_oil->pim->vrf->name);
}
if (!ifp)
stream_reset(s);
zclient_create_header(s, ZEBRA_IPMR_ROUTE_STATS,
c_oil->pim->vrf->vrf_id);
- stream_put_in_addr(s, &c_oil->oil.mfcc_origin);
- stream_put_in_addr(s, &c_oil->oil.mfcc_mcastgrp);
+ stream_putl(s, PIM_AF);
+ stream_write(s, oil_origin(c_oil), sizeof(pim_addr));
+ stream_write(s, oil_mcastgrp(c_oil), sizeof(pim_addr));
stream_putl(s, ifp->ifindex);
stream_putw_at(s, 0, stream_get_endp(s));
}
}
- sg.src.s_addr = stream_get_ipv4(s);
- sg.grp.s_addr = stream_get_ipv4(s);
- if (sg.src.s_addr != c_oil->oil.mfcc_origin.s_addr
- || sg.grp.s_addr != c_oil->oil.mfcc_mcastgrp.s_addr) {
- if (PIM_DEBUG_ZEBRA) {
- pim_sgaddr more;
+ stream_get(&sg.src, s, sizeof(pim_addr));
+ stream_get(&sg.grp, s, sizeof(pim_addr));
- more.src = c_oil->oil.mfcc_origin;
- more.grp = c_oil->oil.mfcc_mcastgrp;
+ more.src = *oil_origin(c_oil);
+ more.grp = *oil_mcastgrp(c_oil);
+ if (pim_sgaddr_cmp(sg, more)) {
+ if (PIM_DEBUG_ZEBRA)
flog_err(
EC_LIB_ZAPI_MISSMATCH,
"%s: Received wrong %pSG(%s) information requested",
__func__, &more, c_oil->pim->vrf->name);
- }
zclient_lookup_failed(zlookup);
return -3;
}
return 0;
}
-#endif
#define PIM_NEXTHOP_LOOKUP_MAX (3) /* max. recursive route lookup */
+struct channel_oil;
+
struct pim_zlookup_nexthop {
vrf_id_t vrf_id;
pim_addr nexthop_addr;
#include <lib/lib_errors.h>
#include "pimd.h"
+#include "pim_instance.h"
#include "pim_mlag.h"
#include "pim_zebra.h"
#include "plist.h"
#include "pim_addr.h"
-#include "pim_instance.h"
#include "pim_str.h"
#include "pim_memory.h"
#include "pim_assert.h"
vtysh_scan += \
pimd/pim_cmd.c \
pimd/pim6_cmd.c \
+ pimd/pim6_mld.c \
#end
vtysh_daemons += pimd
vtysh_daemons += pim6d
pimd/pim_zebra.c \
pimd/pim_zlookup.c \
pimd/pim_vxlan.c \
+ pimd/pim_register.c \
pimd/pimd.c \
# end
pimd/pim_msdp.c \
pimd/pim_msdp_packet.c \
pimd/pim_msdp_socket.c \
- pimd/pim_register.c \
pimd/pim_signals.c \
pimd/pim_zpthread.c \
pimd/pim_mroute_msg.c \
pimd_pim6d_SOURCES = \
$(pim_common) \
pimd/pim6_main.c \
+ pimd/pim6_mld.c \
pimd/pim6_stubs.c \
pimd/pim6_cmd.c \
pimd/pim6_mroute_msg.c \
pimd/pim_vxlan.h \
pimd/pim_vxlan_instance.h \
pimd/pimd.h \
+ pimd/pim6_mld.h \
+ pimd/pim6_mld_protocol.h \
pimd/mtracebis_netlink.h \
pimd/mtracebis_routeget.h \
pimd/pim6_cmd.h \
clippy_scan += \
pimd/pim_cmd.c \
pimd/pim6_cmd.c \
+ pimd/pim6_mld.c \
# end
pimd_pimd_CFLAGS = $(AM_CFLAGS) -DPIM_IPV=4
struct prefix_ipv4 p;
struct route_node *node;
- memset(&p, 0, sizeof(struct prefix_ipv4));
+ memset(&p, 0, sizeof(p));
p.family = AF_INET;
p.prefix = from->sin_addr;
p.prefixlen = IPV4_MAX_BITLEN;
rip = nb_running_get_entry(args->dnode, NULL, true);
default_information = yang_dnode_get_bool(args->dnode, NULL);
- memset(&p, 0, sizeof(struct prefix_ipv4));
+ memset(&p, 0, sizeof(p));
p.family = AF_INET;
if (default_information) {
struct nexthop nh;
== MATCH_FAILED)
return NULL;
- memset(&addr, 0, sizeof(struct in_addr));
+ memset(&addr, 0, sizeof(addr));
/* Lookup interface. */
ifp = rip2IfLookup(v, name, length, &addr, exact);
== MATCH_FAILED)
return NULL;
- memset(&addr, 0, sizeof(struct in_addr));
+ memset(&addr, 0, sizeof(addr));
/* Lookup interface. */
ifp = rip2IfLookup(v, name, length, &addr, exact);
== MATCH_FAILED)
return NULL;
- memset(&addr, 0, sizeof(struct in_addr));
+ memset(&addr, 0, sizeof(addr));
/* Lookup interface. */
peer = rip2PeerLookup(v, name, length, &addr, exact);
uint32_t destination;
if (subnetted == -1) {
- memcpy(&ifaddr, ifc->address,
- sizeof(struct prefix_ipv4));
+ memcpy(&ifaddr, ifc->address, sizeof(ifaddr));
memcpy(&ifaddrclass, &ifaddr,
- sizeof(struct prefix_ipv4));
+ sizeof(ifaddrclass));
apply_classful_mask_ipv4(&ifaddrclass);
subnetted = 0;
if (ifaddr.prefixlen > ifaddrclass.prefixlen)
}
/* Make destination address. */
- memset(&sin, 0, sizeof(struct sockaddr_in));
+ memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
sin.sin_len = sizeof(struct sockaddr_in);
rp = route_node_get(rip->table, (struct prefix *)p);
- memset(&newinfo, 0, sizeof(struct rip_info));
+ memset(&newinfo, 0, sizeof(newinfo));
newinfo.type = type;
newinfo.sub_type = sub_type;
newinfo.metric = 1;
rip_event(rip, RIP_READ, sock);
/* RIPd manages only IPv4. */
- memset(&from, 0, sizeof(struct sockaddr_in));
+ memset(&from, 0, sizeof(from));
fromlen = sizeof(struct sockaddr_in);
len = recvfrom(sock, (char *)&rip_buf.buf, sizeof(rip_buf.buf), 0,
}
if (version == RIPv1) {
- memcpy(&ifaddrclass, ifc->address, sizeof(struct prefix_ipv4));
+ memcpy(&ifaddrclass, ifc->address, sizeof(ifaddrclass));
apply_classful_mask_ipv4(&ifaddrclass);
subnetted = 0;
if (ifc->address->prefixlen > ifaddrclass.prefixlen)
if (if_is_broadcast(ifp) || if_is_pointopoint(ifp)) {
if (ifc->address->family == AF_INET) {
/* Destination address and port setting. */
- memset(&to, 0, sizeof(struct sockaddr_in));
+ memset(&to, 0, sizeof(to));
if (ifc->destination)
/* use specified broadcast or peer destination
* addr */
struct rip_distance *rdistance;
struct access_list *alist;
- memset(&p, 0, sizeof(struct prefix_ipv4));
+ memset(&p, 0, sizeof(p));
p.family = AF_INET;
p.prefix = rinfo->from;
p.prefixlen = IPV4_MAX_BITLEN;
zlog_debug(" send packet size %d", bufsize);
}
- memset(&addr, 0, sizeof(struct sockaddr_in6));
+ memset(&addr, 0, sizeof(addr));
addr.sin6_family = AF_INET6;
#ifdef SIN6_LEN
addr.sin6_len = sizeof(struct sockaddr_in6);
msg.msg_namelen = sizeof(struct sockaddr_in6);
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
- msg.msg_control = (void *)adata;
+ msg.msg_control = adata;
msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo));
iov.iov_base = buf;
struct cmsghdr *cmsgptr;
struct in6_addr dst = {.s6_addr = {0}};
- memset(&dst, 0, sizeof(struct in6_addr));
+ memset(&dst, 0, sizeof(dst));
/* Ancillary data. This store cmsghdr and in6_pktinfo. But at this
point I can't determine size of cmsghdr */
rp = agg_node_get(ripng->table, (struct prefix *)p);
- memset(&newinfo, 0, sizeof(struct ripng_info));
+ memset(&newinfo, 0, sizeof(newinfo));
newinfo.type = type;
newinfo.sub_type = sub_type;
newinfo.ifindex = ifindex;
ripng_peer_update(ripng, from, packet->version);
/* Reset nexthop. */
- memset(&nexthop, 0, sizeof(struct ripng_nexthop));
+ memset(&nexthop, 0, sizeof(nexthop));
nexthop.flag = RIPNG_NEXTHOP_UNSPEC;
/* Set RTE pointer. */
field. Once all the entries have been filled in, change the
command from Request to Response and send the datagram back
to the requestor. */
- memset(&p, 0, sizeof(struct prefix_ipv6));
+ memset(&p, 0, sizeof(p));
p.family = AF_INET6;
for (; ((caddr_t)rte) < lim; rte++) {
fails++;
printf("firstas: %d, got %d\n", sp->first,
aspath_firstas_check(as, sp->first));
- printf("loop does: %d %d, doesnt: %d %d\n", sp->does_loop,
+ printf("loop does: %d %d, doesn't: %d %d\n", sp->does_loop,
aspath_loop_check(as, sp->does_loop), sp->doesnt_loop,
aspath_loop_check(as, sp->doesnt_loop));
printf("private check: %d %d\n", sp->private_as,
}
rn->info = (void *)0xdeadbeef;
- hash_get(test->log, hash_entry, log_alloc);
+ (void)hash_get(test->log, hash_entry, log_alloc);
};
static void test_state_del_route(struct test_state *test,
p->prefixlen = prng_rand(prng) % 129;
p->family = AF_INET6;
- apply_mask((struct prefix *)p);
+ apply_mask(p);
}
static void get_rand_prefix_pair(struct prng *prng, struct prefix_ipv6 *dst_p,
prng = prng_new(0);
- timers = XMALLOC(MTYPE_TMP, SCHEDULE_TIMERS * sizeof(*timers));
+ timers = XCALLOC(MTYPE_TMP, SCHEDULE_TIMERS * sizeof(*timers));
for (i = 0; i < SCHEDULE_TIMERS; i++) {
long interval_msec;
* representing the expected "output" of the timers when they
* are run. */
j = 0;
- alarms = XMALLOC(MTYPE_TMP, timers_pending * sizeof(*alarms));
+ alarms = XCALLOC(MTYPE_TMP, timers_pending * sizeof(*alarms));
for (i = 0; i < SCHEDULE_TIMERS; i++) {
if (!timers[i])
continue;
)
if raises:
# error = Exception("stderr: {}".format(stderr))
- # This annoyingly doesnt' show stderr when printed normally
+ # This annoyingly doesn't show stderr when printed normally
error = subprocess.CalledProcessError(rc, actual_cmd)
error.stdout, error.stderr = stdout, stderr
raise error
config_data.append(cmd)
protocol = "igmp"
del_action = intf_data[intf_name]["igmp"].setdefault("delete", False)
+ del_attr = intf_data[intf_name]["igmp"].setdefault("delete_attr", False)
cmd = "ip igmp"
if del_action:
cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ if not del_attr:
+ config_data.append(cmd)
- del_attr = intf_data[intf_name]["igmp"].setdefault("delete_attr", False)
for attribute, data in intf_data[intf_name]["igmp"].items():
if attribute == "version":
cmd = "ip {} {} {}".format(protocol, attribute, data)
if del_action:
cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ if not del_attr:
+ config_data.append(cmd)
if attribute == "join":
for group in data:
@retry(retry_timeout=120, diag_pct=0)
-def verify_ip_mroutes(
+def verify_mroutes(
tgen,
dut,
src_address,
):
"""
Verify ip mroutes and make sure (*, G)/(S, G) is present in mroutes
- by running "show ip pim upstream" cli
+ by running "show ip/ipv6 mroute" cli
Parameters
----------
-----
dut = "r1"
group_address = "225.1.1.1"
- result = verify_ip_mroutes(tgen, dut, src_address, group_address)
+ result = verify_mroutes(tgen, dut, src_address, group_address)
Returns
-------
return True
-def clear_ip_pim_interface_traffic(tgen, topo):
+def clear_pim_interface_traffic(tgen, topo):
"""
- Clear ip pim interface traffice by running
- "clear ip pim interface traffic" cli
+ Clear ip/ipv6 pim interface traffic by running
+ "clear ip/ipv6 pim interface traffic" cli
Parameters
----------
Usage
-----
- result = clear_ip_pim_interface_traffic(tgen, topo)
+ result = clear_pim_interface_traffic(tgen, topo)
Returns
-------
return True
-def clear_ip_pim_interfaces(tgen, dut):
+def clear_pim_interfaces(tgen, dut):
"""
- Clear ip pim interface by running
- "clear ip pim interfaces" cli
+ Clear ip/ipv6 pim interface by running
+ "clear ip/ipv6 pim interfaces" cli
Parameters
----------
Usage
-----
- result = clear_ip_pim_interfaces(tgen, dut)
+ result = clear_pim_interfaces(tgen, dut)
Returns
-------
return True
-def clear_ip_igmp_interfaces(tgen, dut):
+def clear_igmp_interfaces(tgen, dut):
"""
- Clear ip igmp interfaces by running
- "clear ip igmp interfaces" cli
+ Clear ip/ipv6 igmp interfaces by running
+ "clear ip/ipv6 igmp interfaces" cli
Parameters
----------
Usage
-----
dut = "r1"
- result = clear_ip_igmp_interfaces(tgen, dut)
+ result = clear_igmp_interfaces(tgen, dut)
Returns
-------
errormsg(str) or True
@retry(retry_timeout=20)
-def clear_ip_mroute_verify(tgen, dut, expected=True):
+def clear_mroute_verify(tgen, dut, expected=True):
"""
- Clear ip mroute by running "clear ip mroute" cli and verify
+ Clear ip/ipv6 mroute by running "clear ip/ipv6 mroute" cli and verify
mroutes are up again after mroute clear
Parameters
Usage
-----
- result = clear_ip_mroute_verify(tgen, dut)
+ result = clear_mroute_verify(tgen, dut)
Returns
-------
return True
-def clear_ip_mroute(tgen, dut=None):
+def clear_mroute(tgen, dut=None):
"""
- Clear ip mroute by running "clear ip mroute" cli
+ Clear ip/ipv6 mroute by running "clear ip/ipv6 mroute" cli
Parameters
----------
Usage
-----
- clear_ip_mroute(tgen, dut)
+ clear_mroute(tgen, dut)
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
@retry(retry_timeout=60, diag_pct=0)
-def verify_ip_pim_upstream_rpf(
+def verify_pim_upstream_rpf(
tgen, topo, dut, interface, group_addresses, rp=None, expected=True
):
"""
- Verify IP PIM upstream rpf, config is verified
- using "show ip pim neighbor" cli
+ Verify IP/IPv6 PIM upstream rpf, config is verified
+ using "show ip/ipv6 pim neighbor" cli
Parameters
----------
Usage
-----
- result = verify_ip_pim_upstream_rpf(gen, topo, dut, interface,
+ result = verify_pim_upstream_rpf(gen, topo, dut, interface,
group_addresses, rp=None)
Returns
@retry(retry_timeout=60, diag_pct=0)
-def verify_ip_pim_join(
+def verify_pim_join(
tgen, topo, dut, interface, group_addresses, src_address=None, expected=True
):
"""
- Verify ip pim join by running "show ip pim join" cli
+ Verify ip/ipv6 pim join by running "show ip/ipv6 pim join" cli
Parameters
----------
dut = "r1"
interface = "r1-r0-eth0"
group_address = "225.1.1.1"
- result = verify_ip_pim_join(tgen, dut, star, group_address, interface)
+ result = verify_pim_join(tgen, dut, star, group_address, interface)
Returns
-------
return True
+
+def verify_pim_interface_traffic(tgen, input_dict, return_stats=True):
+ """
+    Verify ip pim interface traffic by running
+ "show ip pim interface traffic" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `input_dict(dict)`: defines DUT, what and from which interfaces
+ traffic needs to be verified
+ Usage
+ -----
+ input_dict = {
+ "r1": {
+ "r1-r0-eth0": {
+ "helloRx": 0,
+ "helloTx": 1,
+ "joinRx": 0,
+ "joinTx": 0
+ }
+ }
+ }
+
+ result = verify_pim_interface_traffic(tgen, input_dict)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ output_dict = {}
+ for dut in input_dict.keys():
+ if dut not in tgen.routers():
+ continue
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying pim interface traffic", dut)
+ show_pim_intf_traffic_json = run_frr_cmd(
+ rnode, "show ip pim interface traffic json", isjson=True
+ )
+
+ output_dict[dut] = {}
+ for intf, data in input_dict[dut].items():
+ interface_json = show_pim_intf_traffic_json[intf]
+ for state in data:
+
+ # Verify Tx/Rx
+ if state in interface_json:
+ output_dict[dut][state] = interface_json[state]
+ else:
+ errormsg = (
+                    "[DUT %s]: %s is not present "
+ "for interface %s [FAILED]!! " % (dut, state, intf)
+ )
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True if return_stats == False else output_dict
+
# def cleanup(self):
# super(McastTesterHelper, self).cleanup()
find_rp_from_bsrp_info,
verify_pim_grp_rp_source,
verify_pim_bsr,
- verify_ip_mroutes,
+ verify_mroutes,
verify_join_state_and_timer,
verify_pim_state,
verify_upstream_iif,
verify_igmp_groups,
- verify_ip_pim_upstream_rpf,
+ verify_pim_upstream_rpf,
enable_disable_pim_unicast_bsm,
enable_disable_pim_bsm,
- clear_ip_mroute,
- clear_ip_pim_interface_traffic,
+ clear_mroute,
+ clear_pim_interface_traffic,
get_pim_interface_traffic,
McastTesterHelper,
)
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
step("pre-configure BSM packet")
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
step("pre-configure BSM packet")
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
oil = "l1-r1-eth1"
step("Verify mroute populated on l1")
- result = verify_ip_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil)
+ result = verify_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Reload i1 and l1
# Verify ip mroute populated again
step("Verify mroute again on l1 (lhr)")
- result = verify_ip_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil)
+ result = verify_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("clear BSM database before moving to next case")
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
src_addr = "*"
oil = "i1-l1-eth1"
- result = verify_ip_mroutes(tgen, "i1", src_addr, GROUP_ADDRESS, iif, oil)
+ result = verify_mroutes(tgen, "i1", src_addr, GROUP_ADDRESS, iif, oil)
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# wait till bsm rp age out
# check if mroute uninstalled because of rp age out
step("check if mroute uninstalled because of rp age out in i1")
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, "i1", src_addr, GROUP_ADDRESS, iif, oil, expected=False
)
assert (
# check if mroute still not installed because of rp not available
step("check if mroute still not installed because of rp not available")
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, "i1", src_addr, GROUP_ADDRESS, iif, oil, expected=False
)
assert result is not True, (
# verify ip mroute populated
step("Verify ip mroute")
- result = verify_ip_mroutes(tgen, "i1", src_addr, GROUP_ADDRESS, iif, oil)
+ result = verify_mroutes(tgen, "i1", src_addr, GROUP_ADDRESS, iif, oil)
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Shut/No shut the bsm rpf interface and check mroute on lhr(l1)
iif = "l1-i1-eth0"
oil = "l1-r1-eth1"
- result = verify_ip_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil)
+ result = verify_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil)
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
step("clear BSM database before moving to next case")
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
iif = "l1-i1-eth0"
# Verify upstream rpf for 225.1.1.1 is chosen as rp1
step("Verify upstream rpf for 225.1.1.1 is chosen as bsrp")
- result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp[group])
+ result = verify_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp[group])
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Configure a static rp for the group 225.1.1.1/32
# Verify if upstream also reflects the static rp
step("Verify upstream rpf for 225.1.1.1 is chosen as static in l1")
- result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, static_rp)
+ result = verify_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, static_rp)
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# delete static rp for the group 225.1.1.1/32
# Verify upstream rpf for 225.1.1.1 is chosen as bsrp
step("Verify upstream rpf for 225.1.1.1 is chosen as bsrp in l1")
- result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp[group])
+ result = verify_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp[group])
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
step("clear BSM database before moving to next case")
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
iif = "l1-i1-eth0"
src_addr = "*"
oil = "l1-r1-eth1"
- result = verify_ip_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil)
+ result = verify_mroutes(tgen, "l1", src_addr, GROUP_ADDRESS, iif, oil)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
result = pre_config_to_bsm(
tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
iif = "l1-i1-eth0"
src_addr = "*"
oil = "l1-r1-eth1"
- result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
+ result = verify_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify join state and join timer
# Verify ip mroute is not installed
step("Verify mroute not installed in l1")
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, dut, src_addr, GROUP_ADDRESS, iif, oil, expected=False
)
assert (
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
# Verify ip mroute
src_addr = "*"
step("Verify ip mroute in l1")
- result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
+ result = verify_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Make RP unreachanble in LHR
# Verify mroute not installed
step("Verify mroute not installed")
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, dut, src_addr, GROUP_ADDRESS, iif, oil, expected=False
)
assert (
# Verify that (*,G) installed in mroute again
iif = "l1-i1-eth0"
- result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
+ result = verify_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
step("clear BSM database before moving to next case")
find_rp_from_bsrp_info,
verify_pim_grp_rp_source,
verify_pim_bsr,
- verify_ip_mroutes,
+ verify_mroutes,
verify_join_state_and_timer,
verify_pim_state,
verify_upstream_iif,
verify_igmp_groups,
- verify_ip_pim_upstream_rpf,
- clear_ip_mroute,
- clear_ip_pim_interface_traffic,
+ verify_pim_upstream_rpf,
+ clear_mroute,
+ clear_pim_interface_traffic,
McastTesterHelper,
)
from lib.topolog import logger
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
# Verify ip mroute
step("Verify ip mroute in l1")
src_addr = "*"
- result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
+ result = verify_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Remove the group rp mapping and send bsm
# Verify mroute not installed
step("Verify mroute not installed in l1")
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, dut, src_addr, GROUP_ADDRESS, iif, oil, retry_timeout=20, expected=False
)
assert (
# Verify that (*,G) installed in mroute again
iif = "l1-i1-eth0"
- result = verify_ip_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
+ result = verify_mroutes(tgen, dut, src_addr, GROUP_ADDRESS, iif, oil)
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
step("clear BSM database before moving to next case")
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
iif = "l1-i1-eth0"
# Verify upstream rpf for 225.1.1.1 is chosen as rp1
step("Verify upstream rpf for 225.1.1.1 is chosen as rp1 in l1")
- result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp1)
+ result = verify_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp1)
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Send BSR packet from b1 with rp for 225.1.1.1/32 removed
# Verify upstream rpf for 225.1.1.1 is chosen as rp1
step("Verify upstream rpf for 225.1.1.1 is chosen as rp2 in l1")
- result = verify_ip_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp2)
+ result = verify_pim_upstream_rpf(tgen, topo, dut, iif, GROUP_ADDRESS, rp2)
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify IIF/OIL in pim state
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
write_test_header(tc_name)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
pytest.skip(tgen.errors)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("pre-configure BSM packet")
result = pre_config_to_bsm(
from lib.pim import (
create_pim_config,
create_igmp_config,
- verify_ip_mroutes,
- clear_ip_mroute,
- clear_ip_pim_interface_traffic,
+ verify_mroutes,
+ clear_mroute,
+ clear_pim_interface_traffic,
verify_pim_config,
verify_upstream_iif,
verify_multicast_traffic,
verify_multicast_flag_state,
verify_igmp_groups,
- McastTesterHelper
+ McastTesterHelper,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
check_router_status(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
]
for data in input_dict_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Delete local join from DR node")
- input_dict = {
- "r1": {
- "igmp": {
- "interfaces": {
- vlan_intf_r1_s1: {
- "igmp": {
- "version": "2",
- "join": IGMP_JOIN_RANGE_3,
- "delete_attr": True,
+ for _join in IGMP_JOIN_RANGE_3:
+ input_dict = {
+ "r1": {
+ "igmp": {
+ "interfaces": {
+ vlan_intf_r1_s1: {
+ "igmp": {
+ "join": [_join],
+ "delete_attr": True,
+ }
}
}
}
}
}
- }
-
- result = create_igmp_config(tgen, topo, input_dict)
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- step(
- "After removing local join 227.1.1.1 group removed from IGMP join "
- "of R1, R2 node , using 'show ip igmp groups json'"
- )
+ result = create_igmp_config(tgen, topo, input_dict)
+ assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]):
- result = verify_igmp_groups(tgen, dut, intf, IGMP_JOIN_RANGE_3, expected=False)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "IGMP groups are still present \n Error: {}".format(tc_name, result)
+ step(
+ "After removing local join 227.1.1.1 group removed from IGMP join "
+ "of R1, R2 node , using 'show ip igmp groups json'"
)
+ for dut, intf in zip(["r1", "r2"], [intf_r1_s1, intf_r2_s1]):
+ result = verify_igmp_groups(tgen, dut, intf, IGMP_JOIN_RANGE_3, expected=False)
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "IGMP groups are still present \n Error: {}".format(tc_name, result)
+ )
+
step("(*,G) mroute for 227.1.1.1 group removed from R1 node")
step(
"After remove of local join from R1 and R2 node verify (*,G) and (S,G) "
)
for data in input_dict_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "mroutes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "mroutes are still present \n Error: {}".format(
+ tc_name, result
)
step("Configure local join on R2 for group range (227.1.1.1)")
]
for data in input_dict_r1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "Mroutes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "mroutes are still present \n Error: {}".format(
+ tc_name, result
)
step("Remove local join from DR and Non DR node")
)
for data in input_dict_r1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "Mroutes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "mroutes are still present \n Error: {}".format(
+ tc_name, result
)
for data in input_dict_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "Mroutes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "mroutes are still present \n Error: {}".format(
+ tc_name, result
)
write_test_footer(tc_name)
from lib.pim import (
create_pim_config,
create_igmp_config,
- verify_ip_mroutes,
- clear_ip_mroute,
- clear_ip_pim_interface_traffic,
+ verify_mroutes,
+ clear_mroute,
+ clear_pim_interface_traffic,
verify_pim_config,
verify_upstream_iif,
verify_multicast_flag_state,
)
logger.info("=" * 40)
+
#####################################################
#
# Local APIs
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
check_router_status(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
]
for data in input_dict_r1_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("R1 has mroute with none OIL and upstream with Not Join")
for data in input_dict_r1_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
for data in input_dict_r1_r2:
if data["dut"] == "r1":
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_r1_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
data["oil"],
expected=False,
)
- assert result is not True, (
- "Testcase {} : Failed \n "
- "mroutes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "mroutes are still present \n Error: {}".format(
+ tc_name, result
)
step("start FRR for all the nodes")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_r1_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
from lib.pim import (
create_pim_config,
create_igmp_config,
- verify_ip_mroutes,
- clear_ip_mroute,
- clear_ip_pim_interface_traffic,
+ verify_mroutes,
+ clear_mroute,
+ clear_pim_interface_traffic,
verify_pim_config,
verify_upstream_iif,
verify_multicast_traffic,
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
check_router_status(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
]
for data in input_dict_r1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict_r5:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_r5:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
create_pim_config,
create_igmp_config,
verify_igmp_groups,
- verify_ip_mroutes,
+ verify_mroutes,
get_pim_interface_traffic,
verify_upstream_iif,
- verify_ip_pim_join,
- clear_ip_mroute,
- clear_ip_pim_interface_traffic,
+ verify_pim_join,
+ clear_mroute,
+ clear_pim_interface_traffic,
verify_igmp_config,
McastTesterHelper,
)
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("l1 sent PIM (S,G) join to f1 , verify using 'show ip pim join'")
dut = "f1"
interface = intf_f1_r2
- result = verify_ip_pim_join(tgen, topo, dut, interface, IGMP_JOIN)
+ result = verify_pim_join(tgen, topo, dut, interface, IGMP_JOIN)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
write_test_footer(tc_name)
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8")
# previous 80 retries with 2s wait if we assume .5s per vtysh/show ip mroute runtime
# (41 * (2 + .5)) == 102.
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("l1 sent PIM (S,G) join to f1 , verify using 'show ip pim join'")
dut = "f1"
interface = "f1-r2-eth3"
- result = verify_ip_pim_join(tgen, topo, dut, interface, IGMP_JOIN)
+ result = verify_pim_join(tgen, topo, dut, interface, IGMP_JOIN)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
write_test_footer(tc_name)
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure static RP on c1 for group (225.1.1.1-5)")
input_dict = {
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
)
assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
step("Clear mroutes on l1")
- clear_ip_mroute(tgen, "l1")
+ clear_mroute(tgen, "l1")
step(
"After clear ip mroute (*,g) entries are re-populated again"
)
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
)
assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8")
{"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure static RP for (226.1.1.1-5) and (232.1.1.1-5)" " in c1")
},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure static RP for (226.1.1.1-5) in c2")
input_dict = {
{"dut": "c2", "src_address": source, "iif": "c2-f1-eth1", "oil": "c2-c1-eth0"},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
" router 'show ip mroute'"
)
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, "c1", "*", IGMP_JOIN, "c1-c2-eth1", "c1-l1-eth0", expected=False
)
assert (
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure RP on FRR2 (loopback interface) for " "the group range 225.0.0.0/8")
{"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
sleep(20)
step("Clear the mroute on f1")
- clear_ip_mroute(tgen, "f1")
+ clear_mroute(tgen, "f1")
step(
"After Shut the RP interface and clear the mroute verify all "
" 'show ip mroute' "
)
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, "f1", "*", IGMP_JOIN, "f1-r2-eth3", "f1-i8-eth2", expected=False
)
assert (
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
result = app_helper.run_join("i1", IGMP_JOIN, "l1")
{"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"},
]
for data in input_dict_4:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
result = app_helper.run_join("i1", IGMP_JOIN, "l1")
{"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-r2-eth3"},
]
for data in input_dict_5:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
create_pim_config,
create_igmp_config,
verify_igmp_groups,
- verify_ip_mroutes,
+ verify_mroutes,
get_pim_interface_traffic,
verify_upstream_iif,
verify_pim_neighbors,
verify_pim_state,
- clear_ip_mroute,
- clear_ip_pim_interface_traffic,
+ clear_mroute,
+ clear_pim_interface_traffic,
McastTesterHelper,
)
from lib.topolog import logger
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure static RP for (226.1.1.1-5) in c1")
step("Configure static RP for (232.1.1.1-5) in c2")
{"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
start_router_daemons(tgen, "f1", ["pimd"])
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
{"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
{"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "none"}
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure static RP for (226.1.1.1-5) in c1")
step("Configure static RP for (232.1.1.1-5) in c2")
{"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
start_router(tgen, "f1")
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
{"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"}
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
{"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "none"}
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure static RP for (226.1.1.1-5) and " "(232.1.1.1-5) in c2")
" 'show ip mroute' and mroute OIL is towards RP."
)
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
"f1",
"10.0.5.2",
)
assert result is True, "Testcase {} : " "Failed Error: {}".format(tc_name, result)
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, "f1", "10.0.5.2", _IGMP_JOIN_RANGE, "f1-i2-eth1", "f1-r2-eth3"
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
{"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
{"dut": "c2", "src_address": source, "iif": "c2-f1-eth1", "oil": "none"}
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure static RP for (226.1.1.1-5) in c1")
step("Configure static RP for (232.1.1.1-5) in c2")
{"dut": "f1", "src_address": source, "iif": "f1-i2-eth1", "oil": "f1-i8-eth2"},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
shutdown_bringup_interface(tgen, dut, intf, True)
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
intf = "f1-r2-eth3"
shutdown_bringup_interface(tgen, dut, intf, True)
- clear_ip_mroute(tgen, "l1")
- clear_ip_mroute(tgen, "l1")
+ clear_mroute(tgen, "l1")
+ clear_mroute(tgen, "l1")
step(
"After no shut, verify traffic resume to all the receivers"
)
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
shutdown_bringup_interface(tgen, dut, intf_l1_c1, True)
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
app_helper.stop_all_hosts()
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure static RP on c1 for group range " "(226.1.1.1-5) and (232.1.1.1-5)")
{"dut": "c2", "src_address": source, "iif": "c2-f1-eth1", "oil": "c2-i5-eth2"},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure static RP for group range (226.1.1.1-5) and " "(232.1.1.1-5) on c1")
_GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
{"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
{"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"},
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure static RP for (226.1.1.1-5) in c1")
step("Configure static RP for (232.1.1.1-5) in c2")
{"dut": "f1", "src_address": source, "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"},
]
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
{"dut": "l1", "src_address": source, "iif": "l1-r2-eth4", "oil": "l1-i1-eth1"}
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
{"dut": "f1", "src_address": source, "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"}
]
for data in input_dict:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step("Configure static RP for (226.1.1.1-5) in c1")
step("Configure static RP for (232.1.1.1-5) in c2")
},
]
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
" 'show ip mroute' "
)
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
"f1",
source_i2,
import time
import datetime
import pytest
+from time import sleep
pytestmark = pytest.mark.pimd
create_pim_config,
create_igmp_config,
verify_igmp_groups,
- verify_ip_mroutes,
- clear_ip_mroute,
- clear_ip_pim_interface_traffic,
+ verify_mroutes,
+ clear_mroute,
+ clear_pim_interface_traffic,
verify_igmp_config,
verify_pim_config,
verify_pim_interface,
verify_pim_rp_info,
verify_multicast_flag_state,
McastTesterHelper,
+ verify_pim_interface_traffic,
)
from lib.topolog import logger
from lib.topojson import build_config_from_json
return True
+def verify_pim_stats_increament(stats_before, stats_after):
+    """
+    API to compare pim interface control plane traffic
+
+    Parameters
+    ----------
+    * `stats_before` : Stats dictionary for any particular instance
+    * `stats_after` : Stats dictionary for any particular instance
+
+    Returns
+    -------
+    True when every counter in `stats_after` is strictly greater than its
+    counterpart in `stats_before`; otherwise an error-message string
+    describing the first counter that failed to increment (callers assert
+    on `result is True` / `result is not True`).
+    """
+
+    for router, stats_data in stats_before.items():
+        for stats, value in stats_data.items():
+            # A counter that merely stayed equal counts as "not incremented",
+            # hence >= rather than > here.
+            if stats_before[router][stats] >= stats_after[router][stats]:
+                errormsg = (
+                    "[DUT: %s]: state %s value has not"
+                    " incremented, Initial value: %s, "
+                    "Current value: %s [FAILED!!]"
+                    % (
+                        router,
+                        stats,
+                        stats_before[router][stats],
+                        stats_after[router][stats],
+                    )
+                )
+                # Bail out on the first non-incremented counter.
+                return errormsg
+
+            logger.info(
+                "[DUT: %s]: State %s value is "
+                "incremented, Initial value: %s, Current value: %s"
+                " [PASSED!!]",
+                router,
+                stats,
+                stats_before[router][stats],
+                stats_after[router][stats],
+            )
+
+    return True
+
+
def test_verify_oil_when_join_prune_sent_scenario_1_p1(request):
"""
TC_21_1:
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3")
step("Verify mroutes and iff upstream")
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify mroutes and iff upstream")
for data in input_dict_l1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify mroutes and iff upstream")
for data in input_dict_f1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify mroutes and iff upstream")
for data in input_dict_l1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step("Removing FRR3 to simulate topo " "FHR(FRR1)---LHR(FRR2)")
step("Verify mroutes and iff upstream")
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify mroutes and iff upstream")
for data in input_dict_l1_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict_l1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers")
)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
intf_f1_r2 = topo["routers"]["f1"]["links"]["r2"]["interface"]
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, "f1", source_i2, IGMP_JOIN_RANGE_1, intf_f1_i2, intf_f1_r2, expected=False
)
assert (
)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers")
]
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step("Enable PIM on all routers")
]
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict_l1_f1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
iif = topo["routers"]["l1"]["links"]["i6"]["interface"]
oil = topo["routers"]["l1"]["links"]["i1"]["interface"]
source = source_i6
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, dut, source, IGMP_JOIN_RANGE_1, iif, oil, expected=False
)
assert (
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step("Enable PIM on all routers")
]
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step("Configure 'ip pim' on receiver interface on FRR1")
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step("Configure 'ip pim' on receiver interface on FRR1")
]
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step("Configure 'ip pim' on receiver interface on FRR1")
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step(
step("Verify mroutes and iff upstream")
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
logger.info("Expected Behaviour: {}".format(result))
for data in input_dict_sg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
for data in input_dict_sg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step(
step("Verify mroutes and iff upstream")
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify mroute after Shut the link from LHR to RP from RP node")
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
logger.info("Expected Behaviour: {}".format(result))
for data in input_dict_sg_i1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify mroute after No shut the link from LHR to RP from RP node")
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_sg_i2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_sg_i1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify mroute after Shut the link from FHR to RP from RP node")
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_sg_i1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify mroute after Noshut the link from FHR to RP from RP node")
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_sg_i2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_sg_i1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify mroute after Shut the link from FHR to RP from FHR node")
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_sg_i1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify mroute after No Shut the link from FHR to RP from FHR node")
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_sg_i2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_sg_i1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step(
step("Verify mroutes and iff upstream")
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step(
]
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict_l1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_l1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict_r2_f1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
iif = topo["routers"]["r2"]["links"]["f1"]["interface"]
oil = topo["routers"]["r2"]["links"]["i3"]["interface"]
- result = verify_ip_mroutes(tgen, dut, src_address, _IGMP_JOIN_RANGE, iif, oil)
+ result = verify_mroutes(tgen, dut, src_address, _IGMP_JOIN_RANGE, iif, oil)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
src_address = source_i1
iif = topo["routers"]["r2"]["links"]["l1"]["interface"]
oil = topo["routers"]["r2"]["links"]["i3"]["interface"]
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen, dut, src_address, _IGMP_JOIN_RANGE, iif, oil, expected=False
)
assert (
)
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
step(
]
for data in input_dict_all:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict_l1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_l1:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_r2:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
]
for data in input_dict_all_star:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
write_test_footer(tc_name)
+def test_PIM_passive_p1(request):
+    """
+    TC Verify PIM passive functionality
+
+    Checks that an interface configured with 'ip pim passive' still honours
+    IGMP joins and forwards multicast traffic (mroutes unaffected), while
+    PIM hello tx/rx counters stop incrementing on the passive interface,
+    and that hello counters resume once passive mode is removed.
+    """
+
+    tgen = get_topogen()
+    tc_name = request.node.name
+    write_test_header(tc_name)
+    app_helper.stop_all_hosts()
+    # Creating configuration from JSON
+    clear_mroute(tgen)
+    if tgen.routers_have_failure():
+        check_router_status(tgen)
+    reset_config_on_routers(tgen)
+    clear_pim_interface_traffic(tgen, topo)
+
+    step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3")
+    step(
+        "Enable IGMP of FRR1 interface and send IGMP joins "
+        " from FRR1 node for group range (225.1.1.1-5)"
+    )
+
+    intf_c1_i4 = topo["routers"]["c1"]["links"]["i4"]["interface"]
+
+    step(
+        "configure PIM passive on receiver interface to verify no impact on IGMP join"
+        "and multicast traffic on pim passive interface"
+    )
+
+    raw_config = {
+        "c1": {"raw_config": ["interface {}".format(intf_c1_i4), "ip pim passive"]}
+    }
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("configure IGMPv2 and send IGMP joinon on PIM passive interface")
+    input_dict = {
+        "c1": {"igmp": {"interfaces": {intf_c1_i4: {"igmp": {"version": "2"}}}}}
+    }
+    result = create_igmp_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    input_join = {"i4": topo["routers"]["i4"]["links"]["c1"]["interface"]}
+    for recvr, recvr_intf in input_join.items():
+        result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
+        assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+
+    step("Configure static RP for (225.1.1.1-5) as R2")
+
+    input_dict = {
+        "r2": {
+            "pim": {
+                "rp": [
+                    {
+                        # RP address is r2's loopback (strip the prefix length).
+                        "rp_addr": topo["routers"]["r2"]["links"]["lo"]["ipv4"].split(
+                            "/"
+                        )[0],
+                        "group_addr_range": GROUP_RANGE_1,
+                    }
+                ]
+            }
+        }
+    }
+
+    result = create_pim_config(tgen, topo, input_dict)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("Send Mcast traffic from C2 to all the groups ( 225.1.1.1 to 225.1.1.5)")
+
+    input_src = {"i5": topo["routers"]["i5"]["links"]["c2"]["interface"]}
+    for src, src_intf in input_src.items():
+        result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    source_i5 = topo["routers"]["i5"]["links"]["c2"]["ipv4"].split("/")[0]
+
+    # Expected (*,G) entry on c1: iif towards the RP side, oil towards the
+    # passive receiver interface.
+    input_dict_starg = [
+        {
+            "dut": "c1",
+            "src_address": "*",
+            "iif": topo["routers"]["c1"]["links"]["l1"]["interface"],
+            "oil": topo["routers"]["c1"]["links"]["i4"]["interface"],
+        }
+    ]
+
+    # Expected (S,G) entry on c1 for the i5 source behind c2.
+    input_dict_sg = [
+        {
+            "dut": "c1",
+            "src_address": source_i5,
+            "iif": topo["routers"]["c1"]["links"]["c2"]["interface"],
+            "oil": topo["routers"]["c1"]["links"]["i4"]["interface"],
+        }
+    ]
+
+    step("(*,G) and (S,G) created on f1 and node verify using 'show ip mroute'")
+
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    # NOTE(review): this loop repeats the (S,G) verification above verbatim;
+    # presumably one of the two was meant to iterate input_dict_starg — confirm.
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_starg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    intf_c1_c2 = topo["routers"]["c1"]["links"]["c2"]["interface"]
+    intf_c2_c1 = topo["routers"]["c2"]["links"]["c1"]["interface"]
+
+    step(
+        "configure PIM passive on upstream interface to verify"
+        "hello tx/rx counts are not incremented"
+    )
+
+    # Changing hello timer to 3sec for checking more number of packets
+
+    raw_config = {
+        "c1": {
+            "raw_config": [
+                "interface {}".format(intf_c1_c2),
+                "ip pim passive",
+                "ip pim hello 3",
+            ]
+        },
+        "c2": {"raw_config": ["interface {}".format(intf_c2_c1), "ip pim hello 3"]},
+    }
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("verify PIM hello tx/rx stats on C1")
+    state_dict = {
+        "c1": {
+            intf_c1_c2: ["helloTx", "helloRx"],
+        }
+    }
+
+    logger.info("waiting for 5 sec config to get apply and hello count update")
+    sleep(5)
+
+    # Snapshot hello counters before the passive window; a dict return means
+    # the stats were fetched successfully.
+    c1_state_before = verify_pim_interface_traffic(tgen, state_dict)
+    assert isinstance(
+        c1_state_before, dict
+    ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+        tc_name, result
+    )
+
+    logger.info(
+        "sleeping for 30 sec hello interval timer to verify count are not increamented"
+    )
+    # NOTE(review): log message says 30 sec but we sleep 35 — presumably extra
+    # margin over ten 3-sec hello intervals; confirm intended.
+    sleep(35)
+
+    c1_state_after = verify_pim_interface_traffic(tgen, state_dict)
+    assert isinstance(
+        c1_state_after, dict
+    ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+        tc_name, result
+    )
+
+    step("verify stats not increamented on c1")
+    # Passive interface must NOT send/receive hellos, so the increment check
+    # is expected to fail here (result is an error string, not True).
+    result = verify_pim_stats_increament(c1_state_before, c1_state_after)
+    assert (
+        result is not True
+    ), "Testcase{} : Failed Error: {}" "stats incremented".format(tc_name, result)
+
+    step("No impact observed on mroutes")
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    # NOTE(review): duplicated (S,G) loop again — same remark as above.
+    for data in input_dict_sg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    for data in input_dict_starg:
+        result = verify_mroutes(
+            tgen,
+            data["dut"],
+            data["src_address"],
+            IGMP_JOIN_RANGE_1,
+            data["iif"],
+            data["oil"],
+        )
+        assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    step("remove PIM passive and verify hello tx/rx is increamented")
+    raw_config = {
+        "c1": {
+            "raw_config": [
+                "interface {}".format(intf_c1_c2),
+                "no ip pim passive",
+                "ip pim hello 3",
+            ]
+        }
+    }
+    result = apply_raw_config(tgen, raw_config)
+    assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+    logger.info("waiting for 30 sec for pim hello to receive")
+    sleep(30)
+
+    c1_state_after = verify_pim_interface_traffic(tgen, state_dict)
+    assert isinstance(
+        c1_state_after, dict
+    ), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
+        tc_name, result
+    )
+
+    step("verify stats increamented on c1 after removing pim passive")
+    # With passive removed, hello counters must now have incremented relative
+    # to the pre-passive snapshot.
+    result = verify_pim_stats_increament(c1_state_before, c1_state_after)
+    assert result is True, "Testcase{} : Failed Error: {}" "stats incremented".format(
+        tc_name, result
+    )
+
+    write_test_footer(tc_name)
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
from lib.pim import (
create_pim_config,
create_igmp_config,
- verify_ip_mroutes,
- clear_ip_pim_interface_traffic,
+ verify_mroutes,
+ clear_pim_interface_traffic,
verify_upstream_iif,
- clear_ip_mroute,
+ clear_mroute,
verify_pim_rp_info,
get_pim_interface_traffic,
McastTesterHelper,
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step(
"Remove c1-c2 connected link to simulate topo "
step("Verify mroutes and iff upstream")
for data in input_dict_sg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify mroute not present after Delete of static routes on c1")
for data in input_dict_sg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
)
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify (s,g) populated after adding default route ")
for data in input_dict_sg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify (*,g) populated after adding default route ")
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step(
"Remove c1-c2 connected link to simulate topo "
step("Verify mroutes and iff upstream")
for data in input_dict_sg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify (s,g) populated after adding default route ")
for data in input_dict_sg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
step("Verify (*,g) populated after adding default route ")
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
# Creating configuration from JSON
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
+ clear_mroute(tgen)
reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
+ clear_pim_interface_traffic(tgen, topo)
step(
"Remove c1-c2 connected link to simulate topo "
step("(*,G) and (S,G) created on f1 and node verify using 'show ip mroute'")
for data in input_dict_sg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
for data in input_dict_starg:
- result = verify_ip_mroutes(
+ result = verify_mroutes(
tgen,
data["dut"],
data["src_address"],
verify_igmp_groups,
verify_upstream_iif,
verify_join_state_and_timer,
- verify_ip_mroutes,
+ verify_mroutes,
verify_pim_neighbors,
get_pim_interface_traffic,
verify_pim_rp_info,
verify_pim_state,
- clear_ip_pim_interface_traffic,
- clear_ip_igmp_interfaces,
- clear_ip_pim_interfaces,
- clear_ip_mroute,
- clear_ip_mroute_verify,
+ clear_pim_interface_traffic,
+ clear_igmp_interfaces,
+ clear_pim_interfaces,
+ clear_mroute,
+ clear_mroute_verify,
McastTesterHelper,
)
result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify ip pim join")
)
step("r1: Verify ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n " "r1: mroutes are still present \n Error: {}".format(
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
dut = "r1"
intf = "r1-r3-eth2"
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) upstream IIF interface")
step("r2: Verify (*, G) ip mroutes")
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r2-eth1"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Uncomment next line for debugging
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
dut = "r1"
intf = "r1-r3-eth2"
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1 :Verify ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Make RP un-reachable")
assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
step("r1: (*, G) cleared from mroute table using show ip mroute")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: (*, G) are not cleared from mroute table \n Error: {}".format(
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on R1 interface")
step("Configure r2 loopback interface as RP")
)
step("r1: Verify ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n " "r1: mroutes are still present\n Error: {}".format(
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1 : Verify ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
logger.info("Expected behavior: %s", result)
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface and send IGMP " "join (225.1.1.1) to r1")
step("Configure r2 loopback interface as RP")
)
step("r1 : Verify ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n " "r1: mroutes are still present\n Error: {}".format(
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1 : Verify ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
logger.info("Expected behavior: %s", result)
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (loopback interface) for the group range " "224.0.0.0/4")
step("r1 : Verify ip mroutes")
iif = "r1-r4-eth3"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1 : Verify PIM state")
result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_pim_interface_traffic(tgen, TOPO)
step("r1 : Verify joinTx, pruneTx count before RP gets deleted")
state_dict = {"r1": {"r1-r2-eth1": ["joinTx"], "r1-r4-eth3": ["pruneTx"]}}
"RP gets deleted"
)
iif = "r1-r2-eth1"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
logger.info("Expected behavior: %s", result)
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface")
step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Uncomment next line for debugging
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface")
step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Uncomment next line for debugging
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Uncomment next line for debugging
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Uncomment next line for debugging
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1")
step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) upstream IIF interface")
step("r2: Verify (*, G) ip mroutes")
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (S, G) upstream IIF interface")
step("r2: Verify (S, G) ip mroutes")
oif = "none"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Uncomment next line for debugging
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
step("r1: Verify (*, G) ip mroutes")
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups timer restarted")
- result = clear_ip_igmp_interfaces(tgen, dut)
+ result = clear_igmp_interfaces(tgen, dut)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify PIM neighbor timer restarted")
- result = clear_ip_pim_interfaces(tgen, dut)
+ result = clear_pim_interfaces(tgen, dut)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify PIM mroute timer restarted")
- result = clear_ip_mroute_verify(tgen, dut)
+ result = clear_mroute_verify(tgen, dut)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Uncomment next line for debugging
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to R1")
step("Configure RP on r3 (loopback interface) for the group range" " 224.0.0.0/4")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) upstream IIF interface")
step("r2: Verify (*, G) ip mroutes")
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
dut = "r1"
logger.info("waiting for 10 sec to make sure old mroute time is higher")
sleep(10)
# Why do we then wait 60 seconds below before checking the routes?
- uptime_before = verify_ip_mroutes(
+ uptime_before = verify_mroutes(
tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, mwait=60
)
assert isinstance(uptime_before, dict), "Testcase{} : Failed Error: {}".format(
sleep(5)
# Why do we then wait 10 seconds below before checking the routes?
- uptime_after = verify_ip_mroutes(
+ uptime_after = verify_mroutes(
tgen, dut, STAR, GROUP_ADDRESS, iif, oil, return_uptime=True, mwait=10
)
assert isinstance(uptime_after, dict), "Testcase{} : Failed Error: {}".format(
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1")
step("Configure RP on r2 (loopback interface) for the group range" "225.1.1.0/24")
step("r1: Verify (*, G) ip mroutes")
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) upstream IIF interface")
step("r2: Verify (*, G) ip mroutes")
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (S, G) upstream IIF interface")
step("r2: Verify (S, G) ip mroutes")
oif = "none"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Delete RP configuration")
step("r1: Verify (*, G) ip mroutes")
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) upstream IIF interface")
step("r2: Verify (*, G) ip mroutes")
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, group_address_list, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (S, G) upstream IIF interface")
step("r2: Verify (S, G) ip mroutes")
oif = "none"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, group_address_list, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Delete existing RP configuration")
input_dict = {
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) upstream IIF interface")
step("r2: Verify (*, G) ip mroutes")
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (S, G) upstream IIF interface")
step("r2: Verify (S, G) ip mroutes")
oif = "none"
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
step("r1: Verify (*, G) ip mroutes")
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r4: Verify (*, G) upstream IIF interface")
step("r4: Verify (*, G) ip mroutes")
oif = "r4-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r4: Verify (S, G) upstream IIF interface")
step("r4: Verify (S, G) ip mroutes")
oif = "none"
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("Delete RP configuration")
step("r1: Verify (*, G) ip mroutes")
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) upstream IIF interface")
step("r2: Verify (*, G) ip mroutes")
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_1, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (S, G) upstream IIF interface")
step("r2: Verify (S, G) ip mroutes")
oif = "none"
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_1, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
step("r1: Verify (*, G) ip mroutes")
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) upstream IIF interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (S, G) ip mroutes")
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r4: Verify (*, G) upstream IIF interface")
step("r4: Verify (*, G) ip mroutes")
oif = "r4-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS_LIST_2, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r4: Verify (S, G) upstream IIF interface")
step("r4: Verify (S, G) ip mroutes")
oif = "none"
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (S, G) upstream IIF interface")
step("r3: Verify (S, G) ip mroutes")
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(
- tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif
- )
+ result = verify_mroutes(tgen, dut, SOURCE_ADDRESS, GROUP_ADDRESS_LIST_2, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
# Steps to execute
step("Enable IGMP on r1 interface")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) ip mroutes")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) ip mroutes")
dut = "r2"
iif = "lo"
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Shut the interface r1-r2-eth1 from R1 to R2")
dut = "r1"
iif = "r1-r3-eth2"
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) ip mroutes")
dut = "r2"
iif = "lo"
oif = "r2-r3-eth1"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (*, G) ip mroutes")
dut = "r3"
iif = "r3-r2-eth1"
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Shut the link from R1 to R3 from R3 node")
dut = "r1"
iif = "r1-r3-eth2"
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
dut = "r2"
iif = "lo"
oif = "r2-r3-eth1"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r2: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
dut = "r3"
iif = "r3-r2-eth1"
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r3: (*,G) mroutes are not cleared after shut of R1 to R3 link\n Error: {}".format(
dut = "r1"
iif = "r1-r3-eth2"
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) ip mroutes")
dut = "r2"
iif = "lo"
oif = "r2-r3-eth1"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r3: Verify (*, G) ip mroutes")
dut = "r3"
iif = "r3-r2-eth1"
oif = "r3-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: No shutdown the link from R1 to R2 from R1 node")
dut = "r1"
iif = "r1-r2-eth1"
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) ip mroutes")
dut = "r2"
iif = "lo"
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) ip mroutes created")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) ip mroutes created")
dut = "r2"
iif = "lo"
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Delete RP configuration")
dut = "r1"
iif = "r1-r2-eth1"
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: (*,G) mroutes are not cleared after shut of R1 to R0 link\n Error: {}".format(
dut = "r2"
iif = "lo"
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r2: (*,G) mroutes are not cleared after shut of R1 to R0 link\n Error: {}".format(
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
app_helper.stop_all_hosts()
- clear_ip_mroute(tgen)
- clear_ip_pim_interface_traffic(tgen, TOPO)
+ clear_mroute(tgen)
+ clear_pim_interface_traffic(tgen, TOPO)
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (lo) for the group range" " 224.0.0.0/4")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) ip mroutes created")
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Verify (*, G) ip mroutes created")
dut = "r2"
iif = "lo"
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r2: Delete RP configuration")
dut = "r1"
iif = "r1-r2-eth1"
oif = "r1-r0-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r1: (*,G) mroutes are not cleared after shut of R1 to R2 and R3 link\n Error: {}".format(
dut = "r2"
iif = "lo"
oif = "r2-r1-eth0"
- result = verify_ip_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
+ result = verify_mroutes(tgen, dut, STAR, GROUP_ADDRESS, iif, oif, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"r2: (*,G) mroutes are not cleared after shut of R1 to R2 and R3 link\n Error: {}".format(
identifier argc;
expression e1;
expression e2;
+identifier I;
@@
+(
- argv_find(argv, argc, e1, &idx);
if (
- idx
{
e2;
}
+|
+- argv_find(argv, argc, e1, &idx);
+... when != I = idx;
+ when strict
+)
--- /dev/null
+//
+
+@@
+identifier src, dst;
+identifier str, len;
+type t =~ "struct";
+
+@@
+
+(
+- memset(&dst, 0, sizeof(t));
++ memset(&dst, 0, sizeof(dst));
+|
+- memcpy(&dst, &src, sizeof(t));
++ memcpy(&dst, &src, sizeof(dst));
+|
+- char str[...];
+...
+- memset(&str, 0, ...);
++ memset(&str, 0, sizeof(str));
+)
vrrp_set_advertisement_interval(vr, vd.advertisement_interval);
- hash_get(vrrp_vrouters_hash, vr, hash_alloc_intern);
+ (void)hash_get(vrrp_vrouters_hash, vr, hash_alloc_intern);
return vr;
}
return -1;
}
- memset(&addr, 0, sizeof(struct sockaddr_un));
+ memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
#ifdef HAVE_STRUCT_SOCKADDR_UN_SUN_LEN
zlog_debug("%s: attempting to connect", dmn->name);
dmn->connect_tries++;
- memset(&addr, 0, sizeof(struct sockaddr_un));
+ memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
snprintf(addr.sun_path, sizeof(addr.sun_path), "%s/%s.vty", gs.vtydir,
dmn->name);
type uint8 {
range "1..2";
}
+ default "2";
description
"MLD version.";
}
"Enable PIM flag on the interface.";
}
+ leaf pim-passive-enable {
+ type boolean;
+ default "false";
+ description
+ "Disable exchange of protocol packets.";
+ }
+
leaf hello-interval {
type uint8 {
range "1..max";
struct prefix p, d;
struct connected *ifc;
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
p.family = AF_INET;
p.u.prefix4 = *addr;
p.prefixlen =
CHECK_FLAG(flags, ZEBRA_IFA_PEER) ? IPV4_MAX_BITLEN : prefixlen;
if (dest) {
- memset(&d, 0, sizeof(struct prefix));
+ memset(&d, 0, sizeof(d));
d.family = AF_INET;
d.u.prefix4 = *dest;
d.prefixlen = prefixlen;
struct prefix p, d;
struct connected *ifc;
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
p.family = AF_INET6;
memcpy(&p.u.prefix6, address, sizeof(struct in6_addr));
p.prefixlen = prefixlen;
rtadv_delete_prefix(ifp->info, &p);
if (dest) {
- memset(&d, 0, sizeof(struct prefix));
+ memset(&d, 0, sizeof(d));
d.family = AF_INET6;
IPV6_ADDR_COPY(&d.u.prefix6, dest);
d.prefixlen = prefixlen;
#include <linux/rtnetlink.h>
#include <net/if_arp.h>
#include <linux/fib_rules.h>
+#include <linux/lwtunnel.h>
#include <stdio.h>
#include <stdint.h>
#include "zebra/rt_netlink.h"
#include "zebra/kernel_netlink.h"
+#include "lib/vxlan.h"
const char *nlmsg_type2str(uint16_t type)
{
case RTM_GETNEXTHOP:
return "GETNEXTHOP";
+ case RTM_NEWTUNNEL:
+ return "NEWTUNNEL";
+ case RTM_DELTUNNEL:
+ return "DELTUNNEL";
+ case RTM_GETTUNNEL:
+ return "GETTUNNEL";
+
case RTM_NEWNETCONF:
return "RTM_NEWNETCONF";
case RTM_DELNETCONF:
return "MRT";
case RTPROT_ZEBRA:
return "ZEBRA";
+ case RTPROT_BGP:
+ return "BGP";
+ case RTPROT_ISIS:
+ return "ISIS";
+ case RTPROT_OSPF:
+ return "OSPF";
case RTPROT_BIRD:
return "BIRD";
case RTPROT_DNROUTED:
goto next_rta;
}
+/*
+ * Dump the attributes of an RTM_NEWTUNNEL/DELTUNNEL/GETTUNNEL message:
+ * walk every rtattr in the message and, for each nested
+ * VXLAN_VNIFILTER_ENTRY, log the VNI range it carries.
+ */
+static void nltnl_dump(struct tunnel_msg *tnlm, size_t msglen)
+{
+ struct rtattr *attr;
+ vni_t vni_start = 0, vni_end = 0;
+ struct rtattr *ttb[VXLAN_VNIFILTER_ENTRY_MAX + 1];
+ uint8_t rta_type;
+
+ attr = TUNNEL_RTA(tnlm);
+next_attr:
+ /* Check the header for valid length and for outbound access. */
+ if (RTA_OK(attr, msglen) == 0)
+ return;
+
+ rta_type = attr->rta_type & NLA_TYPE_MASK;
+
+ /* Skip any attribute that is not a vnifilter entry. */
+ if (rta_type != VXLAN_VNIFILTER_ENTRY) {
+ attr = RTA_NEXT(attr, msglen);
+ goto next_attr;
+ }
+
+ memset(ttb, 0, sizeof(ttb));
+
+ /* Parse the nested START/END sub-attributes of this entry. */
+ netlink_parse_rtattr_flags(ttb, VXLAN_VNIFILTER_ENTRY_MAX,
+ RTA_DATA(attr), RTA_PAYLOAD(attr),
+ NLA_F_NESTED);
+
+ if (ttb[VXLAN_VNIFILTER_ENTRY_START])
+ vni_start =
+ *(uint32_t *)RTA_DATA(ttb[VXLAN_VNIFILTER_ENTRY_START]);
+
+ if (ttb[VXLAN_VNIFILTER_ENTRY_END])
+ vni_end = *(uint32_t *)RTA_DATA(ttb[VXLAN_VNIFILTER_ENTRY_END]);
+ zlog_debug(" vni_start %u, vni_end %u", vni_start, vni_end);
+
+ attr = RTA_NEXT(attr, msglen);
+ goto next_attr;
+}
+
+/* Map an LWTUNNEL_ENCAP_* encapsulation type to a printable name. */
+static const char *lwt_type2str(uint16_t type)
+{
+ switch (type) {
+ case LWTUNNEL_ENCAP_NONE:
+ return "NONE";
+ case LWTUNNEL_ENCAP_MPLS:
+ return "MPLS";
+ case LWTUNNEL_ENCAP_IP:
+ return "IPv4";
+ case LWTUNNEL_ENCAP_ILA:
+ return "ILA";
+ case LWTUNNEL_ENCAP_IP6:
+ return "IPv6";
+ case LWTUNNEL_ENCAP_SEG6:
+ return "SEG6";
+ case LWTUNNEL_ENCAP_BPF:
+ return "BPF";
+ case LWTUNNEL_ENCAP_SEG6_LOCAL:
+ return "SEG6_LOCAL";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+/* Map a NEXTHOP_GRP_TYPE_* nexthop-group type to a printable name. */
+static const char *nhg_type2str(uint16_t type)
+{
+ switch (type) {
+ case NEXTHOP_GRP_TYPE_MPATH:
+ return "MULTIPATH";
+ case NEXTHOP_GRP_TYPE_RES:
+ return "RESILIENT MULTIPATH";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static void nlnh_dump(struct nhmsg *nhm, size_t msglen)
{
struct rtattr *rta;
nhgrp[i].weight);
break;
case NHA_ENCAP_TYPE:
+ u16v = *(uint16_t *)RTA_DATA(rta);
+ zlog_debug(" %s", lwt_type2str(u16v));
+ break;
case NHA_GROUP_TYPE:
u16v = *(uint16_t *)RTA_DATA(rta);
- zlog_debug(" %d", u16v);
+ zlog_debug(" %s", nhg_type2str(u16v));
break;
case NHA_BLACKHOLE:
/* NOTHING */
struct nhmsg *nhm;
struct netconfmsg *ncm;
struct ifinfomsg *ifi;
+ struct tunnel_msg *tnlm;
struct fib_rule_hdr *frh;
char fbuf[128];
char ibuf[128];
nlnh_dump(nhm, nlmsg->nlmsg_len - NLMSG_LENGTH(sizeof(*nhm)));
break;
+ case RTM_NEWTUNNEL:
+ case RTM_DELTUNNEL:
+ case RTM_GETTUNNEL:
+ tnlm = NLMSG_DATA(nlmsg);
+ zlog_debug(" tnlm [family=(%d) %s ifindex=%d ", tnlm->family,
+ af_type2str(tnlm->family), tnlm->ifindex);
+ nltnl_dump(tnlm,
+ nlmsg->nlmsg_len -
+ NLMSG_LENGTH(sizeof(struct tunnel_msg)));
+ break;
+
+
case RTM_NEWNETCONF:
case RTM_DELNETCONF:
ncm = NLMSG_DATA(nlmsg);
uint8_t update = 0;
if (argc == 2) {
- /* Check new delay value against old Min and Max delays if set
+ /*
+ * Check new delay value against old Min and Max delays if set
+ *
+ * RFC 7471 Section 4.2.7:
+ * It is possible for min delay and max delay to be
+ * the same value.
+ *
+ * Therefore, it is also allowed that the average
+ * delay be equal to the min delay or max delay.
*/
if (IS_PARAM_SET(iflp, LP_MM_DELAY)
- && (delay <= iflp->min_delay || delay >= iflp->max_delay)) {
+ && (delay < iflp->min_delay || delay > iflp->max_delay)) {
vty_out(vty,
- "Average delay should be comprise between Min (%d) and Max (%d) delay\n",
+ "Average delay should be in range Min (%d) - Max (%d) delay\n",
iflp->min_delay, iflp->max_delay);
return CMD_WARNING_CONFIG_FAILED;
}
update = 1;
}
} else {
- /* Check new delays value coherency */
- if (delay <= low || delay >= high) {
+ /*
+ * Check new delays value coherency. See above note
+ * regarding average delay equal to min/max allowed
+ */
+ if (delay < low || delay > high) {
vty_out(vty,
- "Average delay should be comprise between Min (%d) and Max (%d) delay\n",
+ "Average delay should be in range Min (%d) - Max (%d) delay\n",
low, high);
return CMD_WARNING_CONFIG_FAILED;
}
strlcpy(ifreq->ifr_name, ifp->name, sizeof(ifreq->ifr_name));
}
+#ifndef HAVE_NETLINK
/* call ioctl system call */
int if_ioctl(unsigned long request, caddr_t buffer)
{
}
return 0;
}
+#endif
/* call ioctl system call */
int vrf_if_ioctl(unsigned long request, caddr_t buffer, vrf_id_t vrf_id)
}
return 0;
}
-#endif /* ! HAVE_NETLINK */
/*
* get interface metric
ifp->mtu6 = ifp->mtu = -1;
#endif
}
+#endif /* ! HAVE_NETLINK */
/*
* Handler for interface address programming via the zebra dplane,
ZEBRA_DPLANE_REQUEST_SUCCESS : ZEBRA_DPLANE_REQUEST_FAILURE);
}
-#endif /* !HAVE_NETLINK */
-
-#ifdef HAVE_NETLINK
-
-/* TODO -- remove; no use of these apis with netlink any longer */
-
-#else /* ! HAVE_NETLINK */
#ifdef HAVE_STRUCT_IFALIASREQ
/*
strlcpy((char *)&addreq.ifra_name, dplane_ctx_get_ifname(ctx),
sizeof(addreq.ifra_name));
- memset(&addr, 0, sizeof(struct sockaddr_in));
+ memset(&addr, 0, sizeof(addr));
addr.sin_addr = p->prefix;
addr.sin_family = p->family;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
if (dplane_ctx_intf_is_connected(ctx)) {
p = (struct prefix_ipv4 *)dplane_ctx_get_intf_dest(ctx);
- memset(&mask, 0, sizeof(struct sockaddr_in));
+ memset(&mask, 0, sizeof(mask));
peer.sin_addr = p->prefix;
peer.sin_family = p->family;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
sizeof(struct sockaddr_in));
}
- memset(&mask, 0, sizeof(struct sockaddr_in));
+ memset(&mask, 0, sizeof(mask));
masklen2ip(p->prefixlen, &mask.sin_addr);
mask.sin_family = p->family;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
strlcpy((char *)&addreq.ifra_name, dplane_ctx_get_ifname(ctx),
sizeof(addreq.ifra_name));
- memset(&addr, 0, sizeof(struct sockaddr_in));
+ memset(&addr, 0, sizeof(addr));
addr.sin_addr = p->prefix;
addr.sin_family = p->family;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
if (dplane_ctx_intf_is_connected(ctx)) {
p = (struct prefix_ipv4 *)dplane_ctx_get_intf_dest(ctx);
- memset(&mask, 0, sizeof(struct sockaddr_in));
+ memset(&mask, 0, sizeof(mask));
peer.sin_addr = p->prefix;
peer.sin_family = p->family;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
sizeof(struct sockaddr_in));
}
- memset(&mask, 0, sizeof(struct sockaddr_in));
+ memset(&mask, 0, sizeof(mask));
masklen2ip(p->prefixlen, &mask.sin_addr);
mask.sin_family = p->family;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
strlcpy(ifreq.ifr_name, dplane_ctx_get_ifname(ctx),
sizeof(ifreq.ifr_name));
- memset(&addr, 0, sizeof(struct sockaddr_in));
+ memset(&addr, 0, sizeof(addr));
addr.sin_family = p->family;
memcpy(&ifreq.ifr_addr, &addr, sizeof(struct sockaddr_in));
ret = if_ioctl(SIOCSIFADDR, (caddr_t)&ifreq);
int ret;
struct ifreq ifreq;
- memset(&ifreq, 0, sizeof(struct ifreq));
+ memset(&ifreq, 0, sizeof(ifreq));
ifreq_set_name(&ifreq, ifp);
ifreq.ifr_flags = ifp->flags;
int ret;
struct ifreq ifreq;
- memset(&ifreq, 0, sizeof(struct ifreq));
+ memset(&ifreq, 0, sizeof(ifreq));
ifreq_set_name(&ifreq, ifp);
ifreq.ifr_flags = ifp->flags;
strlcpy((char *)&addreq.ifra_name,
dplane_ctx_get_ifname(ctx), sizeof(addreq.ifra_name));
- memset(&addr, 0, sizeof(struct sockaddr_in6));
+ memset(&addr, 0, sizeof(addr));
addr.sin6_addr = p->prefix;
addr.sin6_family = p->family;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
#endif
memcpy(&addreq.ifra_addr, &addr, sizeof(struct sockaddr_in6));
- memset(&mask, 0, sizeof(struct sockaddr_in6));
+ memset(&mask, 0, sizeof(mask));
masklen2ip6(p->prefixlen, &mask.sin6_addr);
mask.sin6_family = p->family;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
strlcpy((char *)&addreq.ifra_name,
dplane_ctx_get_ifname(ctx), sizeof(addreq.ifra_name));
- memset(&addr, 0, sizeof(struct sockaddr_in6));
+ memset(&addr, 0, sizeof(addr));
addr.sin6_addr = p->prefix;
addr.sin6_family = p->family;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
#endif
memcpy(&addreq.ifra_addr, &addr, sizeof(struct sockaddr_in6));
- memset(&mask, 0, sizeof(struct sockaddr_in6));
+ memset(&mask, 0, sizeof(mask));
masklen2ip6(p->prefixlen, &mask.sin6_addr);
mask.sin6_family = p->family;
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
/* Make socket for Linux netlink interface. */
static int netlink_socket(struct nlsock *nl, unsigned long groups,
- ns_id_t ns_id)
+ unsigned long ext_groups, ns_id_t ns_id)
{
int ret;
struct sockaddr_nl snl;
snl.nl_family = AF_NETLINK;
snl.nl_groups = groups;
+#if defined SOL_NETLINK
+ if (ext_groups)
+ setsockopt(sock, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
+ &ext_groups, sizeof(ext_groups));
+#endif
+
/* Bind the socket to the netlink structure for anything. */
ret = bind(sock, (struct sockaddr *)&snl, sizeof(snl));
}
netlink_parse_rtattr(tb, max, RTA_DATA(rta), RTA_PAYLOAD(rta));
}
+/*
+ * Append 'len' bytes of raw data at the tail of netlink message 'n',
+ * zero-padding up to NLMSG_ALIGN(len) and advancing nlmsg_len.
+ * Returns true on success, false when the aligned data would exceed
+ * 'maxlen'.
+ */
+bool nl_addraw_l(struct nlmsghdr *n, unsigned int maxlen, const void *data,
+ unsigned int len)
+{
+ if (NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len) > maxlen) {
+ /* %u: maxlen is unsigned int; %d was a mismatched specifier */
+ zlog_err("ERROR message exceeded bound of %u", maxlen);
+ return false;
+ }
+
+ memcpy(NLMSG_TAIL(n), data, len);
+ memset((uint8_t *)NLMSG_TAIL(n) + len, 0, NLMSG_ALIGN(len) - len);
+ n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len);
+
+ return true;
+}
+
bool nl_attr_put(struct nlmsghdr *n, unsigned int maxlen, int type,
const void *data, unsigned int alen)
{
rtnh->rtnh_len = (uint8_t *)NLMSG_TAIL(n) - (uint8_t *)rtnh;
}
+/*
+ * Append attribute 'type' carrying 'alen' bytes of 'data' as a
+ * sub-attribute of 'rta', updating rta->rta_len.
+ * Returns true on success, false when the attribute does not fit within
+ * 'maxlen'.
+ */
+bool nl_rta_put(struct rtattr *rta, unsigned int maxlen, int type,
+ const void *data, int alen)
+{
+ struct rtattr *subrta;
+ int len = RTA_LENGTH(alen);
+
+ if (RTA_ALIGN(rta->rta_len) + RTA_ALIGN(len) > maxlen) {
+ /* %u: maxlen is unsigned int; %d was a mismatched specifier */
+ zlog_err("ERROR max allowed bound %u exceeded for rtattr",
+ maxlen);
+ return false;
+ }
+ subrta = (struct rtattr *)(((char *)rta) + RTA_ALIGN(rta->rta_len));
+ subrta->rta_type = type;
+ subrta->rta_len = len;
+ if (alen)
+ memcpy(RTA_DATA(subrta), data, alen);
+ rta->rta_len = NLMSG_ALIGN(rta->rta_len) + RTA_ALIGN(len);
+
+ return true;
+}
+
+/* Append a uint16_t attribute to 'rta'; see nl_rta_put() for semantics. */
+bool nl_rta_put16(struct rtattr *rta, unsigned int maxlen, int type,
+ uint16_t data)
+{
+ return nl_rta_put(rta, maxlen, type, &data, sizeof(uint16_t));
+}
+
+/* Append a uint64_t attribute to 'rta'; see nl_rta_put() for semantics. */
+bool nl_rta_put64(struct rtattr *rta, unsigned int maxlen, int type,
+ uint64_t data)
+{
+ return nl_rta_put(rta, maxlen, type, &data, sizeof(uint64_t));
+}
+
+/*
+ * Start a nested attribute of 'type' inside 'rta'.
+ * Returns a pointer to the nest header on success, or NULL when the
+ * attribute does not fit within 'maxlen'.
+ */
+struct rtattr *nl_rta_nest(struct rtattr *rta, unsigned int maxlen, int type)
+{
+ struct rtattr *nest = RTA_TAIL(rta);
+
+ /*
+ * nl_rta_put() returns true on success.  The check must be
+ * negated: the previous form returned NULL on success and fell
+ * through to set NLA_F_NESTED on failure.
+ */
+ if (!nl_rta_put(rta, maxlen, type, NULL, 0))
+ return NULL;
+
+ nest->rta_type |= NLA_F_NESTED;
+
+ return nest;
+}
+
+/* Close a nest opened by nl_rta_nest(): set the nest header's length to
+ * cover everything appended since, and return the updated total length
+ * of 'rta'.
+ */
+int nl_rta_nest_end(struct rtattr *rta, struct rtattr *nest)
+{
+ nest->rta_len = (uint8_t *)RTA_TAIL(rta) - (uint8_t *)nest;
+
+ return rta->rta_len;
+}
+
const char *nl_msg_type_to_str(uint16_t msg_type)
{
return lookup_msg(nlmsg_str, msg_type, "");
netlink_socket (). */
void kernel_init(struct zebra_ns *zns)
{
- uint32_t groups, dplane_groups;
+ uint32_t groups, dplane_groups, ext_groups;
#if defined SOL_NETLINK
int one, ret;
#endif
((uint32_t) 1 << (RTNLGRP_IPV6_NETCONF - 1)) |
((uint32_t) 1 << (RTNLGRP_MPLS_NETCONF - 1)));
+ /* Use setsockopt for > 31 group */
+ ext_groups = RTNLGRP_TUNNEL;
snprintf(zns->netlink.name, sizeof(zns->netlink.name),
"netlink-listen (NS %u)", zns->ns_id);
zns->netlink.sock = -1;
- if (netlink_socket(&zns->netlink, groups, zns->ns_id) < 0) {
+ if (netlink_socket(&zns->netlink, groups, ext_groups, zns->ns_id) < 0) {
zlog_err("Failure to create %s socket",
zns->netlink.name);
exit(-1);
snprintf(zns->netlink_cmd.name, sizeof(zns->netlink_cmd.name),
"netlink-cmd (NS %u)", zns->ns_id);
zns->netlink_cmd.sock = -1;
- if (netlink_socket(&zns->netlink_cmd, 0, zns->ns_id) < 0) {
+ if (netlink_socket(&zns->netlink_cmd, 0, 0, zns->ns_id) < 0) {
zlog_err("Failure to create %s socket",
zns->netlink_cmd.name);
exit(-1);
sizeof(zns->netlink_dplane_out.name), "netlink-dp (NS %u)",
zns->ns_id);
zns->netlink_dplane_out.sock = -1;
- if (netlink_socket(&zns->netlink_dplane_out, 0, zns->ns_id) < 0) {
+ if (netlink_socket(&zns->netlink_dplane_out, 0, 0, zns->ns_id) < 0) {
zlog_err("Failure to create %s socket",
zns->netlink_dplane_out.name);
exit(-1);
sizeof(zns->netlink_dplane_in.name), "netlink-dp-in (NS %u)",
zns->ns_id);
zns->netlink_dplane_in.sock = -1;
- if (netlink_socket(&zns->netlink_dplane_in, dplane_groups,
+ if (netlink_socket(&zns->netlink_dplane_in, dplane_groups, 0,
zns->ns_id) < 0) {
zlog_err("Failure to create %s socket",
zns->netlink_dplane_in.name);
unsigned short flags);
extern void netlink_parse_rtattr_nested(struct rtattr **tb, int max,
struct rtattr *rta);
+/*
+ * nl_addraw_l appends raw data to the netlink message buffer referenced
+ * by the netlink message header pointer. It ensures the aligned data
+ * does not overrun the maximum length.
+ * Returns true on success, false otherwise.
+ */
+extern bool nl_addraw_l(struct nlmsghdr *n, unsigned int maxlen,
+ const void *data, unsigned int len);
+/*
+ * nl_rta_put - add an additional optional attribute(rtattr) to the
+ * Netlink message buffer.
+ *
+ * Returns true if the attribute could be added to the message (fits into the
+ * buffer), otherwise false is returned.
+ */
+extern bool nl_rta_put(struct rtattr *rta, unsigned int maxlen, int type,
+ const void *data, int alen);
+extern bool nl_rta_put16(struct rtattr *rta, unsigned int maxlen, int type,
+ uint16_t data);
+extern bool nl_rta_put64(struct rtattr *rta, unsigned int maxlen, int type,
+ uint64_t data);
+/*
+ * nl_rta_nest - start an additional optional attribute (rtattr) nest.
+ *
+ * Returns a valid pointer to the beginning of the nest if the attribute
+ * describing the nest could be added to the message (fits into the buffer),
+ * otherwise NULL is returned.
+ */
+extern struct rtattr *nl_rta_nest(struct rtattr *rta, unsigned int maxlen,
+ int type);
+/*
+ * nl_rta_nest_end - finalize nesting of additional optional attributes.
+ *
+ * Updates the length field of the attribute header to include the appended
+ * attributes. Returns the total length of the Netlink message.
+ */
+extern int nl_rta_nest_end(struct rtattr *rta, struct rtattr *nest);
extern const char *nl_msg_type_to_str(uint16_t msg_type);
extern const char *nl_rtproto_to_str(uint8_t rtproto);
extern const char *nl_family_to_str(uint8_t family);
return ZEBRA_ERR_EPERM;
/* Clear and set rt_msghdr values */
- memset(&msg, 0, sizeof(struct rt_msghdr));
+ memset(&msg, 0, sizeof(msg));
msg.rtm.rtm_version = RTM_VERSION;
msg.rtm.rtm_type = message;
msg.rtm.rtm_seq = msg_seq++;
bool v6_rr_semantics = false;
/* Receive buffer size for kernel control sockets */
+#define RCVBUFSIZE_MIN 4194304
#ifdef HAVE_NETLINK
-uint32_t rcvbufsize = 4194304;
+uint32_t rcvbufsize = RCVBUFSIZE_MIN;
#else
uint32_t rcvbufsize = 128 * 1024;
#endif
break;
case 's':
rcvbufsize = atoi(optarg);
+ if (rcvbufsize < RCVBUFSIZE_MIN)
+ fprintf(stderr,
+ "Rcvbufsize is smaller than recommended value: %d\n",
+ RCVBUFSIZE_MIN);
break;
#ifdef HAVE_NETLINK
case 'n':
RIB_UPDATE_MAX
};
-extern void route_entry_copy_nexthops(struct route_entry *re,
- struct nexthop *nh);
int route_entry_update_nhe(struct route_entry *re,
struct nhg_hash_entry *new_nhghe);
"Manually set the router-id\n"
"IP address to use for router-id\n")
{
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
int idx = 0;
struct prefix rid;
"Manually set the IPv6 router-id\n"
"IPV6 address to use for router-id\n")
{
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
int idx = 0;
struct prefix rid;
"Remove the manually configured router-id\n"
"IP address to use for router-id\n")
{
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
struct prefix rid;
"Remove the manually configured IPv6 router-id\n"
"IPv6 address to use for router-id\n")
{
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
struct prefix rid;
if (tb[RTA_IIF])
iif = *(int *)RTA_DATA(tb[RTA_IIF]);
- if (tb[RTA_SRC])
- m->sg.src = *(struct in_addr *)RTA_DATA(tb[RTA_SRC]);
+ if (tb[RTA_SRC]) {
+ if (rtm->rtm_family == RTNL_FAMILY_IPMR)
+ m->src.ipaddr_v4 =
+ *(struct in_addr *)RTA_DATA(tb[RTA_SRC]);
+ else
+ m->src.ipaddr_v6 =
+ *(struct in6_addr *)RTA_DATA(tb[RTA_SRC]);
+ }
- if (tb[RTA_DST])
- m->sg.grp = *(struct in_addr *)RTA_DATA(tb[RTA_DST]);
+ if (tb[RTA_DST]) {
+ if (rtm->rtm_family == RTNL_FAMILY_IPMR)
+ m->grp.ipaddr_v4 =
+ *(struct in_addr *)RTA_DATA(tb[RTA_DST]);
+ else
+ m->grp.ipaddr_v6 =
+ *(struct in6_addr *)RTA_DATA(tb[RTA_DST]);
+ }
if (tb[RTA_EXPIRES])
m->lastused = *(unsigned long long *)RTA_DATA(tb[RTA_EXPIRES]);
}
}
+ if (rtm->rtm_family == RTNL_FAMILY_IPMR) {
+ SET_IPADDR_V4(&m->src);
+ SET_IPADDR_V4(&m->grp);
+ } else if (rtm->rtm_family == RTNL_FAMILY_IP6MR) {
+ SET_IPADDR_V6(&m->src);
+ SET_IPADDR_V6(&m->grp);
+ } else {
+ zlog_warn("%s: Invalid rtm_family received", __func__);
+ return 0;
+ }
+
if (IS_ZEBRA_DEBUG_KERNEL) {
struct interface *ifp = NULL;
struct zebra_vrf *zvrf = NULL;
zvrf = zebra_vrf_lookup_by_id(vrf);
ifp = if_lookup_by_index(iif, vrf);
zlog_debug(
- "MCAST VRF: %s(%d) %s (%pI4,%pI4) IIF: %s(%d) OIF: %s jiffies: %lld",
+ "MCAST VRF: %s(%d) %s (%pIA,%pIA) IIF: %s(%d) OIF: %s jiffies: %lld",
zvrf_name(zvrf), vrf, nl_msg_type_to_str(h->nlmsg_type),
- &m->sg.src, &m->sg.grp, ifp ? ifp->name : "Unknown",
- iif, oif_list,
- m->lastused);
+ &m->src, &m->grp, ifp ? ifp->name : "Unknown", iif,
+ oif_list, m->lastused);
}
return 0;
}
req.n.nlmsg_flags = NLM_F_REQUEST;
req.n.nlmsg_pid = zns->netlink_cmd.snl.nl_pid;
- req.ndm.ndm_family = RTNL_FAMILY_IPMR;
req.n.nlmsg_type = RTM_GETROUTE;
nl_attr_put32(&req.n, sizeof(req), RTA_IIF, mroute->ifindex);
nl_attr_put32(&req.n, sizeof(req), RTA_OIF, mroute->ifindex);
- nl_attr_put32(&req.n, sizeof(req), RTA_SRC, mroute->sg.src.s_addr);
- nl_attr_put32(&req.n, sizeof(req), RTA_DST, mroute->sg.grp.s_addr);
+
+ if (mroute->family == AF_INET) {
+ req.ndm.ndm_family = RTNL_FAMILY_IPMR;
+ nl_attr_put(&req.n, sizeof(req), RTA_SRC,
+ &mroute->src.ipaddr_v4,
+ sizeof(mroute->src.ipaddr_v4));
+ nl_attr_put(&req.n, sizeof(req), RTA_DST,
+ &mroute->grp.ipaddr_v4,
+ sizeof(mroute->grp.ipaddr_v4));
+ } else {
+ req.ndm.ndm_family = RTNL_FAMILY_IP6MR;
+ nl_attr_put(&req.n, sizeof(req), RTA_SRC,
+ &mroute->src.ipaddr_v6,
+ sizeof(mroute->src.ipaddr_v6));
+ nl_attr_put(&req.n, sizeof(req), RTA_DST,
+ &mroute->grp.ipaddr_v6,
+ sizeof(mroute->grp.ipaddr_v6));
+ }
+
/*
* What?
*
return 0;
}
- memset(&ip, 0, sizeof(struct ipaddr));
+ memset(&ip, 0, sizeof(ip));
ip.ipa_type = (ndm->ndm_family == AF_INET) ? IPADDR_V4 : IPADDR_V6;
memcpy(&ip.ip.addr, RTA_DATA(tb[NDA_DST]), RTA_PAYLOAD(tb[NDA_DST]));
return 0;
}
- memset(&mac, 0, sizeof(struct ethaddr));
+ memset(&mac, 0, sizeof(mac));
if (h->nlmsg_type == RTM_NEWNEIGH) {
if (tb[NDA_LLADDR]) {
if (RTA_PAYLOAD(tb[NDA_LLADDR]) != ETH_ALEN) {
struct zebra_ns *zns;
zvrf = zebra_vrf_get_evpn();
- if (!zvrf)
- return -1;
zns = zvrf->zns;
memset(&req, 0, sizeof(req));
struct zebra_ns *zns;
zvrf = zebra_vrf_get_evpn();
- if (!zvrf)
- return -1;
zns = zvrf->zns;
memset(&req, 0, sizeof(req));
uint32_t i;
zvrf = zebra_vrf_get_evpn();
- if (!zvrf)
- return -1;
zns = zvrf->zns;
memset(&req, 0, sizeof(req));
struct ipv6_mreq mreq;
- memset(&mreq, 0, sizeof(struct ipv6_mreq));
+ memset(&mreq, 0, sizeof(mreq));
inet_pton(AF_INET6, ALLROUTER, &mreq.ipv6mr_multiaddr);
mreq.ipv6mr_interface = ifp->ifindex;
struct ipv6_mreq mreq;
- memset(&mreq, 0, sizeof(struct ipv6_mreq));
+ memset(&mreq, 0, sizeof(mreq));
inet_pton(AF_INET6, ALLROUTER, &mreq.ipv6mr_multiaddr);
mreq.ipv6mr_interface = ifp->ifindex;
}
/* otherwise create a new one */
tmc = XCALLOC(MTYPE_TM_CHUNK, sizeof(struct table_manager_chunk));
- if (!tmc)
- return NULL;
if (zvrf->tbl_mgr->start || zvrf->tbl_mgr->end)
manual_conf = true;
struct stream *s = stream_new(ZEBRA_MAX_PACKET_SIZ);
zclient_create_header(s, ZEBRA_PW_STATUS_UPDATE, pw->vrf_id);
- stream_write(s, pw->ifname, IF_NAMESIZE);
+ stream_write(s, pw->ifname, INTERFACE_NAMSIZ);
stream_putl(s, pw->ifindex);
stream_putl(s, pw->status);
* the nexthop and associated MAC need to be installed.
*/
if (CHECK_FLAG(flags, ZEBRA_FLAG_EVPN_ROUTE)) {
- memset(&vtep_ip, 0, sizeof(struct ipaddr));
+ memset(&vtep_ip, 0, sizeof(vtep_ip));
vtep_ip.ipa_type = IPADDR_V4;
memcpy(&(vtep_ip.ipaddr_v4), &(api_nh->gate.ipv4),
sizeof(struct in_addr));
* the nexthop and associated MAC need to be installed.
*/
if (CHECK_FLAG(flags, ZEBRA_FLAG_EVPN_ROUTE)) {
- memset(&vtep_ip, 0, sizeof(struct ipaddr));
+ memset(&vtep_ip, 0, sizeof(vtep_ip));
vtep_ip.ipa_type = IPADDR_V6;
memcpy(&vtep_ip.ipaddr_v6, &(api_nh->gate.ipv6),
sizeof(struct in6_addr));
static void zread_pseudowire(ZAPI_HANDLER_ARGS)
{
struct stream *s;
- char ifname[IF_NAMESIZE];
+ char ifname[INTERFACE_NAMSIZ];
ifindex_t ifindex;
int type;
int af;
s = msg;
/* Get data. */
- STREAM_GET(ifname, s, IF_NAMESIZE);
- ifname[IF_NAMESIZE - 1] = '\0';
+ STREAM_GET(ifname, s, INTERFACE_NAMSIZ);
+ ifname[INTERFACE_NAMSIZ - 1] = '\0';
STREAM_GETL(s, ifindex);
STREAM_GETL(s, type);
STREAM_GETL(s, af);
struct zebra_vrf *zvrf;
zvrf = zebra_vrf_get_evpn();
- if (zvrf && zvrf->advertise_gw_macip)
+ if (zvrf->advertise_gw_macip)
return 1;
if (zevpn && zevpn->advertise_gw_macip)
struct zebra_vrf *zvrf;
zvrf = zebra_vrf_get_evpn();
- if (zvrf && zvrf->advertise_svi_macip)
+ if (zvrf->advertise_svi_macip)
return 1;
if (zevpn && zevpn->advertise_svi_macip)
struct zebra_evpn *zevpn = NULL;
zvrf = zebra_vrf_get_evpn();
- assert(zvrf);
- memset(&tmp_vni, 0, sizeof(struct zebra_evpn));
+ memset(&tmp_vni, 0, sizeof(tmp_vni));
tmp_vni.vni = vni;
zevpn = hash_lookup(zvrf->evpn_table, &tmp_vni);
struct zebra_evpn *zevpn = NULL;
zvrf = zebra_vrf_get_evpn();
- assert(zvrf);
- memset(&tmp_zevpn, 0, sizeof(struct zebra_evpn));
+ memset(&tmp_zevpn, 0, sizeof(tmp_zevpn));
tmp_zevpn.vni = vni;
zevpn = hash_get(zvrf->evpn_table, &tmp_zevpn, zebra_evpn_alloc);
- assert(zevpn);
zebra_evpn_es_evi_init(zevpn);
struct zebra_evpn *tmp_zevpn;
zvrf = zebra_vrf_get_evpn();
- assert(zvrf);
zevpn->svi_if = NULL;
}
zvrf = zebra_vrf_get_evpn();
- if (!zvrf)
- return;
-
-
if (zebra_evpn_mac_remote_macip_add(zevpn, zvrf, macaddr, ipa_len,
ipaddr, &mac, vtep_ip, flags, seq,
esi)
char up_str[MONOTIME_STRLEN];
zvrf = zebra_vrf_get_evpn();
- if (!zvrf)
- return;
-
vty = (struct vty *)ctxt;
prefix_mac2str(&mac->macaddr, buf1, sizeof(buf1));
struct zebra_mac tmp_mac;
struct zebra_mac *mac = NULL;
- memset(&tmp_mac, 0, sizeof(struct zebra_mac));
+ memset(&tmp_mac, 0, sizeof(tmp_mac));
memcpy(&tmp_mac.macaddr, macaddr, ETH_ALEN);
mac = hash_get(zevpn->mac_table, &tmp_mac, zebra_evpn_mac_alloc);
- assert(mac);
mac->zevpn = zevpn;
mac->dad_mac_auto_recovery_timer = NULL;
if (!zevpn->mac_table)
return;
- memset(&wctx, 0, sizeof(struct mac_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
wctx.uninstall = uninstall;
wctx.upd_client = upd_client;
if (!zevpn->mac_table)
return;
- memset(&wctx, 0, sizeof(struct mac_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
hash_iterate(zevpn->mac_table, zebra_evpn_send_mac_hash_entry_to_client,
acc_bd->mbr_zifs = list_new();
/* Add to hash */
- if (!hash_get(zmh_info->evpn_vlan_table, acc_bd, hash_alloc_intern)) {
- XFREE(MTYPE_ZACC_BD, acc_bd);
- return NULL;
- }
+ (void)hash_get(zmh_info->evpn_vlan_table, acc_bd, hash_alloc_intern);
/* check if an svi exists for the vlan */
if (br_if) {
nh_id = id | EVPN_NHG_ID_TYPE_BIT;
/* Add to NHG hash */
es->nhg_id = nh_id;
- if (!hash_get(zmh_info->nhg_table, es, hash_alloc_intern)) {
- bf_release_index(zmh_info->nh_id_bitmap, id);
- return 0;
- }
+ (void)hash_get(zmh_info->nhg_table, es, hash_alloc_intern);
} else {
nh_id = id | EVPN_NH_ID_TYPE_BIT;
}
nh = XCALLOC(MTYPE_L2_NH, sizeof(*nh));
nh->vtep_ip = vtep_ip;
- if (!hash_get(zmh_info->nh_ip_table, nh, hash_alloc_intern)) {
- XFREE(MTYPE_L2_NH, nh);
- return NULL;
- }
+ (void)hash_get(zmh_info->nh_ip_table, nh, hash_alloc_intern);
nh->nh_id = zebra_evpn_nhid_alloc(NULL);
if (!nh->nh_id) {
return;
zvrf = zebra_vrf_get_evpn();
- if (!zvrf) {
- zmh_info->flags |= ZEBRA_EVPN_MH_DUP_ADDR_DETECT_OFF;
- return;
- }
-
old_detect = zebra_evpn_do_dup_addr_detect(zvrf);
zmh_info->flags |= ZEBRA_EVPN_MH_DUP_ADDR_DETECT_OFF;
new_detect = zebra_evpn_do_dup_addr_detect(zvrf);
* access port is associated with an ES-ID
* - Remotes ESs are added by BGP based on received/remote EAD/Type-1 routes
* (ZEBRA_EVPNES_REMOTE)
- * - An ES can be simulatenously LOCAL and REMOTE; infact all LOCAL ESs are
+ * - An ES can be simultaneously LOCAL and REMOTE; in fact all LOCAL ESs are
* expected to have REMOTE ES peers.
*/
struct zebra_evpn_es {
struct zebra_neigh tmp_n;
struct zebra_neigh *n = NULL;
- memset(&tmp_n, 0, sizeof(struct zebra_neigh));
+ memset(&tmp_n, 0, sizeof(tmp_n));
memcpy(&tmp_n.ip, ip, sizeof(struct ipaddr));
n = hash_get(zevpn->neigh_table, &tmp_n, zebra_evpn_neigh_alloc);
- assert(n);
n->state = ZEBRA_NEIGH_INACTIVE;
n->zevpn = zevpn;
if (!zevpn->neigh_table)
return;
- memset(&wctx, 0, sizeof(struct neigh_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
wctx.uninstall = uninstall;
wctx.upd_client = upd_client;
{
struct neigh_walk_ctx wctx;
- memset(&wctx, 0, sizeof(struct neigh_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
hash_iterate(zevpn->neigh_table,
char up_str[MONOTIME_STRLEN];
zvrf = zebra_vrf_get_evpn();
- if (!zvrf)
- return;
-
uptime = monotime(NULL);
uptime -= n->uptime;
vxlan_if = zl3vni_map_to_vxlan_if(zl3vni);
svi_if = zl3vni_map_to_svi_if(zl3vni);
- memset(&key, 0, sizeof(struct fpm_mac_info_t));
+ memset(&key, 0, sizeof(key));
memcpy(&key.macaddr, &rmac->macaddr, ETH_ALEN);
key.vni = zl3vni->vni;
UNSET_FLAG(fpm_mac->fpm_flags, ZEBRA_MAC_UPDATE_FPM);
return 0;
}
- } else {
+ } else
fpm_mac = hash_get(zfpm_g->fpm_mac_info_table, &key,
zfpm_mac_info_alloc);
- if (!fpm_mac)
- return 0;
- }
fpm_mac->r_vtep_ip.s_addr = rmac->fwd_info.r_vtep_ip.s_addr;
fpm_mac->zebra_flags = rmac->flags;
/* Locate or allocate LSP entry. */
tmp_ile.in_label = label;
lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc);
- if (!lsp)
- return -1;
/* For each active nexthop, create NHLFE. Note that we deliberately skip
* recursive nexthops right now, because intermediate hops won't
return 0;
/* Lookup nexthop in IPv4 routing table. */
- memset(&p, 0, sizeof(struct prefix_ipv4));
+ memset(&p, 0, sizeof(p));
p.family = AF_INET;
p.prefixlen = IPV4_MAX_BITLEN;
p.prefix = nexthop->gate.ipv4;
return 0;
/* Lookup nexthop in IPv6 routing table. */
- memset(&p, 0, sizeof(struct prefix_ipv6));
+ memset(&p, 0, sizeof(p));
p.family = AF_INET6;
p.prefixlen = IPV6_MAX_BITLEN;
p.prefix = nexthop->gate.ipv6;
/* Find or create LSP object */
tmp_ile.in_label = zl->local_label;
lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc);
- if (!lsp)
- return -1;
}
/* Prep for route/FEC update if requested */
/* Find or create LSP object */
tmp_ile.in_label = in_label;
lsp = hash_get(lsp_table, &tmp_ile, lsp_alloc);
- if (!lsp)
- return -1;
nhlfe = lsp_add_nhlfe(lsp, type, num_out_labels, out_labels, gtype,
gate, ifindex, false /*backup*/);
/* Find or create LSP. */
tmp_ile.in_label = in_label;
lsp = hash_get(slsp_table, &tmp_ile, lsp_alloc);
- if (!lsp)
- return -1;
nhlfe = nhlfe_find(&lsp->nhlfe_list, ZEBRA_LSP_STATIC, gtype, gate,
ifindex);
return CMD_WARNING_CONFIG_FAILED;
}
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
ret = str2prefix(prefix, &p);
if (ret <= 0) {
vty_out(vty, "%% Malformed address\n");
int suc = -1;
memset(&mroute, 0, sizeof(mroute));
- STREAM_GET(&mroute.sg.src, msg, 4);
- STREAM_GET(&mroute.sg.grp, msg, 4);
- STREAM_GETL(msg, mroute.ifindex);
+ STREAM_GETL(msg, mroute.family);
- if (IS_ZEBRA_DEBUG_KERNEL) {
- char sbuf[40];
- char gbuf[40];
+ switch (mroute.family) {
+ case AF_INET:
+ SET_IPADDR_V4(&mroute.src);
+ SET_IPADDR_V4(&mroute.grp);
+ STREAM_GET(&mroute.src.ipaddr_v4, msg,
+ sizeof(mroute.src.ipaddr_v4));
+ STREAM_GET(&mroute.grp.ipaddr_v4, msg,
+ sizeof(mroute.grp.ipaddr_v4));
+ break;
+ case AF_INET6:
+ SET_IPADDR_V6(&mroute.src);
+ SET_IPADDR_V6(&mroute.grp);
+ STREAM_GET(&mroute.src.ipaddr_v6, msg,
+ sizeof(mroute.src.ipaddr_v6));
+ STREAM_GET(&mroute.grp.ipaddr_v6, msg,
+ sizeof(mroute.grp.ipaddr_v6));
+ break;
+ default:
+ zlog_warn("%s: Invalid address family received while parsing",
+ __func__);
+ return;
+ }
- inet_ntop(AF_INET, &mroute.sg.src, sbuf, sizeof(sbuf));
- inet_ntop(AF_INET, &mroute.sg.grp, gbuf, sizeof(gbuf));
+ STREAM_GETL(msg, mroute.ifindex);
- zlog_debug("Asking for (%s,%s)[%s(%u)] mroute information",
- sbuf, gbuf, zvrf->vrf->name, zvrf->vrf->vrf_id);
- }
+ if (IS_ZEBRA_DEBUG_KERNEL)
+ zlog_debug("Asking for (%pIA,%pIA)[%s(%u)] mroute information",
+ &mroute.src, &mroute.grp, zvrf->vrf->name,
+ zvrf->vrf->vrf_id);
suc = kernel_get_ipmr_sg_stats(zvrf, &mroute);
stream_reset(s);
zclient_create_header(s, ZEBRA_IPMR_ROUTE_STATS, zvrf_id(zvrf));
- stream_put_in_addr(s, &mroute.sg.src);
- stream_put_in_addr(s, &mroute.sg.grp);
+
+ if (mroute.family == AF_INET) {
+ stream_write(s, &mroute.src.ipaddr_v4,
+ sizeof(mroute.src.ipaddr_v4));
+ stream_write(s, &mroute.grp.ipaddr_v4,
+ sizeof(mroute.grp.ipaddr_v4));
+ } else {
+ stream_write(s, &mroute.src.ipaddr_v6,
+ sizeof(mroute.src.ipaddr_v6));
+ stream_write(s, &mroute.grp.ipaddr_v6,
+ sizeof(mroute.grp.ipaddr_v6));
+ }
+
stream_put(s, &mroute.lastused, sizeof(mroute.lastused));
stream_putl(s, (uint32_t)suc);
#endif
struct mcast_route_data {
- struct prefix_sg sg;
+ int family;
+ struct ipaddr src;
+ struct ipaddr grp;
unsigned int ifindex;
unsigned long long lastused;
};
struct zebra_vrf *zvrf;
vni_t vni = 0;
struct zebra_l3vni *zl3vni = NULL;
- struct zebra_vrf *zvrf_evpn = NULL;
char err[ERR_STR_SZ];
bool pfx_only = false;
const struct lyd_node *pn_dnode;
case NB_EV_ABORT:
return NB_OK;
case NB_EV_VALIDATE:
- zvrf_evpn = zebra_vrf_get_evpn();
- if (!zvrf_evpn) {
- snprintf(args->errmsg, args->errmsg_len,
- "evpn vrf is not present.");
- return NB_ERR_VALIDATION;
- }
vni = yang_dnode_get_uint32(args->dnode, NULL);
/* Get vrf info from parent node, reject configuration
* if zebra vrf already mapped to different vni id.
if (args->list_entry == NULL)
rn = route_top(zrt->table);
else
- rn = srcdest_route_next((struct route_node *)rn);
+ rn = srcdest_route_next(rn);
/* Optimization: skip empty route nodes. */
while (rn && rn->info == NULL)
rn = route_next(rn);
if (zebra_ns_notify_self_identify(&default_netns_stat))
return false;
- memset(&st, 0, sizeof(struct stat));
+ memset(&st, 0, sizeof(st));
snprintf(netnspath, sizeof(netnspath), "%s/%s", NS_RUN_DIR, name);
/* compare with local stat */
if (stat(netnspath, &st) == 0 &&
return -1;
}
- hash_get(zrouter.nhgs_id, nhe, hash_alloc_intern);
+ (void)hash_get(zrouter.nhgs_id, nhe, hash_alloc_intern);
return 0;
}
{
char buf[64];
- memset(&ptm_cb, 0, sizeof(struct zebra_ptm_cb));
+ memset(&ptm_cb, 0, sizeof(ptm_cb));
ptm_cb.out_data = calloc(1, ZEBRA_PTM_SEND_MAX_SOCKBUF);
if (!ptm_cb.out_data) {
}
/* Make server socket. */
- memset(&addr, 0, sizeof(struct sockaddr_un));
+ memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
memcpy(&addr.sun_path, ZEBRA_PTM_SOCK_NAME,
sizeof(ZEBRA_PTM_SOCK_NAME));
return -1;
}
- memset(&src_prefix, 0, sizeof(struct prefix));
+ memset(&src_prefix, 0, sizeof(src_prefix));
if (strcmp(ZEBRA_PTM_INVALID_SRC_IP, src_str)) {
if (str2prefix(src_str, &src_prefix) == 0) {
flog_err(EC_ZEBRA_PREFIX_PARSE_ERROR,
struct zebra_pw {
RB_ENTRY(zebra_pw) pw_entry, static_pw_entry;
vrf_id_t vrf_id;
- char ifname[IF_NAMESIZE];
+ char ifname[INTERFACE_NAMSIZ];
ifindex_t ifindex;
int type;
int af;
return 1;
}
-/**
- * copy_nexthop - copy a nexthop to the rib structure.
- */
-void route_entry_copy_nexthops(struct route_entry *re, struct nexthop *nh)
-{
- assert(!re->nhe->nhg.nexthop);
- copy_nexthops(&re->nhe->nhg.nexthop, nh, NULL);
-}
-
static void route_entry_attach_ref(struct route_entry *re,
struct nhg_hash_entry *new)
{
if (!table)
return 0;
- memset(&p, 0, sizeof(struct prefix));
+ memset(&p, 0, sizeof(p));
p.family = afi;
if (afi == AFI_IP) {
p.u.prefix4 = addr->ipv4;
assert(proto);
assert(rmap);
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
assert(proto);
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
assert(rmap);
assert(proto);
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
assert(proto);
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
assert(proto);
assert(rmap);
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
assert(proto);
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
assert(rmap);
assert(proto);
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
assert(proto);
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
/*
* special macro to allow us to get the correct zebra_vrf
*/
-#define ZEBRA_DECLVAR_CONTEXT(A, B) \
- struct vrf *A; \
- if (vty->node == CONFIG_NODE) \
- A = vrf_lookup_by_id(VRF_DEFAULT); \
- else \
- A = VTY_GET_CONTEXT(vrf); \
- VTY_CHECK_CONTEXT(A); \
- struct zebra_vrf *B = A->info
+#define ZEBRA_DECLVAR_CONTEXT_VRF(vrfptr, zvrfptr) \
+ VTY_DECLVAR_CONTEXT_VRF(vrfptr); \
+ struct zebra_vrf *zvrfptr = vrfptr->info; \
+ MACRO_REQUIRE_SEMICOLON() /* end */
static inline vrf_id_t zvrf_id(struct zebra_vrf *zvrf)
{
"Filter Next Hop tracking route resolution\n"
"Resolve via default route\n")
{
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
"Filter Next Hop tracking route resolution\n"
"Resolve via default route\n")
{
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
"Filter Next Hop tracking route resolution\n"
"Resolve via default route\n")
{
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
"Filter Next Hop tracking route resolution\n"
"Resolve via default route\n")
{
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
}
}
-/*
- * Show IPv6 mroute command.Used to dump
- * the Multicast routing table.
- */
-DEFUN (show_ipv6_mroute,
- show_ipv6_mroute_cmd,
- "show ipv6 mroute [vrf NAME]",
- SHOW_STR
- IP_STR
- "IPv6 Multicast routing table\n"
- VRF_CMD_HELP_STR)
-{
- struct route_table *table;
- struct route_node *rn;
- struct route_entry *re;
- int first = 1;
- vrf_id_t vrf_id = VRF_DEFAULT;
-
- if (argc == 5)
- VRF_GET_ID(vrf_id, argv[4]->arg, false);
-
- table = zebra_vrf_table(AFI_IP6, SAFI_MULTICAST, vrf_id);
- if (!table)
- return CMD_SUCCESS;
-
- /* Show all IPv6 route. */
- for (rn = route_top(table); rn; rn = srcdest_route_next(rn))
- RNODE_FOREACH_RE (rn, re) {
- if (first) {
- vty_out(vty, SHOW_ROUTE_V6_HEADER);
- first = 0;
- }
- vty_show_ip_route(vty, rn, re, NULL, false, false);
- }
- return CMD_SUCCESS;
-}
-
-DEFUN (show_ipv6_mroute_vrf_all,
- show_ipv6_mroute_vrf_all_cmd,
- "show ipv6 mroute vrf all",
- SHOW_STR
- IP_STR
- "IPv6 Multicast routing table\n"
- VRF_ALL_CMD_HELP_STR)
-{
- struct route_table *table;
- struct route_node *rn;
- struct route_entry *re;
- struct vrf *vrf;
- struct zebra_vrf *zvrf;
- int first = 1;
-
- RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
- if ((zvrf = vrf->info) == NULL
- || (table = zvrf->table[AFI_IP6][SAFI_MULTICAST]) == NULL)
- continue;
-
- /* Show all IPv6 route. */
- for (rn = route_top(table); rn; rn = srcdest_route_next(rn))
- RNODE_FOREACH_RE (rn, re) {
- if (first) {
- vty_out(vty, SHOW_ROUTE_V6_HEADER);
- first = 0;
- }
- vty_show_ip_route(vty, rn, re, NULL, false,
- false);
- }
- }
- return CMD_SUCCESS;
-}
-
DEFUN (allow_external_route_update,
allow_external_route_update_cmd,
"allow-external-route-update",
{
int filter = 0;
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
assert(vrf);
assert(zvrf);
{
int filter = 0;
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
vni_t vni = strtoul(argv[2]->arg, NULL, 10);
assert(vrf);
ipforward_ipv6() ? "On" : "Off");
ttable_add_row(table, "MPLS|%s", mpls_enabled ? "On" : "Off");
ttable_add_row(table, "EVPN|%s", is_evpn_enabled() ? "On" : "Off");
+ ttable_add_row(table, "Kernel socket buffer size|%d", rcvbufsize);
#ifdef GNU_LINUX
"Start Routing Table\n"
"End Routing Table\n")
{
- ZEBRA_DECLVAR_CONTEXT(vrf, zvrf);
+ ZEBRA_DECLVAR_CONTEXT_VRF(vrf, zvrf);
if (!zvrf)
return CMD_WARNING;
install_element(VRF_NODE, &no_ipv6_nht_default_route_cmd);
install_element(CONFIG_NODE, &rnh_hide_backups_cmd);
- install_element(VIEW_NODE, &show_ipv6_mroute_cmd);
-
- /* Commands for VRF */
- install_element(VIEW_NODE, &show_ipv6_mroute_vrf_all_cmd);
-
install_element(VIEW_NODE, &show_frr_cmd);
install_element(VIEW_NODE, &show_evpn_global_cmd);
install_element(VIEW_NODE, &show_evpn_vni_cmd);
* size, we try to be a bit more elegant in display by first computing
* the maximum width.
*/
- memset(&wctx, 0, sizeof(struct neigh_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
wctx.vty = vty;
wctx.addr_width = 15;
return;
}
- memset(&wctx, 0, sizeof(struct neigh_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
wctx.vty = vty;
wctx.addr_width = 15;
} else
json_object_int_add(json_evpn, "numNextHops", num_nh);
- memset(&wctx, 0, sizeof(struct nh_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.vty = vty;
wctx.json = json_evpn;
hash_iterate(zl3vni->nh_table, zl3vni_print_nh_hash, &wctx);
* under the vni. Re-assign primary json object to fill
* next vni information.
*/
- memset(&wctx, 0, sizeof(struct rmac_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.vty = vty;
wctx.json = json_evpn;
hash_iterate(zl3vni->rmac_table, zl3vni_print_rmac_hash, &wctx);
zvrf = zebra_vrf_get_evpn();
- if (!zvrf)
- return NS_WALK_STOP;
-
/* Walk VxLAN interfaces and create EVPN hash. */
for (rn = route_top(zns->if_table); rn; rn = route_next(rn)) {
vni_t vni;
static void zevpn_build_hash_table(void)
{
- ns_walk_func(zevpn_build_hash_table_zns,
- (void *)NULL,
- (void **)NULL);
+ ns_walk_func(zevpn_build_hash_table_zns, NULL, NULL);
}
/*
struct zebra_mac tmp_rmac;
struct zebra_mac *zrmac = NULL;
- memset(&tmp_rmac, 0, sizeof(struct zebra_mac));
+ memset(&tmp_rmac, 0, sizeof(tmp_rmac));
memcpy(&tmp_rmac.macaddr, rmac, ETH_ALEN);
zrmac = hash_get(zl3vni->rmac_table, &tmp_rmac, zl3vni_rmac_alloc);
- assert(zrmac);
-
zrmac->nh_list = list_new();
zrmac->nh_list->cmp = (int (*)(void *, void *))l3vni_rmac_nh_list_cmp;
zrmac->nh_list->del = (void (*)(void *))l3vni_rmac_nh_free;
struct ipaddr ipv4_vtep;
if (!zl3vni_nh_lookup(zl3vni, vtep_ip)) {
- memset(&ipv4_vtep, 0, sizeof(struct ipaddr));
+ memset(&ipv4_vtep, 0, sizeof(ipv4_vtep));
ipv4_vtep.ipa_type = IPADDR_V4;
if (vtep_ip->ipa_type == IPADDR_V6)
ipv4_mapped_ipv6_to_ipv4(&vtep_ip->ipaddr_v6,
struct zebra_neigh tmp_n;
struct zebra_neigh *n = NULL;
- memset(&tmp_n, 0, sizeof(struct zebra_neigh));
+ memset(&tmp_n, 0, sizeof(tmp_n));
memcpy(&tmp_n.ip, ip, sizeof(struct ipaddr));
n = hash_get(zl3vni->nh_table, &tmp_n, zl3vni_nh_alloc);
- assert(n);
RB_INIT(host_rb_tree_entry, &n->host_rb);
struct zebra_l3vni tmp_l3vni;
struct zebra_l3vni *zl3vni = NULL;
- memset(&tmp_l3vni, 0, sizeof(struct zebra_l3vni));
+ memset(&tmp_l3vni, 0, sizeof(tmp_l3vni));
tmp_l3vni.vni = vni;
zl3vni = hash_lookup(zrouter.l3vni_table, &tmp_l3vni);
struct zebra_l3vni tmp_zl3vni;
struct zebra_l3vni *zl3vni = NULL;
- memset(&tmp_zl3vni, 0, sizeof(struct zebra_l3vni));
+ memset(&tmp_zl3vni, 0, sizeof(tmp_zl3vni));
tmp_zl3vni.vni = vni;
zl3vni = hash_get(zrouter.l3vni_table, &tmp_zl3vni, zl3vni_alloc);
- assert(zl3vni);
zl3vni->vrf_id = vrf_id;
zl3vni->svi_if = NULL;
zvrf = zebra_vrf_get_evpn();
- if (!zvrf)
- return NS_WALK_STOP;
-
assert(_pifp);
/* loop through all vxlan-interface */
assert(zvrf);
/* get the svi and vrr rmac values */
- memset(&svi_rmac, 0, sizeof(struct ethaddr));
+ memset(&svi_rmac, 0, sizeof(svi_rmac));
zl3vni_get_svi_rmac(zl3vni, &svi_rmac);
zl3vni_get_vrr_rmac(zl3vni, &vrr_rmac);
zebra_evpn_mac_del_all(zevpn, 0, 0, DEL_ALL_MAC);
/* Free up all remote VTEPs, if any. */
- zebra_evpn_vtep_del_all(zevpn, 0);
+ zebra_evpn_vtep_del_all(zevpn, 1);
/* Delete the hash entry. */
if (zebra_evpn_vxlan_del(zevpn)) {
* address. Rmac is programmed against the ipv4 vtep because we only
* support ipv4 tunnels in the h/w right now
*/
- memset(&ipv4_vtep, 0, sizeof(struct ipaddr));
+ memset(&ipv4_vtep, 0, sizeof(ipv4_vtep));
ipv4_vtep.ipa_type = IPADDR_V4;
if (vtep_ip->ipa_type == IPADDR_V6)
ipv4_mapped_ipv6_to_ipv4(&vtep_ip->ipaddr_v6,
if (use_json)
json = json_object_new_object();
- memset(&wctx, 0, sizeof(struct rmac_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.vty = vty;
wctx.json = json;
if (!use_json) {
* size, we try to be a bit more elegant in display by first computing
* the maximum width.
*/
- memset(&wctx, 0, sizeof(struct neigh_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
wctx.vty = vty;
wctx.addr_width = 15;
if (use_json)
json = json_object_new_object();
- memset(&wctx, 0, sizeof(struct neigh_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
wctx.vty = vty;
wctx.addr_width = 15;
* size, we try to be a bit more elegant in display by first computing
* the maximum width.
*/
- memset(&wctx, 0, sizeof(struct neigh_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
wctx.vty = vty;
wctx.addr_width = 15;
json_mac = json_object_new_object();
}
- memset(&wctx, 0, sizeof(struct mac_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
wctx.vty = vty;
wctx.json = json_mac;
if (use_json)
json = json_object_new_object();
- memset(&wctx, 0, sizeof(struct mac_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.vty = vty;
wctx.json = json;
wctx.print_dup = print_dup;
if (use_json)
json = json_object_new_object();
- memset(&wctx, 0, sizeof(struct mac_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.vty = vty;
wctx.json = json;
wctx.print_dup = print_dup;
if (use_json)
json = json_object_new_object();
- memset(&wctx, 0, sizeof(struct mac_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.vty = vty;
wctx.flags = SHOW_REMOTE_MAC_FROM_VTEP;
wctx.r_vtep_ip = vtep_ip;
json_mac = json_object_new_object();
}
- memset(&wctx, 0, sizeof(struct mac_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
wctx.vty = vty;
wctx.json = json_mac;
zvrf = (struct zebra_vrf *)args[0];
if (hashcount(zevpn->neigh_table)) {
- memset(&n_wctx, 0, sizeof(struct neigh_walk_ctx));
+ memset(&n_wctx, 0, sizeof(n_wctx));
n_wctx.zevpn = zevpn;
n_wctx.zvrf = zvrf;
hash_iterate(zevpn->neigh_table,
}
if (num_valid_macs(zevpn)) {
- memset(&m_wctx, 0, sizeof(struct mac_walk_ctx));
+ memset(&m_wctx, 0, sizeof(m_wctx));
m_wctx.zevpn = zevpn;
m_wctx.zvrf = zvrf;
hash_iterate(zevpn->mac_table, zevpn_clear_dup_mac_hash, &m_wctx);
}
if (hashcount(zevpn->neigh_table)) {
- memset(&n_wctx, 0, sizeof(struct neigh_walk_ctx));
+ memset(&n_wctx, 0, sizeof(n_wctx));
n_wctx.zevpn = zevpn;
n_wctx.zvrf = zvrf;
hash_iterate(zevpn->neigh_table,
}
if (num_valid_macs(zevpn)) {
- memset(&m_wctx, 0, sizeof(struct mac_walk_ctx));
+ memset(&m_wctx, 0, sizeof(m_wctx));
m_wctx.zevpn = zevpn;
m_wctx.zvrf = zvrf;
hash_iterate(zevpn->mac_table, zevpn_clear_dup_mac_hash, &m_wctx);
json_mac = json_object_new_object();
}
- memset(&wctx, 0, sizeof(struct mac_walk_ctx));
+ memset(&wctx, 0, sizeof(wctx));
wctx.zevpn = zevpn;
wctx.vty = vty;
wctx.flags = SHOW_REMOTE_MAC_FROM_VTEP;
return;
zvrf = zebra_vrf_get_evpn();
- if (!zvrf)
- return;
num_l3vnis = hashcount(zrouter.l3vni_table);
num_l2vnis = hashcount(zvrf->evpn_table);
}
zvrf = zebra_vrf_get_evpn();
- if (!zvrf) {
- if (IS_ZEBRA_DEBUG_VXLAN)
- zlog_debug(" No Evpn Global Vrf found");
- return -1;
- }
-
return zebra_evpn_add_update_local_mac(zvrf, zevpn, ifp, macaddr, vid,
sticky, local_inactive,
dp_static, NULL);
struct ethaddr macaddr;
struct zebra_evpn *zevpn = NULL;
- memset(&ip, 0, sizeof(struct ipaddr));
- memset(&macaddr, 0, sizeof(struct ethaddr));
+ memset(&ip, 0, sizeof(ip));
+ memset(&macaddr, 0, sizeof(macaddr));
/* Check if EVPN is enabled. */
if (!is_evpn_enabled())
zebra_evpn_send_add_to_client(zevpn);
/* Install any remote neighbors for this VNI. */
- memset(&n_wctx, 0, sizeof(struct neigh_walk_ctx));
+ memset(&n_wctx, 0, sizeof(n_wctx));
n_wctx.zevpn = zevpn;
hash_iterate(zevpn->neigh_table, zebra_evpn_install_neigh_hash,
&n_wctx);
zebra_evpn_read_mac_neigh(zevpn, ifp);
- memset(&m_wctx, 0, sizeof(struct mac_walk_ctx));
+ memset(&m_wctx, 0, sizeof(m_wctx));
m_wctx.zevpn = zevpn;
hash_iterate(zevpn->mac_table,
zebra_evpn_install_mac_hash, &m_wctx);
- memset(&n_wctx, 0, sizeof(struct neigh_walk_ctx));
+ memset(&n_wctx, 0, sizeof(n_wctx));
n_wctx.zevpn = zevpn;
hash_iterate(zevpn->neigh_table,
zebra_evpn_install_neigh_hash, &n_wctx);
struct zebra_vrf *zvrf_evpn = NULL;
zvrf_evpn = zebra_vrf_get_evpn();
- if (!zvrf_evpn)
- return -1;
if (IS_ZEBRA_DEBUG_VXLAN)
zlog_debug("vrf %s vni %u %s", zvrf_name(zvrf), vni,
/* add the L3-VNI to the global table */
zl3vni = zl3vni_add(vni, zvrf_id(zvrf));
- if (!zl3vni) {
- snprintf(err, err_str_sz, "Could not add L3-VNI");
- return -1;
- }
/* associate the vrf with vni */
zvrf->l3vni = vni;
{
struct zebra_vrf *evpn_zvrf = zebra_vrf_get_evpn();
- if (!zvrf)
- return;
hash_iterate(zvrf->evpn_table, zebra_evpn_vxlan_cleanup_all, zvrf);
zebra_vxlan_cleanup_sg_table(zvrf);
}
vxlan_sg = zebra_vxlan_sg_new(zvrf, sg);
- if (!vxlan_sg) {
- if (parent)
- zebra_vxlan_sg_do_deref(zvrf, sip, sg->grp);
- return vxlan_sg;
- }
zebra_vxlan_sg_send(zvrf, sg, vxlan_sg->sg_str,
ZEBRA_VXLAN_SG_ADD);
{
struct zebra_vrf *zvrf = zebra_vrf_get_evpn();
- if (zvrf && CHECK_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG)) {
+ if (CHECK_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG)) {
if (IS_ZEBRA_DEBUG_VXLAN)
zlog_debug("VxLAN SG updates to PIM, stop");
UNSET_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG);
#define EVPN_ENABLED(zvrf) (zvrf)->advertise_all_vni
static inline int is_evpn_enabled(void)
{
- struct zebra_vrf *zvrf = NULL;
- zvrf = zebra_vrf_get_evpn();
- return zvrf ? EVPN_ENABLED(zvrf) : 0;
+ return EVPN_ENABLED(zebra_vrf_get_evpn());
}
static inline int
is_vxlan_flooding_head_end(void)
{
struct zebra_vrf *zvrf = zebra_vrf_get_evpn();
-
- if (!zvrf)
- return 0;
return (zvrf->vxlan_flood_ctrl == VXLAN_FLOOD_HEAD_END_REPL);
}