#include "filter.h"
#include "command.h"
#include "srv6.h"
+#include "frrstr.h"
#include "bgpd/bgpd.h"
#include "bgpd/bgp_attr.h"
/* Atomic aggregate. */
static int bgp_attr_atomic(struct bgp_attr_parser_args *args)
{
+ struct peer *const peer = args->peer;
struct attr *const attr = args->attr;
const bgp_size_t length = args->length;
args->total);
}
+ if (peer->discard_attrs[args->type])
+ goto atomic_ignore;
+
/* Set atomic aggregate flag. */
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_ATOMIC_AGGREGATE);
return BGP_ATTR_PARSE_PROCEED;
+
+atomic_ignore:
+ stream_forward_getp(peer->curr, length);
+
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug("%pBP: Ignoring attribute %s", peer,
+ lookup_msg(attr_str, args->type, NULL));
+
+ return BGP_ATTR_PARSE_PROCEED;
}
/* Aggregator attribute */
args->total);
}
+ if (peer->discard_attrs[args->type])
+ goto aggregator_ignore;
+
if (CHECK_FLAG(peer->cap, PEER_CAP_AS4_RCV))
aggregator_as = stream_getl(peer->curr);
else
}
return BGP_ATTR_PARSE_PROCEED;
+
+aggregator_ignore:
+ stream_forward_getp(peer->curr, length);
+
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug("%pBP: Ignoring attribute %s", peer,
+ lookup_msg(attr_str, args->type, NULL));
+
+ return BGP_ATTR_PARSE_PROCEED;
}
/* New Aggregator attribute */
0);
}
+ if (peer->discard_attrs[args->type])
+ goto as4_aggregator_ignore;
+
aggregator_as = stream_getl(peer->curr);
*as4_aggregator_as = aggregator_as;
}
return BGP_ATTR_PARSE_PROCEED;
+
+as4_aggregator_ignore:
+ stream_forward_getp(peer->curr, length);
+
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug("%pBP: Ignoring attribute %s", peer,
+ lookup_msg(attr_str, args->type, NULL));
+
+ return BGP_ATTR_PARSE_PROCEED;
}
/* Munge Aggregator and New-Aggregator, AS_PATH and NEW_AS_PATH.
args->total);
}
+ if (peer->discard_attrs[args->type])
+ goto community_ignore;
+
bgp_attr_set_community(
attr,
community_parse((uint32_t *)stream_pnt(peer->curr), length));
args->total);
return BGP_ATTR_PARSE_PROCEED;
+
+community_ignore:
+ stream_forward_getp(peer->curr, length);
+
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug("%pBP: Ignoring attribute %s", peer,
+ lookup_msg(attr_str, args->type, NULL));
+
+ return BGP_ATTR_PARSE_PROCEED;
}
/* Originator ID attribute. */
args->total);
}
+ if (peer->discard_attrs[args->type])
+ goto originator_id_ignore;
+
attr->originator_id.s_addr = stream_get_ipv4(peer->curr);
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_ORIGINATOR_ID);
return BGP_ATTR_PARSE_PROCEED;
+
+originator_id_ignore:
+ stream_forward_getp(peer->curr, length);
+
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug("%pBP: Ignoring attribute %s", peer,
+ lookup_msg(attr_str, args->type, NULL));
+
+ return BGP_ATTR_PARSE_PROCEED;
}
/* Cluster list attribute. */
args->total);
}
+ if (peer->discard_attrs[args->type])
+ goto cluster_list_ignore;
+
bgp_attr_set_cluster(
attr, cluster_parse((struct in_addr *)stream_pnt(peer->curr),
length));
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST);
return BGP_ATTR_PARSE_PROCEED;
+
+cluster_list_ignore:
+ stream_forward_getp(peer->curr, length);
+
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug("%pBP: Ignoring attribute %s", peer,
+ lookup_msg(attr_str, args->type, NULL));
+
+ return BGP_ATTR_PARSE_PROCEED;
}
/* Multiprotocol reachability information parse. */
args->total);
}
+ if (peer->discard_attrs[args->type])
+ goto large_community_ignore;
+
bgp_attr_set_lcommunity(
attr, lcommunity_parse(stream_pnt(peer->curr), length));
/* XXX: fix ecommunity_parse to use stream API */
args->total);
return BGP_ATTR_PARSE_PROCEED;
+
+large_community_ignore:
+ stream_forward_getp(peer->curr, length);
+
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug("%pBP: Ignoring attribute %s", peer,
+ lookup_msg(attr_str, args->type, NULL));
+
+ return BGP_ATTR_PARSE_PROCEED;
}
/* Extended Community attribute. */
args->total);
}
+ if (peer->discard_attrs[args->type])
+ goto ipv6_ext_community_ignore;
+
ipv6_ecomm = ecommunity_parse_ipv6(
stream_pnt(peer->curr), length,
CHECK_FLAG(peer->flags,
args->total);
return BGP_ATTR_PARSE_PROCEED;
+
+ipv6_ext_community_ignore:
+ stream_forward_getp(peer->curr, length);
+
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug("%pBP: Ignoring attribute %s", peer,
+ lookup_msg(attr_str, args->type, NULL));
+
+ return BGP_ATTR_PARSE_PROCEED;
}
/* Parse Tunnel Encap attribute in an UPDATE */
goto aigp_ignore;
}
+ if (peer->discard_attrs[args->type])
+ goto aigp_ignore;
+
if (!bgp_attr_aigp_valid(s, length))
goto aigp_ignore;
aigp_ignore:
stream_forward_getp(peer->curr, length);
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug("%pBP: Ignoring attribute %s", peer,
+ lookup_msg(attr_str, args->type, NULL));
+
return BGP_ATTR_PARSE_PROCEED;
}
args->total);
}
+ if (peer->discard_attrs[args->type])
+ goto otc_ignore;
+
attr->otc = stream_getl(peer->curr);
if (!attr->otc) {
flog_err(EC_BGP_ATTR_MAL_AS_PATH, "OTC attribute value is 0");
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_OTC);
return BGP_ATTR_PARSE_PROCEED;
+
+otc_ignore:
+ stream_forward_getp(peer->curr, length);
+
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug("%pBP: Ignoring attribute %s", peer,
+ lookup_msg(attr_str, args->type, NULL));
+
+ return BGP_ATTR_PARSE_PROCEED;
}
/* BGP unknown attribute treatment. */
/* Forward read pointer of input stream. */
stream_forward_getp(peer->curr, length);
+ if (peer->discard_attrs[type]) {
+ if (bgp_debug_update(peer, NULL, NULL, 1))
+ zlog_debug("%pBP: Ignoring attribute %s", peer,
+ lookup_msg(attr_str, args->type, NULL));
+
+ return BGP_ATTR_PARSE_PROCEED;
+ }
+
/* If any of the mandatory well-known attributes are not recognized,
then the Error Subcode is set to Unrecognized Well-known
Attribute. The Data field contains the unrecognized attribute
len = stream_get_endp(s) - cp - 2;
stream_putw_at(s, cp, len);
}
+
+/*
+ * Rebuild the set of path attributes that `peer` should discard from
+ * incoming UPDATE messages.
+ *
+ * `discard_attrs` is a space-separated list of attribute type codes as
+ * entered at the vty; NULL is a no-op.  Attributes BGP cannot operate
+ * without are refused with a vty warning, and LOCAL_PREF, ORIGINATOR_ID
+ * and CLUSTER_LIST may only be discarded on eBGP sessions.  A soft
+ * inbound clear is issued afterwards so the RIB is recomputed with the
+ * new filter.
+ */
+void bgp_path_attribute_discard_vty(struct vty *vty, struct peer *peer,
+				    const char *discard_attrs)
+{
+	int i, num_attributes;
+	char **attributes;
+	afi_t afi;
+	safi_t safi;
+
+	if (discard_attrs) {
+		frrstr_split(discard_attrs, " ", &attributes, &num_attributes);
+
+		/* The command replaces (not appends to) any previous
+		 * configuration, so start from a clean slate.
+		 */
+		for (i = 0; i < BGP_ATTR_MAX; i++)
+			peer->discard_attrs[i] = false;
+
+		for (i = 0; i < num_attributes; i++) {
+			uint8_t attr_num = strtoul(attributes[i], NULL, 10);
+
+			XFREE(MTYPE_TMP, attributes[i]);
+
+			/* Some of the attributes, just can't be ignored. */
+			if (attr_num == BGP_ATTR_ORIGIN ||
+			    attr_num == BGP_ATTR_AS_PATH ||
+			    attr_num == BGP_ATTR_NEXT_HOP ||
+			    attr_num == BGP_ATTR_MULTI_EXIT_DISC ||
+			    attr_num == BGP_ATTR_MP_REACH_NLRI ||
+			    attr_num == BGP_ATTR_MP_UNREACH_NLRI ||
+			    attr_num == BGP_ATTR_EXT_COMMUNITIES) {
+				vty_out(vty,
+					"%% Can't discard path-attribute %s, ignoring.\n",
+					lookup_msg(attr_str, attr_num, NULL));
+				continue;
+			}
+
+			/* Ignore local-pref, originator-id, cluster-list only
+			 * for eBGP.
+			 */
+			if (peer->sort != BGP_PEER_EBGP &&
+			    (attr_num == BGP_ATTR_LOCAL_PREF ||
+			     attr_num == BGP_ATTR_ORIGINATOR_ID ||
+			     attr_num == BGP_ATTR_CLUSTER_LIST)) {
+				vty_out(vty,
+					"%% Can discard path-attribute %s only for eBGP, ignoring.\n",
+					lookup_msg(attr_str, attr_num, NULL));
+				continue;
+			}
+
+			peer->discard_attrs[attr_num] = true;
+		}
+		XFREE(MTYPE_TMP, attributes);
+
+		/* Configuring path attributes to be discarded will trigger
+		 * an inbound Route Refresh to ensure that the routing table
+		 * is up to date.
+		 */
+		FOREACH_AFI_SAFI (afi, safi)
+			peer_clear_soft(peer, afi, safi, BGP_CLEAR_SOFT_IN);
+	}
+}
extern void attr_show_all(struct vty *vty);
extern unsigned long int attr_count(void);
extern unsigned long int attr_unknown_count(void);
+extern void bgp_path_attribute_discard_vty(struct vty *vty, struct peer *peer,
+ const char *discard_attrs);
/* Cluster list prototypes. */
extern bool cluster_loop_check(struct cluster_list *cluster,
{BGP_NOTIFY_OPEN_BAD_PEER_AS, "/Bad Peer AS"},
{BGP_NOTIFY_OPEN_BAD_BGP_IDENT, "/Bad BGP Identifier"},
{BGP_NOTIFY_OPEN_UNSUP_PARAM, "/Unsupported Optional Parameter"},
- {BGP_NOTIFY_OPEN_AUTH_FAILURE, "/Authentication Failure"},
{BGP_NOTIFY_OPEN_UNACEP_HOLDTIME, "/Unacceptable Hold Time"},
{BGP_NOTIFY_OPEN_UNSUP_CAPBL, "/Unsupported Capability"},
{BGP_NOTIFY_OPEN_ROLE_MISMATCH, "/Role Mismatch"},
{BGP_NOTIFY_UPDATE_ATTR_FLAG_ERR, "/Attribute Flags Error"},
{BGP_NOTIFY_UPDATE_ATTR_LENG_ERR, "/Attribute Length Error"},
{BGP_NOTIFY_UPDATE_INVAL_ORIGIN, "/Invalid ORIGIN Attribute"},
- {BGP_NOTIFY_UPDATE_AS_ROUTE_LOOP, "/AS Routing Loop"},
{BGP_NOTIFY_UPDATE_INVAL_NEXT_HOP, "/Invalid NEXT_HOP Attribute"},
{BGP_NOTIFY_UPDATE_OPT_ATTR_ERR, "/Optional Attribute Error"},
{BGP_NOTIFY_UPDATE_INVAL_NETWORK, "/Invalid Network Field"},
return 0;
}
-static int bgp_auth_parse(struct peer *peer, size_t length)
-{
- bgp_notify_send(peer, BGP_NOTIFY_OPEN_ERR,
- BGP_NOTIFY_OPEN_AUTH_FAILURE);
- return -1;
-}
-
static bool strict_capability_same(struct peer *peer)
{
int i, j;
zlog_debug(
"%s rcvd OPEN w/ optional parameter type %u (%s) len %u",
peer->host, opt_type,
- opt_type == BGP_OPEN_OPT_AUTH
- ? "Authentication"
- : opt_type == BGP_OPEN_OPT_CAP
- ? "Capability"
- : "Unknown",
+ opt_type == BGP_OPEN_OPT_CAP ? "Capability"
+ : "Unknown",
opt_length);
switch (opt_type) {
- case BGP_OPEN_OPT_AUTH:
- ret = bgp_auth_parse(peer, opt_length);
- break;
case BGP_OPEN_OPT_CAP:
ret = bgp_capability_parse(peer, opt_length,
mp_capability, &error);
}
/* AS path loop check. */
- if (onlypeer && onlypeer->as_path_loop_detection
- && aspath_loop_check(piattr->aspath, onlypeer->as)) {
+ if (peer->as_path_loop_detection &&
+ aspath_loop_check(piattr->aspath, peer->as)) {
if (bgp_debug_update(NULL, p, subgrp->update_group, 0))
zlog_debug(
"%pBP [Update:SEND] suppress announcement to peer AS %u that is part of AS path.",
- onlypeer, onlypeer->as);
+ peer, peer->as);
return false;
}
dst->change_local_as = src->change_local_as;
dst->shared_network = src->shared_network;
dst->local_role = src->local_role;
+ dst->as_path_loop_detection = src->as_path_loop_detection;
if (src->soo[afi][safi]) {
ecommunity_free(&dst->soo[afi][safi]);
key = jhash_1word(peer->max_packet_size, key);
key = jhash_1word(peer->pmax_out[afi][safi], key);
+ if (peer->as_path_loop_detection)
+ key = jhash_2words(peer->as, peer->as_path_loop_detection, key);
+
if (peer->group)
key = jhash_1word(jhash(peer->group->name,
strlen(peer->group->name), SEED1),
(intmax_t)CHECK_FLAG(peer->flags, PEER_UPDGRP_FLAGS),
(intmax_t)CHECK_FLAG(flags, PEER_UPDGRP_AF_FLAGS));
zlog_debug(
- "%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u",
+ "%pBP Update Group Hash: addpath: %u UpdGrpCapFlag: %u UpdGrpCapAFFlag: %u route_adv: %u change local as: %u, as_path_loop_detection: %d",
peer, (uint32_t)peer->addpath_type[afi][safi],
CHECK_FLAG(peer->cap, PEER_UPDGRP_CAP_FLAGS),
CHECK_FLAG(peer->af_cap[afi][safi],
PEER_UPDGRP_AF_CAP_FLAGS),
- peer->v_routeadv, peer->change_local_as);
+ peer->v_routeadv, peer->change_local_as,
+ peer->as_path_loop_detection);
zlog_debug(
"%pBP Update Group Hash: max packet size: %u pmax_out: %u Peer Group: %s rmap out: %s",
peer, peer->max_packet_size, peer->pmax_out[afi][safi],
return CMD_SUCCESS;
}
+DEFPY(neighbor_path_attribute_discard,
+      neighbor_path_attribute_discard_cmd,
+      "neighbor <A.B.C.D|X:X::X:X|WORD>$neighbor path-attribute discard (1-255)...",
+      NEIGHBOR_STR
+      NEIGHBOR_ADDR_STR2
+      "Manipulate path attributes from incoming UPDATE messages\n"
+      "Drop specified attributes from incoming UPDATE messages\n"
+      "Attribute number\n")
+{
+	struct peer *peer;
+	int idx = 0;
+	char *discard_attrs = NULL;
+
+	peer = peer_and_group_lookup_vty(vty, neighbor);
+	if (!peer)
+		return CMD_WARNING_CONFIG_FAILED;
+
+	/* Collect the trailing attribute numbers into one space-separated
+	 * string; argv_concat() allocates it from MTYPE_TMP.
+	 */
+	argv_find(argv, argc, "(1-255)", &idx);
+	if (idx)
+		discard_attrs = argv_concat(argv, argc, idx);
+
+	bgp_path_attribute_discard_vty(vty, peer, discard_attrs);
+
+	/* Release the argv_concat() buffer to avoid a per-command leak. */
+	XFREE(MTYPE_TMP, discard_attrs);
+
+	return CMD_SUCCESS;
+}
+
static int set_ecom_list(struct vty *vty, int argc, struct cmd_token **argv,
struct ecommunity **list, bool is_rt6)
{
vty_out(vty, " neighbor %s sender-as-path-loop-detection\n",
addr);
+ /* path-attribute discard */
+ char discard_attrs_str[BUFSIZ] = {0};
+ bool discard_attrs = bgp_path_attribute_discard(
+ peer, discard_attrs_str, sizeof(discard_attrs_str));
+
+ if (discard_attrs)
+ vty_out(vty, " neighbor %s path-attribute discard %s\n", addr,
+ discard_attrs_str);
+
if (!CHECK_FLAG(peer->peer_gr_new_status_flag,
PEER_GRACEFUL_RESTART_NEW_STATE_INHERIT)) {
install_element(BGP_NODE, &neighbor_aspath_loop_detection_cmd);
install_element(BGP_NODE, &no_neighbor_aspath_loop_detection_cmd);
+ /* "neighbor path-attribute discard" commands. */
+ install_element(BGP_NODE, &neighbor_path_attribute_discard_cmd);
+
/* "neighbor passive" commands. */
install_element(BGP_NODE, &neighbor_passive_cmd);
install_element(BGP_NODE, &no_neighbor_passive_cmd);
if (IS_BGP_INST_KNOWN_TO_ZEBRA(bgp)) {
if (BGP_DEBUG(zebra, ZEBRA))
zlog_debug("%s: Registering BGP instance %s to zebra",
- __func__, name);
+ __func__, bgp->name_pretty);
bgp_zebra_instance_register(bgp);
}
peer->group->name, dncount);
}
+/*
+ * Render the peer's discarded path-attribute type codes into `buf` as a
+ * space-separated list (e.g. "17 28"), for "show run" output.
+ *
+ * Returns true if at least one attribute is being discarded, false
+ * otherwise (including when `buf` is NULL or has no room).
+ */
+bool bgp_path_attribute_discard(struct peer *peer, char *buf, size_t size)
+{
+	size_t len = 0;
+	unsigned int i;
+
+	if (!buf || size == 0)
+		return false;
+
+	buf[0] = '\0';
+
+	for (i = 0; i < BGP_ATTR_MAX; i++) {
+		int n;
+
+		if (!peer->discard_attrs[i])
+			continue;
+
+		/* Track the written length instead of re-running strlen()
+		 * on every iteration, and stop cleanly on truncation.
+		 */
+		n = snprintf(buf + len, size - len, "%s%u",
+			     (len > 0) ? " " : "", i);
+		if (n < 0 || (size_t)n >= size - len)
+			break;
+		len += (size_t)n;
+	}
+
+	return len > 0;
+}
+
/* If peer is configured at least one address family return 1. */
bool peer_active(struct peer *peer)
{
bool shut_during_cfg;
+/* Attribute type codes are 8-bit values (1..255); size the lookup table
+ * so the highest code, 255, is a valid index (256 entries, index 0
+ * unused).  With 255 entries, discard_attrs[255] is out of bounds.
+ */
+#define BGP_ATTR_MAX 256
+	/* Path attributes (indexed by type code) to discard from incoming
+	 * UPDATE messages.
+	 */
+	bool discard_attrs[BGP_ATTR_MAX];
+
QOBJ_FIELDS;
};
DECLARE_QOBJ_TYPE(peer);
#define BGP_MSG_ROUTE_REFRESH_OLD 128
/* BGP open optional parameter. */
-#define BGP_OPEN_OPT_AUTH 1
#define BGP_OPEN_OPT_CAP 2
/* BGP4 attribute type codes. */
#define BGP_NOTIFY_OPEN_BAD_PEER_AS 2
#define BGP_NOTIFY_OPEN_BAD_BGP_IDENT 3
#define BGP_NOTIFY_OPEN_UNSUP_PARAM 4
-#define BGP_NOTIFY_OPEN_AUTH_FAILURE 5
#define BGP_NOTIFY_OPEN_UNACEP_HOLDTIME 6
#define BGP_NOTIFY_OPEN_UNSUP_CAPBL 7
#define BGP_NOTIFY_OPEN_ROLE_MISMATCH 11
#define BGP_NOTIFY_UPDATE_ATTR_FLAG_ERR 4
#define BGP_NOTIFY_UPDATE_ATTR_LENG_ERR 5
#define BGP_NOTIFY_UPDATE_INVAL_ORIGIN 6
-#define BGP_NOTIFY_UPDATE_AS_ROUTE_LOOP 7
#define BGP_NOTIFY_UPDATE_INVAL_NEXT_HOP 8
#define BGP_NOTIFY_UPDATE_OPT_ATTR_ERR 9
#define BGP_NOTIFY_UPDATE_INVAL_NETWORK 10
safi_t safi);
extern void peer_on_policy_change(struct peer *peer, afi_t afi, safi_t safi,
int outbound);
+extern bool bgp_path_attribute_discard(struct peer *peer, char *buf,
+ size_t size);
#ifdef _FRR_ATTRIBUTE_PRINTFRR
/* clang-format off */
#pragma FRR printfrr_ext "%pBP" (struct peer *)
that interface.
+.. _bfd-static-peer-config:
+
+BFD Static Route Monitoring Configuration
+-----------------------------------------
+
+A monitored static route conditions the installation to the RIB on the
+BFD session running state: when BFD session is up the route is installed
+to RIB, but when the BFD session is down it is removed from the RIB.
+
+The following commands are available inside the configuration node:
+
+.. clicmd:: ip route A.B.C.D/M A.B.C.D bfd [{multi-hop|source A.B.C.D|profile BFDPROF}]
+
+ Configure a static route for ``A.B.C.D/M`` using gateway ``A.B.C.D`` and use
+ the gateway address as BFD peer destination address.
+
+.. clicmd:: ipv6 route X:X::X:X/M [from X:X::X:X/M] X:X::X:X bfd [{multi-hop|source X:X::X:X|profile BFDPROF}]
+
+ Configure a static route for ``X:X::X:X/M`` using gateway
+ ``X:X::X:X`` and use the gateway address as BFD peer destination
+ address.
+
+The static routes when uninstalled will no longer show up in the output of
+the command ``show ip route`` or ``show ipv6 route``, instead we must use the
+BFD static route show command to see these monitored route status.
+
+.. clicmd:: show bfd static route [json]
+
+ Show all monitored static routes and their status.
+
+ Example output:
+
+ ::
+
+ Showing BFD monitored static routes:
+
+ Route groups:
+ rtg1 peer 172.16.0.1 (status: uninstalled):
+ 2001:db8::100/128
+
+ Next hops:
+ VRF default IPv4 Unicast:
+ 192.168.100.0/24 peer 172.16.0.1 (status: uninstalled)
+
+ VRF default IPv4 Multicast:
+
+ VRF default IPv6 Unicast:
+
.. _bfd-configuration:
Configuration
Default: disabled.
+.. clicmd:: neighbor <A.B.C.D|X:X::X:X|WORD> path-attribute discard (1-255)...
+
+ Drops specified path attributes from BGP UPDATE messages from the specified neighbor.
+
+   Use this command to drop unwanted path attributes from incoming UPDATE
+   messages; BGP then processes each UPDATE as if the discarded attributes
+   had not been present.
+
.. clicmd:: neighbor <A.B.C.D|X:X::X:X|WORD> graceful-shutdown
Mark all routes from this neighbor as less preferred by setting ``graceful-shutdown``
#include "stream.h"
#include "vrf.h"
#include "zclient.h"
+#include "libfrr.h"
#include "table.h"
#include "vty.h"
#include "bfd.h"
DEFINE_MTYPE_STATIC(LIB, BFD_INFO, "BFD info");
+DEFINE_MTYPE_STATIC(LIB, BFD_SOURCE, "BFD source cache");
/**
* BFD protocol integration configuration.
BSE_INSTALL,
};
+/**
+ * BFD source selection result cache.
+ *
+ * This structure will keep track of the result based on the destination
+ * prefix. When the result changes all related BFD sessions with automatic
+ * source will be updated.
+ */
+struct bfd_source_cache {
+	/** VRF the destination address belongs to. */
+	vrf_id_t vrf_id;
+	/** Destination network address (stored as a host prefix). */
+	struct prefix address;
+	/** Source address selected for `address`. */
+	struct prefix source;
+	/** Is the source address valid? (false until NHT resolves it) */
+	bool valid;
+	/** Number of BFD sessions referencing this entry. */
+	size_t refcount;
+
+	SLIST_ENTRY(bfd_source_cache) entry;
+};
+SLIST_HEAD(bfd_source_list, bfd_source_cache);
+
/**
* Data structure to do the necessary tricks to hide the BFD protocol
* integration internals.
/** BFD session installation state. */
bool installed;
+ /** Automatic source selection. */
+ bool auto_source;
+ /** Currently selected source. */
+ struct bfd_source_cache *source_cache;
+
	/** Global BFD parameters list. */
TAILQ_ENTRY(bfd_session_params) entry;
};
* without code duplication among daemons.
*/
TAILQ_HEAD(bsplist, bfd_session_params) bsplist;
+ /** BFD automatic source selection cache. */
+ struct bfd_source_list source_list;
/** Pointer to FRR's event manager. */
struct thread_master *tm;
/** Pointer to zebra client data structure. */
struct zclient *zc;
+ /** Zebra next hop tracking (NHT) client. */
+ struct zclient *nht_zclient;
/** Debugging state. */
bool debugging;
/** Global empty address for IPv4/IPv6. */
static const struct in6_addr i6a_zero;
+/*
+ * Prototypes
+ */
+static void bfd_nht_zclient_connect(struct thread *thread);
+
+static void bfd_nht_zclient_connected(struct zclient *zclient);
+static int bfd_nht_update(ZAPI_CALLBACK_ARGS);
+
+static void bfd_source_cache_get(struct bfd_session_params *session);
+static void bfd_source_cache_put(struct bfd_session_params *session);
+
+/* Register the cached destination with zebra next hop tracking (NHT). */
+static inline void
+bfd_source_cache_register(const struct bfd_source_cache *source)
+{
+	zclient_send_rnh(bsglobal.nht_zclient, ZEBRA_NEXTHOP_REGISTER,
+			 &source->address, SAFI_UNICAST, false, false,
+			 source->vrf_id);
+}
+
+/* Unregister the cached destination from zebra next hop tracking. */
+static inline void
+bfd_source_cache_unregister(const struct bfd_source_cache *source)
+{
+	zclient_send_rnh(bsglobal.nht_zclient, ZEBRA_NEXTHOP_UNREGISTER,
+			 &source->address, SAFI_UNICAST, false, false,
+			 source->vrf_id);
+}
+
+
/*
* bfd_get_peer_info - Extract the Peer information for which the BFD session
* went down from the message sent from Zebra to clients.
/* Remove from global list. */
TAILQ_REMOVE(&bsglobal.bsplist, (*bsp), entry);
+ bfd_source_cache_put(*bsp);
+
/* Free the memory and point to NULL. */
XFREE(MTYPE_BFD_INFO, (*bsp));
}
/* If already installed, remove the old setting. */
_bfd_sess_remove(bsp);
+ /* Address changed so we must reapply auto source. */
+ bfd_source_cache_put(bsp);
bsp->args.family = AF_INET;
assert(dst);
memcpy(&bsp->args.dst, dst, sizeof(struct in_addr));
+
+ if (bsp->auto_source)
+ bfd_source_cache_get(bsp);
}
void bfd_sess_set_ipv6_addrs(struct bfd_session_params *bsp,
/* If already installed, remove the old setting. */
_bfd_sess_remove(bsp);
+ /* Address changed so we must reapply auto source. */
+ bfd_source_cache_put(bsp);
bsp->args.family = AF_INET6;
assert(dst);
bsp->args.dst = *dst;
+
+ if (bsp->auto_source)
+ bfd_source_cache_get(bsp);
}
void bfd_sess_set_interface(struct bfd_session_params *bsp, const char *ifname)
/* If already installed, remove the old setting. */
_bfd_sess_remove(bsp);
+ /* Address changed so we must reapply auto source. */
+ bfd_source_cache_put(bsp);
bsp->args.vrf_id = vrf_id;
+
+ if (bsp->auto_source)
+ bfd_source_cache_get(bsp);
}
void bfd_sess_set_hop_count(struct bfd_session_params *bsp, uint8_t hops)
bsp->args.min_tx = min_tx;
}
+/* Enable/disable automatic source selection for this session. */
+void bfd_sess_set_auto_source(struct bfd_session_params *bsp, bool enable)
+{
+	/* Nothing to do when the setting is unchanged. */
+	if (bsp->auto_source == enable)
+		return;
+
+	bsp->auto_source = enable;
+	/* Take or drop the source cache reference to match the new state. */
+	if (enable)
+		bfd_source_cache_get(bsp);
+	else
+		bfd_source_cache_put(bsp);
+}
+
void bfd_sess_install(struct bfd_session_params *bsp)
{
bsp->lastev = BSE_INSTALL;
*min_tx = bsp->args.min_tx;
}
+/* Accessor: is automatic source selection enabled for this session? */
+bool bfd_sess_auto_source(const struct bfd_session_params *bsp)
+{
+	return bsp->auto_source;
+}
+
void bfd_sess_show(struct vty *vty, struct json_object *json,
struct bfd_session_params *bsp)
{
return 0;
}
+/**
+ * Frees all allocated resources and stops any activity.
+ *
+ * Must be called after every BFD session has been successfully
+ * unconfigured; otherwise this function will `free()` any remaining
+ * session, causing existing pointers to dangle.
+ *
+ * In practice this is invoked by the FRR library's late finish hook.
+ * \see `bfd_protocol_integration_init`.
+ */
+static int bfd_protocol_integration_finish(void)
+{
+	/* Integration was never initialized: nothing to tear down. */
+	if (bsglobal.zc == NULL)
+		return 0;
+
+	while (!TAILQ_EMPTY(&bsglobal.bsplist)) {
+		struct bfd_session_params *session =
+			TAILQ_FIRST(&bsglobal.bsplist);
+		bfd_sess_free(&session);
+	}
+
+	/*
+	 * BFD source cache is linked to sessions, if all sessions are gone
+	 * then the source cache must be empty.
+	 */
+	if (!SLIST_EMPTY(&bsglobal.source_list))
+		zlog_warn("BFD integration source cache not empty");
+
+	zclient_stop(bsglobal.nht_zclient);
+	zclient_free(bsglobal.nht_zclient);
+
+	return 0;
+}
+
+static zclient_handler *const bfd_nht_handlers[] = {
+ [ZEBRA_NEXTHOP_UPDATE] = bfd_nht_update,
+};
+
void bfd_protocol_integration_init(struct zclient *zc, struct thread_master *tm)
{
+ struct zclient_options bfd_nht_options = zclient_options_default;
+
/* Initialize data structure. */
TAILQ_INIT(&bsglobal.bsplist);
+ SLIST_INIT(&bsglobal.source_list);
/* Copy pointers. */
bsglobal.zc = zc;
/* Send the client registration */
bfd_client_sendmsg(zc, ZEBRA_BFD_CLIENT_REGISTER, VRF_DEFAULT);
+
+ /* Start NHT client (for automatic source decisions). */
+ bsglobal.nht_zclient =
+ zclient_new(tm, &bfd_nht_options, bfd_nht_handlers,
+ array_size(bfd_nht_handlers));
+ bsglobal.nht_zclient->sock = -1;
+ bsglobal.nht_zclient->privs = zc->privs;
+ bsglobal.nht_zclient->zebra_connected = bfd_nht_zclient_connected;
+ thread_add_timer(tm, bfd_nht_zclient_connect, bsglobal.nht_zclient, 1,
+ &bsglobal.nht_zclient->t_connect);
+
+ hook_register(frr_fini, bfd_protocol_integration_finish);
}
void bfd_protocol_integration_set_debug(bool enable)
{
return bsglobal.shutting_down;
}
+
+/*
+ * BFD automatic source selection
+ *
+ * This feature will use the next hop tracking (NHT) provided by zebra
+ * to find out the source address by looking at the output interface.
+ *
+ * When the interface address / routing table change we'll be notified
+ * and be able to update the source address accordingly.
+ *
+ * <daemon> zebra
+ * |
+ * +-----------------+
+ * | BFD session set |
+ * | to auto source |
+ * +-----------------+
+ * |
+ * \ +-----------------+
+ * --------------> | Resolves |
+ * | destination |
+ * | address |
+ * +-----------------+
+ * |
+ * +-----------------+ /
+ * | Sets resolved | <----------
+ * | source address |
+ * +-----------------+
+ */
+/*
+ * Check whether `session` targets the same VRF, address family and
+ * destination address as the cache entry `source`.
+ */
+static bool
+bfd_source_cache_session_match(const struct bfd_source_cache *source,
+			       const struct bfd_session_params *session)
+{
+	const struct in_addr *address;
+	const struct in6_addr *address_v6;
+
+	if (session->args.vrf_id != source->vrf_id)
+		return false;
+	if (session->args.family != source->address.family)
+		return false;
+
+	switch (session->args.family) {
+	case AF_INET:
+		address = (const struct in_addr *)&session->args.dst;
+		if (address->s_addr != source->address.u.prefix4.s_addr)
+			return false;
+		break;
+	case AF_INET6:
+		address_v6 = &session->args.dst;
+		if (memcmp(address_v6, &source->address.u.prefix6,
+			   sizeof(struct in6_addr)))
+			return false;
+		break;
+	default:
+		/* Non-IP families never match. */
+		return false;
+	}
+
+	return true;
+}
+
+/* Look up a cache entry by VRF and destination prefix; NULL if absent. */
+static struct bfd_source_cache *
+bfd_source_cache_find(vrf_id_t vrf_id, const struct prefix *prefix)
+{
+	struct bfd_source_cache *source;
+
+	SLIST_FOREACH (source, &bsglobal.source_list, entry) {
+		if (source->vrf_id != vrf_id)
+			continue;
+		if (!prefix_same(&source->address, prefix))
+			continue;
+
+		return source;
+	}
+
+	return NULL;
+}
+
+/*
+ * Acquire (or create) the source cache entry for this session's
+ * VRF/destination and take a reference on it.  Non-IP session families
+ * are ignored.  Newly created entries are registered with zebra NHT.
+ */
+static void bfd_source_cache_get(struct bfd_session_params *session)
+{
+	struct bfd_source_cache *source;
+	struct prefix target = {};
+
+	/* Build a host prefix from the session's destination address. */
+	switch (session->args.family) {
+	case AF_INET:
+		target.family = AF_INET;
+		target.prefixlen = IPV4_MAX_BITLEN;
+		memcpy(&target.u.prefix4, &session->args.dst,
+		       sizeof(struct in_addr));
+		break;
+	case AF_INET6:
+		target.family = AF_INET6;
+		target.prefixlen = IPV6_MAX_BITLEN;
+		memcpy(&target.u.prefix6, &session->args.dst,
+		       sizeof(struct in6_addr));
+		break;
+	default:
+		return;
+	}
+
+	source = bfd_source_cache_find(session->args.vrf_id, &target);
+	if (source) {
+		if (session->source_cache == source)
+			return;
+
+		/* Drop any previous reference before taking the new one. */
+		bfd_source_cache_put(session);
+		session->source_cache = source;
+		source->refcount++;
+		return;
+	}
+
+	/* First session for this destination: allocate and register NHT. */
+	source = XCALLOC(MTYPE_BFD_SOURCE, sizeof(*source));
+	prefix_copy(&source->address, &target);
+	source->vrf_id = session->args.vrf_id;
+	SLIST_INSERT_HEAD(&bsglobal.source_list, source, entry);
+
+	bfd_source_cache_put(session);
+	session->source_cache = source;
+	source->refcount = 1;
+
+	bfd_source_cache_register(source);
+
+	return;
+}
+
+/*
+ * Drop the session's reference on its source cache entry (if any).  When
+ * the last reference goes away the entry is unregistered from NHT,
+ * unlinked and freed.
+ *
+ * NOTE(review): the free path assumes XFREE() NULLs
+ * session->source_cache as a side effect — confirm against the XFREE
+ * macro definition.
+ */
+static void bfd_source_cache_put(struct bfd_session_params *session)
+{
+	if (session->source_cache == NULL)
+		return;
+
+	session->source_cache->refcount--;
+	if (session->source_cache->refcount > 0) {
+		session->source_cache = NULL;
+		return;
+	}
+
+	bfd_source_cache_unregister(session->source_cache);
+	SLIST_REMOVE(&bsglobal.source_list, session->source_cache,
+		     bfd_source_cache, entry);
+	XFREE(MTYPE_BFD_SOURCE, session->source_cache);
+}
+
+/** Updates BFD running session if source address has changed. */
+static void
+bfd_source_cache_update_session(const struct bfd_source_cache *source,
+				struct bfd_session_params *session)
+{
+	const struct in_addr *address;
+	const struct in6_addr *address_v6;
+
+	switch (session->args.family) {
+	case AF_INET:
+		address = (const struct in_addr *)&session->args.src;
+		/* Source unchanged: leave the running session alone. */
+		if (memcmp(address, &source->source.u.prefix4,
+			   sizeof(struct in_addr)) == 0)
+			return;
+
+		/* Remove, rewrite the source, then reinstall below. */
+		_bfd_sess_remove(session);
+		memcpy(&session->args.src, &source->source.u.prefix4,
+		       sizeof(struct in_addr));
+		break;
+	case AF_INET6:
+		address_v6 = &session->args.src;
+		if (memcmp(address_v6, &source->source.u.prefix6,
+			   sizeof(struct in6_addr)) == 0)
+			return;
+
+		_bfd_sess_remove(session);
+		memcpy(&session->args.src, &source->source.u.prefix6,
+		       sizeof(struct in6_addr));
+		break;
+	default:
+		return;
+	}
+
+	bfd_sess_install(session);
+}
+
+/* Push a (valid) selected source to all matching auto-source sessions. */
+static void
+bfd_source_cache_update_sessions(const struct bfd_source_cache *source)
+{
+	struct bfd_session_params *session;
+
+	if (!source->valid)
+		return;
+
+	TAILQ_FOREACH (session, &bsglobal.bsplist, entry) {
+		if (!session->auto_source)
+			continue;
+		if (!bfd_source_cache_session_match(source, session))
+			continue;
+
+		bfd_source_cache_update_session(source, session);
+	}
+}
+
+/**
+ * Try to translate next hop information into source address.
+ *
+ * \returns `true` if source changed otherwise `false`.
+ */
+static bool bfd_source_cache_update(struct bfd_source_cache *source,
+				    const struct zapi_route *route)
+{
+	size_t nh_index;
+
+	for (nh_index = 0; nh_index < route->nexthop_num; nh_index++) {
+		const struct zapi_nexthop *nh = &route->nexthops[nh_index];
+		const struct interface *interface;
+		const struct connected *connected;
+		const struct listnode *node;
+
+		interface = if_lookup_by_index(nh->ifindex, nh->vrf_id);
+		if (interface == NULL) {
+			zlog_err("next hop interface not found (index %d)",
+				 nh->ifindex);
+			continue;
+		}
+
+		/* Pick the first usable address on the outgoing interface. */
+		for (ALL_LIST_ELEMENTS_RO(interface->connected, node,
+					  connected)) {
+			if (source->address.family !=
+			    connected->address->family)
+				continue;
+			/* Current selection still present: no change. */
+			if (prefix_same(connected->address, &source->source))
+				return false;
+			/*
+			 * Skip link-local as it is only useful for single hop
+			 * and in that case no source is specified usually.
+			 */
+			if (source->address.family == AF_INET6 &&
+			    IN6_IS_ADDR_LINKLOCAL(
+				    &connected->address->u.prefix6))
+				continue;
+
+			prefix_copy(&source->source, connected->address);
+			source->valid = true;
+			return true;
+		}
+	}
+
+	/* No usable source found: invalidate the cached selection. */
+	memset(&source->source, 0, sizeof(source->source));
+	source->valid = false;
+	return false;
+}
+
+/* Timer callback: (re)attempt the NHT zclient connection to zebra. */
+static void bfd_nht_zclient_connect(struct thread *thread)
+{
+	struct zclient *zclient = THREAD_ARG(thread);
+
+	if (bsglobal.debugging)
+		zlog_debug("BFD NHT zclient connection attempt");
+
+	if (zclient_start(zclient) == -1) {
+		if (bsglobal.debugging)
+			zlog_debug("BFD NHT zclient connection failed");
+
+		/* Retry in 3 seconds. */
+		thread_add_timer(bsglobal.tm, bfd_nht_zclient_connect, zclient,
+				 3, &zclient->t_connect);
+		return;
+	}
+
+	if (bsglobal.debugging)
+		zlog_debug("BFD NHT zclient connection succeeded");
+}
+
+/* On (re)connect, re-register every cached destination with zebra NHT. */
+static void bfd_nht_zclient_connected(struct zclient *zclient)
+{
+	struct bfd_source_cache *source;
+
+	if (bsglobal.debugging)
+		zlog_debug("BFD NHT zclient connected");
+
+	SLIST_FOREACH (source, &bsglobal.source_list, entry)
+		bfd_source_cache_register(source);
+}
+
+/*
+ * ZAPI callback: a tracked next hop changed.  Refresh every cache entry
+ * matching the announced prefix/VRF and, if the source selection
+ * changed, push the new source to the affected sessions.
+ */
+static int bfd_nht_update(ZAPI_CALLBACK_ARGS)
+{
+	struct bfd_source_cache *source;
+	struct zapi_route route;
+	struct prefix match;
+
+	/* Only next hop updates are registered (see bfd_nht_handlers);
+	 * check the cheap condition before decoding the message.
+	 */
+	if (cmd != ZEBRA_NEXTHOP_UPDATE)
+		return 0;
+
+	if (!zapi_nexthop_update_decode(zclient->ibuf, &match, &route)) {
+		zlog_warn("BFD NHT update decode failure");
+		return 0;
+	}
+
+	if (bsglobal.debugging)
+		zlog_debug("BFD NHT update for %pFX", &route.prefix);
+
+	SLIST_FOREACH (source, &bsglobal.source_list, entry) {
+		if (source->vrf_id != route.vrf_id)
+			continue;
+		if (!prefix_same(&match, &source->address))
+			continue;
+		if (bfd_source_cache_update(source, &route))
+			bfd_source_cache_update_sessions(source);
+	}
+
+	return 0;
+}
uint8_t detection_multiplier, uint32_t min_rx,
uint32_t min_tx);
+/**
+ * Configures the automatic source selection for the BFD session.
+ *
+ * NOTE:
+ * Setting this configuration will override the IP source value set by
+ * `bfd_sess_set_ipv4_addrs` or `bfd_sess_set_ipv6_addrs`.
+ *
+ * \param bsp BFD session parameters
+ * \param enable BFD automatic source selection state.
+ */
+void bfd_sess_set_auto_source(struct bfd_session_params *bsp, bool enable);
+
/**
* Installs or updates the BFD session based on the saved session arguments.
*
uint8_t *detection_multiplier, uint32_t *min_rx,
uint32_t *min_tx);
+/**
+ * Gets the automatic source selection state.
+ */
+bool bfd_sess_auto_source(const struct bfd_session_params *bsp);
+
/**
* Show BFD session configuration and status. If `json` is provided (e.g. not
* `NULL`) then information will be inserted in object, otherwise printed to
#define ROLE_STR \
"Providing transit\nRoute server\nRS client\nUsing transit\nPublic/private peering\n"
+/* BFD protocol integration CLI help strings. */
+#define BFD_INTEGRATION_STR "BFD monitoring\n"
+#define BFD_INTEGRATION_MULTI_HOP_STR "Use BFD multi hop session\n"
+#define BFD_INTEGRATION_SOURCE_STR "Use source for BFD session\n"
+#define BFD_INTEGRATION_SOURCEV4_STR "Use IPv4 source for BFD session\n"
+#define BFD_INTEGRATION_SOURCEV6_STR "Use IPv6 source for BFD session\n"
+
/* Prototypes. */
extern void install_node(struct cmd_node *node);
extern void install_default(enum node_type);
--- /dev/null
+/*
+ * Static daemon BFD integration.
+ *
+ * Copyright (C) 2020-2022 Network Device Education Foundation, Inc. ("NetDEF")
+ * Rafael Zalamena
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA.
+ */
+
+#include <zebra.h>
+
+#include "lib/bfd.h"
+#include "lib/printfrr.h"
+#include "lib/srcdest_table.h"
+
+#include "staticd/static_routes.h"
+#include "staticd/static_zebra.h"
+#include "staticd/static_debug.h"
+
+#include "lib/openbsd-queue.h"
+
+/*
+ * Next hop BFD monitoring settings.
+ */
+/*
+ * BFD session state-change handler: mark the next hop path up/down and
+ * ask zebra to reinstall the route accordingly.
+ */
+static void static_next_hop_bfd_change(struct static_nexthop *sn,
+				       const struct bfd_session_status *bss)
+{
+	switch (bss->state) {
+	case BSS_UNKNOWN:
+		/* FALLTHROUGH: no known state yet. */
+	case BSS_ADMIN_DOWN:
+		/* NOTHING: we or the remote end administratively shutdown. */
+		break;
+	case BSS_DOWN:
+		/* Peer went down, remove this next hop. */
+		DEBUGD(&static_dbg_bfd,
+		       "%s: next hop is down, remove it from RIB", __func__);
+		sn->path_down = true;
+		static_zebra_route_add(sn->pn, true);
+		break;
+	case BSS_UP:
+		/* Peer is back up, add this next hop. */
+		DEBUGD(&static_dbg_bfd, "%s: next hop is up, add it to RIB",
+		       __func__);
+		sn->path_down = false;
+		static_zebra_route_add(sn->pn, true);
+		break;
+	}
+}
+
+/*
+ * Thin adapter matching the bfd_sess_new() callback signature; `arg` is
+ * the `struct static_nexthop *` registered at session creation.
+ */
+static void static_next_hop_bfd_updatecb(
+	__attribute__((unused)) struct bfd_session_params *bsp,
+	const struct bfd_session_status *bss, void *arg)
+{
+	static_next_hop_bfd_change(arg, bss);
+}
+
+/*
+ * Map a static next hop type to its address family.
+ *
+ * Returns AF_INET or AF_INET6 for gateway next hop types; interface and
+ * blackhole next hops have no gateway address to monitor, so an error is
+ * logged and AF_UNSPEC is returned.
+ */
+static inline int
+static_next_hop_type_to_family(const struct static_nexthop *sn)
+{
+	switch (sn->type) {
+	case STATIC_IPV4_GATEWAY:
+	case STATIC_IPV4_GATEWAY_IFNAME:
+		return AF_INET;
+	case STATIC_IPV6_GATEWAY:
+	case STATIC_IPV6_GATEWAY_IFNAME:
+		return AF_INET6;
+	case STATIC_IFNAME:
+	case STATIC_BLACKHOLE:
+	default:
+		zlog_err("%s: invalid next hop type", __func__);
+		break;
+	}
+
+	return AF_UNSPEC;
+}
+
+/*
+ * Enable (or reconfigure) BFD monitoring for a static route next hop.
+ * Session settings are read from the `bfd-monitoring` YANG node.
+ *
+ * \param sn next hop to monitor (must be a gateway type).
+ * \param dnode the `bfd-monitoring` configuration node.
+ */
+void static_next_hop_bfd_monitor_enable(struct static_nexthop *sn,
+					const struct lyd_node *dnode)
+{
+	bool use_interface;
+	bool use_profile;
+	bool use_source;
+	bool onlink;
+	bool mhop;
+	int family;
+	struct ipaddr source;
+
+	use_interface = false;
+	use_source = yang_dnode_exists(dnode, "./source");
+	use_profile = yang_dnode_exists(dnode, "./profile");
+	onlink = yang_dnode_exists(dnode, "../onlink") &&
+		 yang_dnode_get_bool(dnode, "../onlink");
+	mhop = yang_dnode_get_bool(dnode, "./multi-hop");
+
+
+	/* Gateway-less next hops (interface/blackhole) cannot be monitored. */
+	family = static_next_hop_type_to_family(sn);
+	if (family == AF_UNSPEC)
+		return;
+
+	if (sn->type == STATIC_IPV4_GATEWAY_IFNAME ||
+	    sn->type == STATIC_IPV6_GATEWAY_IFNAME)
+		use_interface = true;
+
+	/* Reconfigure or allocate new memory. */
+	if (sn->bsp == NULL)
+		sn->bsp = bfd_sess_new(static_next_hop_bfd_updatecb, sn);
+
+	/* Read the explicitly configured source address, if any. */
+	if (use_source)
+		yang_dnode_get_ip(&source, dnode, "./source");
+
+	/* Single hop and onlink sessions never auto-select a source. */
+	if (onlink || mhop == false)
+		bfd_sess_set_auto_source(sn->bsp, false);
+	else
+		bfd_sess_set_auto_source(sn->bsp, !use_source);
+
+	/* Configure the session addresses. */
+	if (family == AF_INET)
+		bfd_sess_set_ipv4_addrs(sn->bsp,
+					use_source ? &source.ip._v4_addr : NULL,
+					&sn->addr.ipv4);
+	else if (family == AF_INET6)
+		bfd_sess_set_ipv6_addrs(sn->bsp,
+					use_source ? &source.ip._v6_addr : NULL,
+					&sn->addr.ipv6);
+
+	bfd_sess_set_interface(sn->bsp, use_interface ? sn->ifname : NULL);
+
+	bfd_sess_set_profile(sn->bsp, use_profile ? yang_dnode_get_string(
+							    dnode, "./profile")
+						  : NULL);
+
+	/* Multi hop sessions get the maximum hop count. */
+	bfd_sess_set_hop_count(sn->bsp, (onlink || mhop == false) ? 1 : 254);
+
+	/* Install or update the session. */
+	bfd_sess_install(sn->bsp);
+
+	/* Update current path status. */
+	sn->path_down = (bfd_sess_status(sn->bsp) != BSS_UP);
+}
+
+/* Disable BFD monitoring: free the session and restore the path state. */
+void static_next_hop_bfd_monitor_disable(struct static_nexthop *sn)
+{
+	bfd_sess_free(&sn->bsp);
+
+	/* Reset path status. */
+	sn->path_down = false;
+}
+
+/*
+ * Set an explicit BFD source address (disables automatic source
+ * selection) and reinstall the session. No-op when monitoring is off.
+ */
+void static_next_hop_bfd_source(struct static_nexthop *sn,
+				const struct ipaddr *source)
+{
+	int family;
+
+	if (sn->bsp == NULL)
+		return;
+
+	family = static_next_hop_type_to_family(sn);
+	if (family == AF_UNSPEC)
+		return;
+
+	bfd_sess_set_auto_source(sn->bsp, false);
+	if (family == AF_INET)
+		bfd_sess_set_ipv4_addrs(sn->bsp, &source->ip._v4_addr,
+					&sn->addr.ipv4);
+	else if (family == AF_INET6)
+		bfd_sess_set_ipv6_addrs(sn->bsp, &source->ip._v6_addr,
+					&sn->addr.ipv6);
+
+	bfd_sess_install(sn->bsp);
+}
+
+/* Switch back to automatic source selection and reinstall the session. */
+void static_next_hop_bfd_auto_source(struct static_nexthop *sn)
+{
+	if (sn->bsp == NULL)
+		return;
+
+	bfd_sess_set_auto_source(sn->bsp, true);
+	bfd_sess_install(sn->bsp);
+}
+
+/* Toggle single/multi hop mode (254 = max hop count) and reinstall. */
+void static_next_hop_bfd_multi_hop(struct static_nexthop *sn, bool mhop)
+{
+	if (sn->bsp == NULL)
+		return;
+
+	bfd_sess_set_hop_count(sn->bsp, mhop ? 254 : 1);
+	bfd_sess_install(sn->bsp);
+}
+
+/* Apply a BFD profile (NULL clears it) and reinstall the session. */
+void static_next_hop_bfd_profile(struct static_nexthop *sn, const char *name)
+{
+	if (sn->bsp == NULL)
+		return;
+
+	bfd_sess_set_profile(sn->bsp, name);
+	bfd_sess_install(sn->bsp);
+}
+
+/* Hook staticd into the shared BFD client library (call after zclient init). */
+void static_bfd_initialize(struct zclient *zc, struct thread_master *tm)
+{
+	/* Initialize BFD integration library. */
+	bfd_protocol_integration_init(zc, tm);
+}
+
+/*
+ * Display functions
+ */
+/*
+ * Append one monitored next hop as a JSON object to array `jo`.
+ * `vty` is unused here but kept for symmetry with the text variant.
+ */
+static void static_bfd_show_nexthop_json(struct vty *vty,
+					 struct json_object *jo,
+					 const struct static_nexthop *sn)
+{
+	const struct prefix *dst_p, *src_p;
+	struct json_object *jo_nh;
+
+	jo_nh = json_object_new_object();
+
+	srcdest_rnode_prefixes(sn->rn, &dst_p, &src_p);
+	if (src_p)
+		json_object_string_addf(jo_nh, "from", "%pFX", src_p);
+
+	json_object_string_addf(jo_nh, "prefix", "%pFX", dst_p);
+	json_object_string_add(jo_nh, "vrf", sn->nh_vrfname);
+
+	json_object_boolean_add(jo_nh, "installed", !sn->path_down);
+
+	json_object_array_add(jo, jo_nh);
+}
+
+/*
+ * Walk one route table and append every BFD-monitored next hop to the
+ * JSON array `jo`.
+ */
+static void static_bfd_show_path_json(struct vty *vty, struct json_object *jo,
+				      struct route_table *rt)
+{
+	struct route_node *rn;
+
+	for (rn = route_top(rt); rn; rn = srcdest_route_next(rn)) {
+		struct static_route_info *si = static_route_info_from_rnode(rn);
+		struct static_path *sp;
+
+		if (si == NULL)
+			continue;
+
+		frr_each (static_path_list, &si->path_list, sp) {
+			struct static_nexthop *sn;
+
+			frr_each (static_nexthop_list, &sp->nexthop_list, sn) {
+				/* Skip non configured BFD sessions. */
+				if (sn->bsp == NULL)
+					continue;
+
+				static_bfd_show_nexthop_json(vty, jo, sn);
+			}
+		}
+	}
+}
+
+/*
+ * JSON variant of `show bfd static route`: one "path-list" object with
+ * per-AFI/SAFI arrays, walking all VRFs.
+ *
+ * NOTE(review): no "ipv6-multicast" array is emitted — presumably staticd
+ * keeps no AFI_IP6/SAFI_MULTICAST table; confirm against static_vrf.
+ * Assumes `vrf->info` is always set for listed VRFs — TODO confirm.
+ */
+static void static_bfd_show_json(struct vty *vty)
+{
+	struct json_object *jo, *jo_path, *jo_afi_safi;
+	struct vrf *vrf;
+
+	jo = json_object_new_object();
+	jo_path = json_object_new_object();
+
+	json_object_object_add(jo, "path-list", jo_path);
+	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+		const struct static_vrf *svrf = vrf->info;
+		struct route_table *rt;
+
+		jo_afi_safi = json_object_new_array();
+		json_object_object_add(jo_path, "ipv4-unicast", jo_afi_safi);
+		rt = svrf->stable[AFI_IP][SAFI_UNICAST];
+		if (rt)
+			static_bfd_show_path_json(vty, jo_afi_safi, rt);
+
+		jo_afi_safi = json_object_new_array();
+		json_object_object_add(jo_path, "ipv4-multicast", jo_afi_safi);
+		rt = svrf->stable[AFI_IP][SAFI_MULTICAST];
+		if (rt)
+			static_bfd_show_path_json(vty, jo_afi_safi, rt);
+
+		jo_afi_safi = json_object_new_array();
+		json_object_object_add(jo_path, "ipv6-unicast", jo_afi_safi);
+		rt = svrf->stable[AFI_IP6][SAFI_UNICAST];
+		if (rt)
+			static_bfd_show_path_json(vty, jo_afi_safi, rt);
+	}
+
+	vty_out(vty, "%s\n", json_object_to_json_string_ext(jo, 0));
+	json_object_free(jo);
+}
+
+/* Print one next hop line: route, BFD peer address and install status. */
+static void static_bfd_show_nexthop(struct vty *vty,
+				    const struct static_nexthop *sn)
+{
+	vty_out(vty, "        %pRN", sn->rn);
+
+	if (sn->bsp == NULL) {
+		vty_out(vty, "\n");
+		return;
+	}
+
+	if (sn->type == STATIC_IPV4_GATEWAY ||
+	    sn->type == STATIC_IPV4_GATEWAY_IFNAME)
+		vty_out(vty, " peer %pI4", &sn->addr.ipv4);
+	else if (sn->type == STATIC_IPV6_GATEWAY ||
+		 sn->type == STATIC_IPV6_GATEWAY_IFNAME)
+		vty_out(vty, " peer %pI6", &sn->addr.ipv6);
+	else
+		vty_out(vty, " peer unknown");
+
+	vty_out(vty, " (status: %s)\n",
+		sn->path_down ? "uninstalled" : "installed");
+}
+
+/* Walk one route table and print every BFD-monitored next hop. */
+static void static_bfd_show_path(struct vty *vty, struct route_table *rt)
+{
+	struct route_node *rn;
+
+	for (rn = route_top(rt); rn; rn = srcdest_route_next(rn)) {
+		struct static_route_info *si = static_route_info_from_rnode(rn);
+		struct static_path *sp;
+
+		if (si == NULL)
+			continue;
+
+		frr_each (static_path_list, &si->path_list, sp) {
+			struct static_nexthop *sn;
+
+			frr_each (static_nexthop_list, &sp->nexthop_list, sn) {
+				/* Skip non configured BFD sessions. */
+				if (sn->bsp == NULL)
+					continue;
+
+				static_bfd_show_nexthop(vty, sn);
+			}
+		}
+	}
+}
+
+/*
+ * Entry point for `show bfd static route [json]`: dispatch to the JSON
+ * renderer or print a per-VRF, per-AFI/SAFI text listing.
+ */
+void static_bfd_show(struct vty *vty, bool json)
+{
+	struct vrf *vrf;
+
+	if (json) {
+		static_bfd_show_json(vty);
+		return;
+	}
+
+	vty_out(vty, "Showing BFD monitored static routes:\n");
+	vty_out(vty, "\n  Next hops:\n");
+	RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) {
+		const struct static_vrf *svrf = vrf->info;
+		struct route_table *rt;
+
+		vty_out(vty, "    VRF %s IPv4 Unicast:\n", vrf->name);
+		rt = svrf->stable[AFI_IP][SAFI_UNICAST];
+		if (rt)
+			static_bfd_show_path(vty, rt);
+
+		vty_out(vty, "\n    VRF %s IPv4 Multicast:\n", vrf->name);
+		rt = svrf->stable[AFI_IP][SAFI_MULTICAST];
+		if (rt)
+			static_bfd_show_path(vty, rt);
+
+		vty_out(vty, "\n    VRF %s IPv6 Unicast:\n", vrf->name);
+		rt = svrf->stable[AFI_IP6][SAFI_UNICAST];
+		if (rt)
+			static_bfd_show_path(vty, rt);
+	}
+
+	vty_out(vty, "\n");
+}
#include "lib/command.h"
#include "lib/debug.h"
+#include "lib/bfd.h"
#include "static_debug.h"
/* clang-format off */
struct debug static_dbg_events = {0, "Staticd events"};
struct debug static_dbg_route = {0, "Staticd route"};
+/* BFD integration debug category. */
+struct debug static_dbg_bfd = {0, "Staticd bfd"};
struct debug *static_debug_arr[] = {
	&static_dbg_events,
-	&static_dbg_route
+	&static_dbg_route,
+	&static_dbg_bfd
};
/* Config-write lines; index must match static_debug_arr above. */
const char *static_debugs_conflines[] = {
	"debug static events",
-	"debug static route"
+	"debug static route",
+	"debug static bfd"
};
/* clang-format on */
* Debug general internal events
*
*/
-void static_debug_set(int vtynode, bool onoff, bool events, bool route)
+void static_debug_set(int vtynode, bool onoff, bool events, bool route,
+		      bool bfd)
{
	uint32_t mode = DEBUG_NODE2MODE(vtynode);
		DEBUG_MODE_SET(&static_dbg_events, mode, onoff);
	if (route)
		DEBUG_MODE_SET(&static_dbg_route, mode, onoff);
+	/* BFD debugging is also forwarded to the shared integration lib. */
+	if (bfd) {
+		DEBUG_MODE_SET(&static_dbg_bfd, mode, onoff);
+		bfd_protocol_integration_set_debug(onoff);
+	}
}
/*
/* staticd debugging records */
extern struct debug static_dbg_events;
extern struct debug static_dbg_route;
+extern struct debug static_dbg_bfd;
/*
* Initialize staticd debugging.
* Debug general internal events
*
*/
-void static_debug_set(int vtynode, bool onoff, bool events, bool route);
+void static_debug_set(int vtynode, bool onoff, bool events, bool route,
+ bool bfd);
#ifdef __cplusplus
}
.destroy = routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_destroy,
}
},
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop/bfd-monitoring",
+ .cbs = {
+ .create = route_next_hop_bfd_create,
+ .destroy = route_next_hop_bfd_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop/bfd-monitoring/source",
+ .cbs = {
+ .modify = route_next_hop_bfd_source_modify,
+ .destroy = route_next_hop_bfd_source_destroy,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop/bfd-monitoring/multi-hop",
+ .cbs = {
+ .modify = route_next_hop_bfd_multi_hop_modify,
+ }
+ },
+ {
+ .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop/bfd-monitoring/profile",
+ .cbs = {
+ .modify = route_next_hop_bfd_profile_modify,
+ .destroy = route_next_hop_bfd_profile_destroy,
+ }
+ },
{
.xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list",
.cbs = {
struct nb_cb_modify_args *args);
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_path_list_frr_nexthops_nexthop_mpls_label_stack_entry_traffic_class_destroy(
struct nb_cb_destroy_args *args);
+int route_next_hop_bfd_create(struct nb_cb_create_args *args);
+int route_next_hop_bfd_destroy(struct nb_cb_destroy_args *args);
+int route_next_hop_bfd_source_modify(struct nb_cb_modify_args *args);
+int route_next_hop_bfd_source_destroy(struct nb_cb_destroy_args *args);
+int route_next_hop_bfd_profile_modify(struct nb_cb_modify_args *args);
+int route_next_hop_bfd_profile_destroy(struct nb_cb_destroy_args *args);
+int route_next_hop_bfd_multi_hop_modify(struct nb_cb_modify_args *args);
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_create(
struct nb_cb_create_args *args);
int routing_control_plane_protocols_control_plane_protocol_staticd_route_list_src_list_destroy(
return NB_OK;
}
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop/bfd-monitoring
+ */
+/* NB create: enable BFD monitoring on the owning next hop. */
+int route_next_hop_bfd_create(struct nb_cb_create_args *args)
+{
+	struct static_nexthop *sn;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	sn = nb_running_get_entry(args->dnode, NULL, true);
+	static_next_hop_bfd_monitor_enable(sn, args->dnode);
+	return NB_OK;
+}
+
+/* NB destroy: disable BFD monitoring and free the session. */
+int route_next_hop_bfd_destroy(struct nb_cb_destroy_args *args)
+{
+	struct static_nexthop *sn;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	sn = nb_running_get_entry(args->dnode, NULL, true);
+	static_next_hop_bfd_monitor_disable(sn);
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop/bfd-monitoring/source
+ */
+/* NB modify: apply an explicit BFD source address. */
+int route_next_hop_bfd_source_modify(struct nb_cb_modify_args *args)
+{
+	struct static_nexthop *sn;
+	struct ipaddr source;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	sn = nb_running_get_entry(args->dnode, NULL, true);
+	yang_dnode_get_ip(&source, args->dnode, NULL);
+	static_next_hop_bfd_source(sn, &source);
+	return NB_OK;
+}
+
+/* NB destroy: fall back to automatic BFD source selection. */
+int route_next_hop_bfd_source_destroy(struct nb_cb_destroy_args *args)
+{
+	struct static_nexthop *sn;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	sn = nb_running_get_entry(args->dnode, NULL, true);
+	static_next_hop_bfd_auto_source(sn);
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop/bfd-monitoring/multi-hop
+ */
+/* NB modify: toggle single/multi hop on the next hop's BFD session. */
+int route_next_hop_bfd_multi_hop_modify(struct nb_cb_modify_args *args)
+{
+	struct static_nexthop *sn;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	sn = nb_running_get_entry(args->dnode, NULL, true);
+	static_next_hop_bfd_multi_hop(sn,
+				      yang_dnode_get_bool(args->dnode, NULL));
+
+	return NB_OK;
+}
+
+/*
+ * XPath:
+ * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/path-list/frr-nexthops/nexthop/bfd-monitoring/profile
+ */
+/* NB modify: apply a BFD profile name to the session. */
+int route_next_hop_bfd_profile_modify(struct nb_cb_modify_args *args)
+{
+	struct static_nexthop *sn;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	sn = nb_running_get_entry(args->dnode, NULL, true);
+	static_next_hop_bfd_profile(sn,
+				    yang_dnode_get_string(args->dnode, NULL));
+
+	return NB_OK;
+}
+
+/* NB destroy: clear the BFD profile (NULL resets to defaults). */
+int route_next_hop_bfd_profile_destroy(struct nb_cb_destroy_args *args)
+{
+	struct static_nexthop *sn;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	sn = nb_running_get_entry(args->dnode, NULL, true);
+	static_next_hop_bfd_profile(sn, NULL);
+
+	return NB_OK;
+}
+
/*
* XPath:
* /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/route-list/src-list
/* Make new static route structure. */
nh = XCALLOC(MTYPE_STATIC_NEXTHOP, sizeof(struct static_nexthop));
+ /* Copy back pointers. */
+ nh->rn = rn;
nh->pn = pn;
nh->type = type;
struct route_node *rn = pn->rn;
static_nexthop_list_del(&(pn->nexthop_list), nh);
+ /* Remove BFD session/configuration if any. */
+ bfd_sess_free(&nh->bsp);
if (nh->nh_vrf_id == VRF_UNKNOWN)
goto EXIT;
nh->ifindex = IFINDEX_INTERNAL;
}
+ /* Remove previously configured route if any. */
+ static_uninstall_path(pn);
static_install_path(pn);
}
#ifndef __STATIC_ROUTES_H__
#define __STATIC_ROUTES_H__
+#include "lib/bfd.h"
#include "lib/mpls.h"
#include "table.h"
#include "memory.h"
DECLARE_MGROUP(STATIC);
+#include "staticd/static_vrf.h"
+
/* Static route label information */
struct static_nh_label {
uint8_t num_labels;
/* SR-TE color */
uint32_t color;
+
+ /** BFD integration data. */
+ struct bfd_session_params *bsp;
+ /** Back pointer for route node. */
+ struct route_node *rn;
+ /** Path connection status. */
+ bool path_down;
};
DECLARE_DLIST(static_nexthop_list, struct static_nexthop, list);
extern void static_get_nh_str(struct static_nexthop *nh, char *nexthop,
size_t size);
+/*
+ * BFD integration.
+ */
+extern void static_next_hop_bfd_source(struct static_nexthop *sn,
+ const struct ipaddr *source);
+extern void static_next_hop_bfd_auto_source(struct static_nexthop *sn);
+extern void static_next_hop_bfd_monitor_enable(struct static_nexthop *sn,
+ const struct lyd_node *dnode);
+extern void static_next_hop_bfd_monitor_disable(struct static_nexthop *sn);
+extern void static_next_hop_bfd_profile(struct static_nexthop *sn,
+ const char *name);
+extern void static_next_hop_bfd_multi_hop(struct static_nexthop *sn, bool mhop);
+
+/** Call this function after zebra client initialization. */
+extern void static_bfd_initialize(struct zclient *zc, struct thread_master *tm);
+
+extern void static_bfd_show(struct vty *vty, bool isjson);
+
#ifdef __cplusplus
}
#endif
const char *label;
const char *table;
const char *color;
+
+ bool bfd;
+ bool bfd_multi_hop;
+ const char *bfd_source;
+ const char *bfd_profile;
};
static int static_route_nb_run(struct vty *vty, struct static_route_args *args)
apply_mask(&p);
prefix2str(&p, buf_prefix, sizeof(buf_prefix));
+ if (args->bfd && args->gateway == NULL) {
+ vty_out(vty, "%% Route monitoring requires a gateway\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
if (args->source)
prefix2str(&src, buf_src_prefix, sizeof(buf_src_prefix));
if (args->gateway)
nb_cli_enqueue_change(vty, xpath_mpls, NB_OP_DESTROY,
NULL);
}
+
+ if (args->bfd) {
+ char xpath_bfd[XPATH_MAXLEN];
+
+ if (args->bfd_source) {
+ strlcpy(xpath_bfd, xpath_nexthop,
+ sizeof(xpath_bfd));
+ strlcat(xpath_bfd,
+ "/frr-staticd:bfd-monitoring/source",
+ sizeof(xpath_bfd));
+ nb_cli_enqueue_change(vty, xpath_bfd,
+ NB_OP_MODIFY,
+ args->bfd_source);
+ }
+
+ strlcpy(xpath_bfd, xpath_nexthop, sizeof(xpath_bfd));
+ strlcat(xpath_bfd,
+ "/frr-staticd:bfd-monitoring/multi-hop",
+ sizeof(xpath_bfd));
+ nb_cli_enqueue_change(vty, xpath_bfd, NB_OP_MODIFY,
+ args->bfd_multi_hop ? "true"
+ : "false");
+
+ if (args->bfd_profile) {
+ strlcpy(xpath_bfd, xpath_nexthop,
+ sizeof(xpath_bfd));
+ strlcat(xpath_bfd,
+ "/frr-staticd:bfd-monitoring/profile",
+ sizeof(xpath_bfd));
+ nb_cli_enqueue_change(vty, xpath_bfd,
+ NB_OP_MODIFY,
+ args->bfd_profile);
+ }
+ }
+
ret = nb_cli_apply_changes(vty, xpath_prefix);
} else {
if (args->source)
/* Static unicast routes for multicast RPF lookup. */
DEFPY_YANG (ip_mroute_dist,
ip_mroute_dist_cmd,
- "[no] ip mroute A.B.C.D/M$prefix <A.B.C.D$gate|INTERFACE$ifname> [(1-255)$distance]",
+ "[no] ip mroute A.B.C.D/M$prefix <A.B.C.D$gate|INTERFACE$ifname> [{"
+ "(1-255)$distance"
+ "|bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}]"
+ "}]",
NO_STR
IP_STR
"Configure static unicast route into MRIB for multicast RPF lookup\n"
"IP destination prefix (e.g. 10.0.0.0/8)\n"
"Nexthop address\n"
"Nexthop interface name\n"
- "Distance\n")
+ "Distance\n"
+ BFD_INTEGRATION_STR
+ BFD_INTEGRATION_MULTI_HOP_STR
+ BFD_INTEGRATION_SOURCE_STR
+ BFD_INTEGRATION_SOURCEV4_STR
+ BFD_PROFILE_STR
+ BFD_PROFILE_NAME_STR)
{
struct static_route_args args = {
.delete = !!no,
.gateway = gate_str,
.interface_name = ifname,
.distance = distance_str,
+ .bfd = !!bfd,
+ .bfd_multi_hop = !!bfd_multi_hop,
+ .bfd_source = bfd_source_str,
+ .bfd_profile = bfd_profile,
};
return static_route_nb_run(vty, &args);
|nexthop-vrf NAME \
|onlink$onlink \
|color (1-4294967295) \
+ |bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
}]",
NO_STR IP_STR
"Establish static routes\n"
VRF_CMD_HELP_STR
"Treat the nexthop as directly attached to the interface\n"
"SR-TE color\n"
- "The SR-TE color to configure\n")
+ "The SR-TE color to configure\n"
+ BFD_INTEGRATION_STR
+ BFD_INTEGRATION_MULTI_HOP_STR
+ BFD_INTEGRATION_SOURCE_STR
+ BFD_INTEGRATION_SOURCEV4_STR
+ BFD_PROFILE_STR
+ BFD_PROFILE_NAME_STR)
{
struct static_route_args args = {
.delete = !!no,
.onlink = !!onlink,
.vrf = vrf,
.nexthop_vrf = nexthop_vrf,
+ .bfd = !!bfd,
+ .bfd_multi_hop = !!bfd_multi_hop,
+ .bfd_source = bfd_source_str,
+ .bfd_profile = bfd_profile,
};
return static_route_nb_run(vty, &args);
|nexthop-vrf NAME \
|onlink$onlink \
|color (1-4294967295) \
+ |bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
}]",
NO_STR IP_STR
"Establish static routes\n"
VRF_CMD_HELP_STR
"Treat the nexthop as directly attached to the interface\n"
"SR-TE color\n"
- "The SR-TE color to configure\n")
+ "The SR-TE color to configure\n"
+ BFD_INTEGRATION_STR
+ BFD_INTEGRATION_MULTI_HOP_STR
+ BFD_INTEGRATION_SOURCE_STR
+ BFD_INTEGRATION_SOURCEV4_STR
+ BFD_PROFILE_STR
+ BFD_PROFILE_NAME_STR)
{
struct static_route_args args = {
.delete = !!no,
.onlink = !!onlink,
.xpath_vrf = true,
.nexthop_vrf = nexthop_vrf,
+ .bfd = !!bfd,
+ .bfd_multi_hop = !!bfd_multi_hop,
+ .bfd_source = bfd_source_str,
+ .bfd_profile = bfd_profile,
};
return static_route_nb_run(vty, &args);
|table (1-4294967295) \
|nexthop-vrf NAME \
|color (1-4294967295) \
+ |bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
}]",
NO_STR IP_STR
"Establish static routes\n"
"The table number to configure\n"
VRF_CMD_HELP_STR
"SR-TE color\n"
- "The SR-TE color to configure\n")
+ "The SR-TE color to configure\n"
+ BFD_INTEGRATION_STR
+ BFD_INTEGRATION_MULTI_HOP_STR
+ BFD_INTEGRATION_SOURCE_STR
+ BFD_INTEGRATION_SOURCEV4_STR
+ BFD_PROFILE_STR
+ BFD_PROFILE_NAME_STR)
{
struct static_route_args args = {
.delete = !!no,
.color = color_str,
.vrf = vrf,
.nexthop_vrf = nexthop_vrf,
+ .bfd = !!bfd,
+ .bfd_multi_hop = !!bfd_multi_hop,
+ .bfd_source = bfd_source_str,
+ .bfd_profile = bfd_profile,
};
return static_route_nb_run(vty, &args);
|table (1-4294967295) \
|nexthop-vrf NAME \
|color (1-4294967295) \
+ |bfd$bfd [{multi-hop$bfd_multi_hop|source A.B.C.D$bfd_source|profile BFDPROF$bfd_profile}] \
}]",
NO_STR IP_STR
"Establish static routes\n"
"The table number to configure\n"
VRF_CMD_HELP_STR
"SR-TE color\n"
- "The SR-TE color to configure\n")
+ "The SR-TE color to configure\n"
+ BFD_INTEGRATION_STR
+ BFD_INTEGRATION_MULTI_HOP_STR
+ BFD_INTEGRATION_SOURCE_STR
+ BFD_INTEGRATION_SOURCEV4_STR
+ BFD_PROFILE_STR
+ BFD_PROFILE_NAME_STR)
{
struct static_route_args args = {
.delete = !!no,
.color = color_str,
.xpath_vrf = true,
.nexthop_vrf = nexthop_vrf,
+ .bfd = !!bfd,
+ .bfd_multi_hop = !!bfd_multi_hop,
+ .bfd_source = bfd_source_str,
+ .bfd_profile = bfd_profile,
};
return static_route_nb_run(vty, &args);
|nexthop-vrf NAME \
|onlink$onlink \
|color (1-4294967295) \
+ |bfd$bfd [{multi-hop$bfd_multi_hop|source X:X::X:X$bfd_source|profile BFDPROF$bfd_profile}] \
}]",
NO_STR
IPV6_STR
VRF_CMD_HELP_STR
"Treat the nexthop as directly attached to the interface\n"
"SR-TE color\n"
- "The SR-TE color to configure\n")
+ "The SR-TE color to configure\n"
+ BFD_INTEGRATION_STR
+ BFD_INTEGRATION_MULTI_HOP_STR
+ BFD_INTEGRATION_SOURCE_STR
+ BFD_INTEGRATION_SOURCEV4_STR
+ BFD_PROFILE_STR
+ BFD_PROFILE_NAME_STR)
{
struct static_route_args args = {
.delete = !!no,
.onlink = !!onlink,
.vrf = vrf,
.nexthop_vrf = nexthop_vrf,
+ .bfd = !!bfd,
+ .bfd_multi_hop = !!bfd_multi_hop,
+ .bfd_source = bfd_source_str,
+ .bfd_profile = bfd_profile,
};
return static_route_nb_run(vty, &args);
|nexthop-vrf NAME \
|onlink$onlink \
|color (1-4294967295) \
+ |bfd$bfd [{multi-hop$bfd_multi_hop|source X:X::X:X$bfd_source|profile BFDPROF$bfd_profile}] \
}]",
NO_STR
IPV6_STR
VRF_CMD_HELP_STR
"Treat the nexthop as directly attached to the interface\n"
"SR-TE color\n"
- "The SR-TE color to configure\n")
+ "The SR-TE color to configure\n"
+ BFD_INTEGRATION_STR
+ BFD_INTEGRATION_MULTI_HOP_STR
+ BFD_INTEGRATION_SOURCE_STR
+ BFD_INTEGRATION_SOURCEV4_STR
+ BFD_PROFILE_STR
+ BFD_PROFILE_NAME_STR)
{
struct static_route_args args = {
.delete = !!no,
.onlink = !!onlink,
.xpath_vrf = true,
.nexthop_vrf = nexthop_vrf,
+ .bfd = !!bfd,
+ .bfd_multi_hop = !!bfd_multi_hop,
+ .bfd_source = bfd_source_str,
+ .bfd_profile = bfd_profile,
};
return static_route_nb_run(vty, &args);
|table (1-4294967295) \
|nexthop-vrf NAME \
|color (1-4294967295) \
+ |bfd$bfd [{multi-hop$bfd_multi_hop|source X:X::X:X$bfd_source|profile BFDPROF$bfd_profile}] \
}]",
NO_STR
IPV6_STR
"The table number to configure\n"
VRF_CMD_HELP_STR
"SR-TE color\n"
- "The SR-TE color to configure\n")
+ "The SR-TE color to configure\n"
+ BFD_INTEGRATION_STR
+ BFD_INTEGRATION_MULTI_HOP_STR
+ BFD_INTEGRATION_SOURCE_STR
+ BFD_INTEGRATION_SOURCEV4_STR
+ BFD_PROFILE_STR
+ BFD_PROFILE_NAME_STR)
{
struct static_route_args args = {
.delete = !!no,
.color = color_str,
.vrf = vrf,
.nexthop_vrf = nexthop_vrf,
+ .bfd = !!bfd,
+ .bfd_multi_hop = !!bfd_multi_hop,
+ .bfd_source = bfd_source_str,
+ .bfd_profile = bfd_profile,
};
return static_route_nb_run(vty, &args);
|table (1-4294967295) \
|nexthop-vrf NAME \
|color (1-4294967295) \
+ |bfd$bfd [{multi-hop$bfd_multi_hop|source X:X::X:X$bfd_source|profile BFDPROF$bfd_profile}] \
}]",
NO_STR
IPV6_STR
"The table number to configure\n"
VRF_CMD_HELP_STR
"SR-TE color\n"
- "The SR-TE color to configure\n")
+ "The SR-TE color to configure\n"
+ BFD_INTEGRATION_STR
+ BFD_INTEGRATION_MULTI_HOP_STR
+ BFD_INTEGRATION_SOURCE_STR
+ BFD_INTEGRATION_SOURCEV4_STR
+ BFD_PROFILE_STR
+ BFD_PROFILE_NAME_STR)
{
struct static_route_args args = {
.delete = !!no,
.color = color_str,
.xpath_vrf = true,
.nexthop_vrf = nexthop_vrf,
+ .bfd = !!bfd,
+ .bfd_multi_hop = !!bfd_multi_hop,
+ .bfd_source = bfd_source_str,
+ .bfd_profile = bfd_profile,
};
return static_route_nb_run(vty, &args);
vty_out(vty, " color %s",
yang_dnode_get_string(nexthop, "./srte-color"));
+ if (yang_dnode_exists(nexthop, "./bfd-monitoring")) {
+ const struct lyd_node *bfd_dnode =
+ yang_dnode_get(nexthop, "./bfd-monitoring");
+
+ if (yang_dnode_get_bool(bfd_dnode, "./multi-hop")) {
+ vty_out(vty, " bfd multi-hop");
+
+ if (yang_dnode_exists(bfd_dnode, "./source"))
+ vty_out(vty, " source %s",
+ yang_dnode_get_string(bfd_dnode,
+ "./source"));
+ } else
+ vty_out(vty, " bfd");
+
+ if (yang_dnode_exists(bfd_dnode, "./profile"))
+ vty_out(vty, " profile %s",
+ yang_dnode_get_string(bfd_dnode, "./profile"));
+ }
+
vty_out(vty, "\n");
}
}
DEFPY_YANG(debug_staticd, debug_staticd_cmd,
- "[no] debug static [{events$events|route$route}]",
+ "[no] debug static [{events$events|route$route|bfd$bfd}]",
NO_STR DEBUG_STR STATICD_STR
"Debug events\n"
- "Debug route\n")
+ "Debug route\n"
+ "Debug bfd\n")
{
/* If no specific category, change all */
if (strmatch(argv[argc - 1]->text, "static"))
- static_debug_set(vty->node, !no, true, true);
+ static_debug_set(vty->node, !no, true, true, true);
else
- static_debug_set(vty->node, !no, !!events, !!route);
+ static_debug_set(vty->node, !no, !!events, !!route, !!bfd);
return CMD_SUCCESS;
}
+DEFPY(staticd_show_bfd_routes, staticd_show_bfd_routes_cmd,
+ "show bfd static route [json]$isjson",
+ SHOW_STR
+ BFD_INTEGRATION_STR
+ STATICD_STR
+ ROUTE_STR
+ JSON_STR)
+{
+ static_bfd_show(vty, !!isjson);
+ return CMD_SUCCESS;
+}
+
DEFUN_NOSH (show_debugging_static,
show_debugging_static_cmd,
"show debugging [static]",
install_element(ENABLE_NODE, &show_debugging_static_cmd);
install_element(ENABLE_NODE, &debug_staticd_cmd);
install_element(CONFIG_NODE, &debug_staticd_cmd);
+
+ install_element(ENABLE_NODE, &staticd_show_bfd_routes_cmd);
}
api_nh = &api.nexthops[nh_num];
if (nh->nh_vrf_id == VRF_UNKNOWN)
continue;
+ /* Skip next hop which peer is down. */
+ if (nh->path_down)
+ continue;
api_nh->vrf_id = nh->nh_vrf_id;
if (nh->onlink)
zclient->zebra_connected = zebra_connected;
static_nht_hash_init(static_nht_hash);
+ static_bfd_initialize(zclient, master);
}
/* static_zebra_stop used by tests/lib/test_grpc.cpp */
endif
staticd_libstatic_a_SOURCES = \
+ staticd/static_bfd.c \
staticd/static_debug.c \
staticd/static_nht.c \
staticd/static_routes.c \
staticd_staticd_LDADD = staticd/libstatic.a lib/libfrr.la $(LIBCAP)
nodist_staticd_staticd_SOURCES = \
+ yang/frr-bfdd.yang.c \
yang/frr-staticd.yang.c \
# end
--- /dev/null
+{
+ "path-list": {
+ "ipv4-multicast": [],
+ "ipv4-unicast": [],
+ "ipv6-unicast": [
+ {
+ "prefix": "2001:db8:5::\/64",
+ "vrf": "default",
+ "installed": false
+ }
+ ]
+ }}
--- /dev/null
+{
+ "path-list": {
+ "ipv4-multicast": [],
+ "ipv4-unicast": [],
+ "ipv6-unicast": [
+ {
+ "prefix": "2001:db8:5::\/64",
+ "vrf": "default",
+ "installed": true
+ }
+ ]
+ }
+}
--- /dev/null
+ipv6 route 2001:db8:5::/64 2001:db8:4::3 bfd multi-hop profile slow-tx
"remote-transmit-interval": 2000,
"status": "up",
"uptime": "*",
- "transmit-interval": 2000
+ "transmit-interval": 2000,
+ "vrf": "default"
},
{
"detect-multiplier": 3,
"remote-transmit-interval": 2000,
"status": "up",
"uptime": "*",
- "transmit-interval": 2000
+ "transmit-interval": 2000,
+ "vrf": "default"
+ },
+ {
+ "detect-multiplier": 3,
+ "diagnostic": "ok",
+ "echo-receive-interval": 50,
+ "echo-transmit-interval": 0,
+ "id": "*",
+ "multihop": false,
+ "passive-mode": false,
+ "peer": "192.168.4.3",
+ "receive-interval": 2000,
+ "remote-detect-multiplier": 3,
+ "remote-diagnostic": "ok",
+ "remote-echo-receive-interval": 50,
+ "remote-id": "*",
+ "remote-receive-interval": 2000,
+ "remote-transmit-interval": 2000,
+ "status": "up",
+ "transmit-interval": 2000,
+ "uptime": "*",
+ "vrf": "default"
+ },
+ {
+ "detect-multiplier": 3,
+ "diagnostic": "ok",
+ "echo-receive-interval": 50,
+ "echo-transmit-interval": 0,
+ "id": "*",
+ "multihop": false,
+ "passive-mode": false,
+ "peer": "192.168.4.2",
+ "receive-interval": 2000,
+ "remote-detect-multiplier": 3,
+ "remote-diagnostic": "ok",
+ "remote-echo-receive-interval": 50,
+ "remote-id": "*",
+ "remote-receive-interval": 2000,
+ "remote-transmit-interval": 2000,
+ "status": "up",
+ "transmit-interval": 2000,
+ "uptime": "*",
+ "vrf": "default"
}
]
neighbor 2001:db8:1::1 bfd profile slow-tx-mh
address-family ipv4 unicast
redistribute connected
+ redistribute static
exit-address-family
address-family ipv6 unicast
redistribute connected
--- /dev/null
+ip route 10.254.254.5/32 192.168.4.2 bfd profile slow-tx
+ip route 10.254.254.6/32 192.168.4.3 bfd profile slow-tx
ip address 192.168.3.1/24
ipv6 address 2001:db8:3::1/64
!
+interface r4-eth1
+ ip address 192.168.4.1/24
+ ipv6 address 2001:db8:4::1/64
+!
--- /dev/null
+[
+ {
+ "detect-multiplier": 3,
+ "diagnostic": "ok",
+ "echo-receive-interval": 50,
+ "echo-transmit-interval": 0,
+ "id": "*",
+ "multihop": false,
+ "passive-mode": false,
+ "peer": "192.168.4.1",
+ "receive-interval": 2000,
+ "remote-detect-multiplier": 3,
+ "remote-diagnostic": "ok",
+ "remote-echo-receive-interval": 50,
+ "remote-id": "*",
+ "remote-receive-interval": 2000,
+ "remote-transmit-interval": 2000,
+ "status": "up",
+ "transmit-interval": 2000,
+ "uptime": "*",
+ "vrf": "default"
+ }
+]
--- /dev/null
+debug bfd network
+debug bfd peer
+debug bfd zebra
+!
+bfd
+ profile slow-tx
+ receive-interval 2000
+ transmit-interval 2000
+ minimum-ttl 250
+ !
+!
--- /dev/null
+ip route 0.0.0.0/0 192.168.4.1
+ip route 10.254.254.4/32 192.168.4.1 bfd profile slow-tx
--- /dev/null
+ip forwarding
+ipv6 forwarding
+!
+interface lo
+ ip address 10.254.254.5/32
+!
+interface r5-eth0
+ ip address 192.168.4.2/24
+ ipv6 address 2001:db8:4::2/64
+!
--- /dev/null
+[
+ {
+ "detect-multiplier": 3,
+ "diagnostic": "ok",
+ "echo-receive-interval": 50,
+ "echo-transmit-interval": 0,
+ "id": "*",
+ "multihop": false,
+ "passive-mode": false,
+ "peer": "192.168.4.1",
+ "receive-interval": 2000,
+ "remote-detect-multiplier": 3,
+ "remote-diagnostic": "ok",
+ "remote-echo-receive-interval": 50,
+ "remote-id": "*",
+ "remote-receive-interval": 2000,
+ "remote-transmit-interval": 2000,
+ "status": "up",
+ "transmit-interval": 2000,
+ "uptime": "*",
+ "vrf": "default"
+ },
+ {
+ "detect-multiplier": 3,
+ "diagnostic": "ok",
+ "echo-receive-interval": 50,
+ "echo-transmit-interval": 0,
+ "id": "*",
+ "local": "2001:db8:4::3",
+ "minimum-ttl": 2,
+ "multihop": true,
+ "passive-mode": false,
+ "peer": "2001:db8:3::2",
+ "receive-interval": 2000,
+ "remote-detect-multiplier": 3,
+ "remote-diagnostic": "ok",
+ "remote-echo-receive-interval": 50,
+ "remote-id": "*",
+ "remote-receive-interval": 2000,
+ "remote-transmit-interval": 2000,
+ "status": "up",
+ "transmit-interval": 2000,
+ "uptime": "*",
+ "vrf": "default"
+ }
+]
--- /dev/null
+{
+ "path-list": {
+ "ipv4-multicast": [],
+ "ipv4-unicast": [
+ {
+ "installed": true,
+ "prefix": "10.254.254.4/32",
+ "vrf": "default"
+ }
+ ],
+ "ipv6-unicast": [
+ {
+ "prefix": "2001:db8:1::\/64",
+ "vrf": "default",
+ "installed": false
+ }
+ ]
+ }
+}
--- /dev/null
+{
+ "path-list": {
+ "ipv4-multicast": [],
+ "ipv4-unicast": [
+ {
+ "installed": true,
+ "prefix": "10.254.254.4/32",
+ "vrf": "default"
+ }
+ ],
+ "ipv6-unicast": [
+ {
+ "prefix": "2001:db8:1::\/64",
+ "vrf": "default",
+ "installed": true
+ }
+ ]
+ }
+}
--- /dev/null
+debug bfd network
+debug bfd peer
+debug bfd zebra
+!
+bfd
+ profile slow-tx
+ receive-interval 2000
+ transmit-interval 2000
+ minimum-ttl 250
+ !
+!
--- /dev/null
+ip route 0.0.0.0/0 192.168.4.1
+ip route 10.254.254.4/32 192.168.4.1 bfd profile slow-tx
+!
+ipv6 route 2001:db8:3::/64 2001:db8:4::1
+ipv6 route 2001:db8:1::/64 2001:db8:3::2 bfd multi-hop source 2001:db8:4::3 profile slow-tx
--- /dev/null
+ip forwarding
+ipv6 forwarding
+!
+interface lo
+ ip address 10.254.254.6/32
+!
+interface r6-eth0
+ ip address 192.168.4.3/24
+ ipv6 address 2001:db8:4::3/64
+!
fillcolor="#f08080",
style=filled,
];
+ r5 [
+ shape=doubleoctagon
+ label="r5",
+ fillcolor="#f08080",
+ style=filled,
+ ];
+ r6 [
+ shape=doubleoctagon
+ label="r6",
+ fillcolor="#f08080",
+ style=filled,
+ ];
# Switches
sw1 [
fillcolor="#d0e0d0",
style=filled,
];
+ sw4 [
+ shape=oval,
+ label="sw4\n192.168.4.0/24\n2001:db8:4::/64",
+ fillcolor="#d0e0d0",
+ style=filled,
+ ];
# Connections
r1 -- sw1 [label="eth0\n.1"];
r4 -- sw3 [label="eth0\n.1"];
r3 -- sw3 [label="eth2\n.2"];
+
+ r4 -- sw4 [label="eth1\n.1"];
+ r5 -- sw4 [label="eth0\n.2"];
+ r6 -- sw4 [label="eth0\n.3"];
}
"s1": ("r1", "r2"),
"s2": ("r2", "r3"),
"s3": ("r3", "r4"),
+ "s4": ("r4", "r5", "r6"),
}
tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
if os.path.isfile(daemon_file):
router.load_config(TopoRouter.RD_BGP, daemon_file)
+ daemon_file = "{}/{}/staticd.conf".format(CWD, rname)
+ if os.path.isfile(daemon_file):
+ router.load_config(TopoRouter.RD_STATIC, daemon_file)
+
# Initialize all routers.
tgen.start_router()
expect_loopback_route("r1", "ip", "10.254.254.3/32", "bgp")
# Wait for R1 <-> R4 convergence.
expect_loopback_route("r1", "ip", "10.254.254.4/32", "bgp")
+ # Wait for R1 <-> R5 convergence.
+ expect_loopback_route("r1", "ip", "10.254.254.5/32", "bgp")
+ # Wait for R1 <-> R6 convergence.
+ expect_loopback_route("r1", "ip", "10.254.254.6/32", "bgp")
# Wait for R2 <-> R1 convergence.
expect_loopback_route("r2", "ip", "10.254.254.1/32", "bgp")
expect_loopback_route("r2", "ip", "10.254.254.3/32", "bgp")
# Wait for R2 <-> R4 convergence.
expect_loopback_route("r2", "ip", "10.254.254.4/32", "bgp")
+ # Wait for R2 <-> R5 convergence.
+ expect_loopback_route("r2", "ip", "10.254.254.5/32", "bgp")
+ # Wait for R2 <-> R6 convergence.
+ expect_loopback_route("r2", "ip", "10.254.254.6/32", "bgp")
# Wait for R3 <-> R1 convergence.
expect_loopback_route("r3", "ip", "10.254.254.1/32", "bgp")
expect_loopback_route("r3", "ip", "10.254.254.2/32", "bgp")
# Wait for R3 <-> R4 convergence.
expect_loopback_route("r3", "ip", "10.254.254.4/32", "bgp")
+ # Wait for R3 <-> R5 convergence.
+ expect_loopback_route("r3", "ip", "10.254.254.5/32", "bgp")
+ # Wait for R3 <-> R6 convergence.
+ expect_loopback_route("r3", "ip", "10.254.254.6/32", "bgp")
# Wait for R4 <-> R1 convergence.
expect_loopback_route("r4", "ip", "10.254.254.1/32", "bgp")
expect_loopback_route("r4", "ip", "10.254.254.2/32", "bgp")
# Wait for R4 <-> R3 convergence.
expect_loopback_route("r4", "ip", "10.254.254.3/32", "bgp")
+ # Wait for R4 <-> R5 convergence.
+ expect_loopback_route("r4", "ip", "10.254.254.5/32", "static")
+ # Wait for R4 <-> R6 convergence.
+ expect_loopback_route("r4", "ip", "10.254.254.6/32", "static")
+
+ # Wait for R3 -> R6 static route convergence (2001:db8:5::/64).
+ expect_loopback_route("r3", "ipv6", "2001:db8:5::/64", "static")
+ # Wait for R6 -> R3 static route convergence (2001:db8:1::/64).
+ expect_loopback_route("r6", "ipv6", "2001:db8:1::/64", "static")
def test_wait_bfd_convergence():
expect_bfd_configuration("r2")
expect_bfd_configuration("r3")
expect_bfd_configuration("r4")
+ expect_bfd_configuration("r5")
+ expect_bfd_configuration("r6")
+
+
+def test_static_route_monitoring():
+ "Test static route monitoring output."
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("test BFD static route status")
+
+ def expect_static_bfd_output(router, filename):
+ "Load JSON file and compare with 'show bfd static route json'"
+ logger.info("waiting for BFD static route status on router {}".format(router))
+ bfd_config = json.loads(
+ open("{}/{}/{}.json".format(CWD, router, filename)).read()
+ )
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[router],
+ "show bfd static route json",
+ bfd_config,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assertmsg = '"{}" BFD static route status failure'.format(router)
+ assert result is None, assertmsg
+
+ expect_static_bfd_output("r3", "bfd-static")
+ expect_static_bfd_output("r6", "bfd-static")
+
+ logger.info("Setting r4 link down ...")
+
+ tgen.gears["r4"].link_enable("r4-eth0", False)
+
+ expect_static_bfd_output("r3", "bfd-static-down")
+ expect_static_bfd_output("r6", "bfd-static-down")
+
+
+def test_expect_static_rib_removal():
+ "Test that route got removed from RIB (staticd and bgpd)."
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ def expect_route_missing(router, iptype, route):
+ "Wait until route is present on RIB for protocol."
+ logger.info("waiting route {} to disappear in {}".format(route, router))
+ test_func = partial(
+ topotest.router_json_cmp,
+ tgen.gears[router],
+ "show {} route json".format(iptype),
+ {route: None},
+ )
+ rv, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assertmsg = '"{}" convergence failure'.format(router)
+ assert result is None, assertmsg
+
+ expect_route_missing("r1", "ip", "10.254.254.5/32")
+ expect_route_missing("r2", "ip", "10.254.254.5/32")
+ expect_route_missing("r3", "ip", "10.254.254.5/32")
+ expect_route_missing("r3", "ipv6", "2001:db8:5::/64")
+ expect_route_missing("r6", "ipv6", "2001:db8:1::/64")
def teardown_module(_mod):
--- /dev/null
+[exabgp.api]
+encoder = text
+highres = false
+respawn = false
+socket = ''
+
+[exabgp.bgp]
+openwait = 60
+
+[exabgp.cache]
+attributes = true
+nexthops = true
+
+[exabgp.daemon]
+daemonize = true
+pid = '/var/run/exabgp/exabgp.pid'
+user = 'exabgp'
+##daemonize = false
+
+[exabgp.log]
+all = false
+configuration = true
+daemon = true
+destination = '/var/log/exabgp.log'
+enable = true
+level = INFO
+message = false
+network = true
+packets = false
+parser = false
+processes = true
+reactor = true
+rib = false
+routes = false
+short = false
+timers = false
+
+[exabgp.pdb]
+enable = false
+
+[exabgp.profile]
+enable = false
+file = ''
+
+[exabgp.reactor]
+speed = 1.0
+
+[exabgp.tcp]
+acl = false
+bind = ''
+delay = 0
+once = false
+port = 179
--- /dev/null
+neighbor 10.0.0.1 {
+ router-id 10.0.0.2;
+ local-address 10.0.0.2;
+ local-as 65001;
+ peer-as 65002;
+
+ capability {
+ route-refresh;
+ }
+
+ static {
+ route 192.168.100.101/32 {
+ atomic-aggregate;
+ community 65001:101;
+ next-hop 10.0.0.2;
+ }
+
+ route 192.168.100.102/32 {
+ originator-id 10.0.0.2;
+ community 65001:102;
+ next-hop 10.0.0.2;
+ }
+ }
+}
--- /dev/null
+!
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 10.0.0.2 remote-as external
+ neighbor 10.0.0.2 timers 3 10
+!
--- /dev/null
+!
+interface r1-eth0
+ ip address 10.0.0.1/24
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2022 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test if `neighbor path-attribute discard` command works correctly,
+can discard unwanted attributes from UPDATE messages, and ignore them
+by continuing to process UPDATE messages.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ r1 = tgen.add_router("r1")
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(r1)
+ switch.add_link(peer1)
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router = tgen.gears["r1"]
+ router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, "r1/zebra.conf"))
+ router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, "r1/bgpd.conf"))
+ router.start()
+
+ peer = tgen.gears["peer1"]
+ peer.start(os.path.join(CWD, "peer1"), os.path.join(CWD, "exabgp.env"))
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_path_attribute_discard():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ def _bgp_converge():
+ output = json.loads(r1.vtysh_cmd("show bgp ipv4 unicast json detail"))
+ expected = {
+ "routes": {
+ "192.168.100.101/32": [
+ {
+ "valid": True,
+ "atomicAggregate": True,
+ "community": {
+ "string": "65001:101",
+ },
+ }
+ ],
+ "192.168.100.102/32": [
+ {
+ "valid": True,
+ "originatorId": "10.0.0.2",
+ "community": {
+ "string": "65001:102",
+ },
+ }
+ ],
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Failed bgp convergence"
+
+ step("Discard atomic-aggregate, community, and originator-id attributes from peer1")
+ r1.vtysh_cmd(
+ """
+ configure terminal
+ router bgp
+ neighbor 10.0.0.2 path-attribute discard 6 8 9
+ """
+ )
+
+ def _bgp_check_if_attributes_discarded():
+ output = json.loads(r1.vtysh_cmd("show bgp ipv4 unicast json detail"))
+ expected = {
+ "routes": {
+ "192.168.100.101/32": [
+ {
+ "valid": True,
+ "atomicAggregate": None,
+ "community": None,
+ }
+ ],
+ "192.168.100.102/32": [
+ {
+ "valid": True,
+ "originatorId": None,
+ "community": None,
+ }
+ ],
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_if_attributes_discarded)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert (
+ result is None
+ ), "Failed to discard path attributes (atomic-aggregate, community, and originator-id)"
+
+
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
! exit1
router bgp 65001
- no bgp ebgp-requires-policy
- neighbor 192.168.255.1 remote-as 65002
- neighbor 192.168.255.1 timers 3 10
- address-family ipv4 unicast
- neighbor 192.168.255.1 route-map prepend out
- redistribute connected
- exit-address-family
- !
+ no bgp ebgp-requires-policy
+ neighbor 192.168.255.1 remote-as 65002
+ neighbor 192.168.255.1 timers 3 10
+ address-family ipv4 unicast
+ neighbor 192.168.255.1 route-map prepend out
+ redistribute connected
+ exit-address-family
+ !
+!
+ip prefix-list p1 seq 5 permit 172.16.255.253/32
!
route-map prepend permit 10
- set as-path prepend 65003
+ match ip address prefix-list p1
+ set as-path prepend 65003
+!
+route-map prepend permit 20
!
! exit1
interface lo
+ ip address 172.16.255.253/32
ip address 172.16.255.254/32
!
interface r1-eth0
! spine
router bgp 65002
- no bgp ebgp-requires-policy
- neighbor 192.168.255.2 remote-as 65001
- neighbor 192.168.255.2 timers 3 10
- neighbor 192.168.255.2 solo
- neighbor 192.168.254.2 remote-as 65003
- neighbor 192.168.254.2 timers 3 10
- neighbor 192.168.254.2 solo
- neighbor 192.168.254.2 sender-as-path-loop-detection
+ no bgp ebgp-requires-policy
+ neighbor 192.168.255.2 remote-as 65001
+ neighbor 192.168.255.2 timers 3 10
+ neighbor 192.168.255.2 sender-as-path-loop-detection
+ neighbor 192.168.254.2 remote-as 65003
+ neighbor 192.168.254.2 timers 3 10
+ neighbor 192.168.254.2 sender-as-path-loop-detection
!
! exit2
router bgp 65003
- no bgp ebgp-requires-policy
- neighbor 192.168.254.1 remote-as 65002
- neighbor 192.168.254.1 timers 3 10
+ no bgp ebgp-requires-policy
+ neighbor 192.168.254.1 remote-as 65002
+ neighbor 192.168.254.1 timers 3 10
!
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears["r2"]
+ r2 = tgen.gears["r2"]
- def _bgp_converge(router):
- output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json"))
+ def _bgp_converge():
+ output = json.loads(r2.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json"))
expected = {
"192.168.255.2": {
"bgpState": "Established",
- "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 3}},
}
}
return topotest.json_cmp(output, expected)
- def _bgp_has_route_from_r1(router):
- output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.254/32 json"))
+ def _bgp_has_route_from_r1():
+ output = json.loads(r2.vtysh_cmd("show ip bgp 172.16.255.253/32 json"))
expected = {
"paths": [
{
}
return topotest.json_cmp(output, expected)
- def _bgp_suppress_route_to_r3(router):
+ def _bgp_suppress_route_to_r1():
output = json.loads(
- router.vtysh_cmd(
- "show ip bgp neighbor 192.168.254.2 advertised-routes json"
- )
+ r2.vtysh_cmd("show ip bgp neighbor 192.168.255.2 advertised-routes json")
)
expected = {"totalPrefixCounter": 0}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(_bgp_converge, router)
- success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
-
- assert result is None, 'Failed bgp convergence in "{}"'.format(router)
+ def _bgp_suppress_route_to_r3():
+ output = json.loads(
+ r2.vtysh_cmd("show ip bgp neighbor 192.168.254.2 advertised-routes json")
+ )
+ expected = {"totalPrefixCounter": 2}
+ return topotest.json_cmp(output, expected)
- test_func = functools.partial(_bgp_has_route_from_r1, router)
- success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Failed bgp to convergence"
- assert result is None, 'Failed to see a route from r1 in "{}"'.format(router)
+ test_func = functools.partial(_bgp_has_route_from_r1)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Failed to see a route from r1"
- test_func = functools.partial(_bgp_suppress_route_to_r3, router)
- success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ test_func = functools.partial(_bgp_suppress_route_to_r3)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Route 172.16.255.253/32 should not be sent to r3 from r2"
- assert (
- result is None
- ), 'Route 172.16.255.254/32 should not be sent to r3 "{}"'.format(router)
+ test_func = functools.partial(_bgp_suppress_route_to_r1)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
+ assert result is None, "Routes should not be sent to r1 from r2"
if __name__ == "__main__":
# Table of defaults, used for timeout values and 'expected' objects
scale_defaults = dict(
- zip(scale_keys, [None, None, 7, 30, expected_installed, expected_removed])
+ zip(scale_keys, [None, None, 10, 50, expected_installed, expected_removed])
)
# List of params for each step in the test; note extra time given
"bgpd",
"fabricd",
"isisd",
+ "babeld",
"ospf6d",
"ospfd",
"pbrd",
"staticd",
"vrrpd",
"ldpd",
+ "nhrpd",
"pathd",
"bfdd",
"eigrpd",
}
}
+ grouping bfd-monitoring {
+ description
+ "BFD monitoring template for protocol integration.";
+
+ leaf source {
+ type inet:ip-address;
+ description
+ "Source address to use for liveness check.
+
+ When source is not set and multi-hop is `false` the source
+ address will be `0.0.0.0` (any).
+
+ When source is not set and multi-hop is `true` the source
+ address will be automatic selected through Next Hop Tracking (NHT).";
+ }
+
+ leaf multi-hop {
+ description
+ "Use multi hop session instead of single hop.";
+ type boolean;
+ default false;
+ }
+
+ leaf profile {
+ description
+ "BFD pre configured profile.";
+ type frr-bfdd:profile-ref;
+ }
+ }
+
grouping session-states {
/*
* Local settings.
prefix inet;
}
+ import frr-bfdd {
+ prefix frr-bfdd;
+ }
+
organization
"FRRouting";
contact
"AFI-SAFI type.";
}
- uses staticd-prefix-attributes;
+ uses staticd-prefix-attributes {
+ augment "path-list/frr-nexthops/nexthop" {
+ container bfd-monitoring {
+ description "BFD monitoring options.";
+ presence
+ "Present if BFD configuration is available.";
+
+ when "../nh-type = 'ip4' or ../nh-type = 'ip4-ifindex' or
+ ../nh-type = 'ip6' or ../nh-type = 'ip6-ifindex'";
+ uses frr-bfdd:bfd-monitoring;
+ }
+ }
+ }
list src-list {
key "src-prefix";
#define ZEBRA_IF_PTM_ENABLE_ON 1
#define ZEBRA_IF_PTM_ENABLE_UNSPEC 2
-#define IS_BFD_ENABLED_PROTOCOL(protocol) ( \
- (protocol) == ZEBRA_ROUTE_BGP || \
- (protocol) == ZEBRA_ROUTE_OSPF || \
- (protocol) == ZEBRA_ROUTE_OSPF6 || \
- (protocol) == ZEBRA_ROUTE_ISIS || \
- (protocol) == ZEBRA_ROUTE_PIM || \
- (protocol) == ZEBRA_ROUTE_OPENFABRIC \
-)
+#define IS_BFD_ENABLED_PROTOCOL(protocol) \
+ ((protocol) == ZEBRA_ROUTE_BGP || (protocol) == ZEBRA_ROUTE_OSPF || \
+ (protocol) == ZEBRA_ROUTE_OSPF6 || (protocol) == ZEBRA_ROUTE_ISIS || \
+ (protocol) == ZEBRA_ROUTE_PIM || \
+ (protocol) == ZEBRA_ROUTE_OPENFABRIC || \
+ (protocol) == ZEBRA_ROUTE_STATIC)
void zebra_ptm_init(void);
void zebra_ptm_finish(void);