#define BABEL_UNICAST_HELLO 0x8000
DO_NTOHS(flags, message + 2);
- /*
- * RFC 8966 4.6.5
- * All other bits MUST be sent as a 0 and silently
- * ignored on reception
- */
- if (CHECK_FLAG(flags, ~BABEL_UNICAST_HELLO)) {
- debugf(BABEL_DEBUG_COMMON,
- "Received Hello from %s on %s that does not have all 0's in the unused section of flags, ignoring",
- format_address(from), ifp->name);
- goto done;
- }
-
/*
* RFC 8966 Appendix F
* TL;DR -> Please ignore Unicast hellos until FRR's
enum ecommunity_token {
ecommunity_token_unknown = 0,
ecommunity_token_rt,
+ ecommunity_token_nt,
ecommunity_token_soo,
ecommunity_token_val,
ecommunity_token_rt6,
(void)ptr; /* consume value */
}
+/*
+ * Check whether any Node Target extended community in `ecom` carries
+ * the local BGP Identifier `local_id`.
+ *
+ * The Node Target value is encoded as A.B.C.D:0, so only the 4-byte
+ * Target BGP Identifier (right after the type/sub-type octets) is
+ * compared.
+ *
+ * Returns true if at least one matching Node Target community is
+ * found, false otherwise (including when `ecom` is NULL or empty).
+ */
+bool ecommunity_node_target_match(struct ecommunity *ecom,
+				  struct in_addr *local_id)
+{
+	uint32_t i;
+
+	if (!ecom || !ecom->size)
+		return false;
+
+	for (i = 0; i < ecom->size; i++) {
+		const uint8_t *pnt;
+		uint8_t type, sub_type;
+
+		pnt = (ecom->val + (i * ECOMMUNITY_SIZE));
+		type = *pnt++;
+		sub_type = *pnt++;
+
+		if (type == ECOMMUNITY_ENCODE_IP &&
+		    sub_type == ECOMMUNITY_NODE_TARGET) {
+			/* Node Target ID is encoded as A.B.C.D:0 */
+			if (IPV4_ADDR_SAME((struct in_addr *)pnt, local_id))
+				return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * Render a Node Target extended community value into `buf` as
+ * "NT:A.B.C.D".  `ptr` points at the 4-byte Target BGP Identifier,
+ * i.e. just past the type and sub-type octets.  The identifier is
+ * copied into a local struct first to avoid an unaligned read.
+ */
+static void ecommunity_node_target_str(char *buf, size_t bufsz, uint8_t *ptr)
+{
+	/*
+	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	 * | 0x01 or 0x41  | Sub-Type(0x09) |    Target BGP Identifier      |
+	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	 * | Target BGP Identifier (cont.)  |           Reserved            |
+	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	 */
+	struct in_addr node_id = {};
+
+	IPV4_ADDR_COPY(&node_id, (struct in_addr *)ptr);
+
+	snprintfrr(buf, bufsz, "NT:%pI4", &node_id);
+}
+
static int ecommunity_encode_internal(uint8_t type, uint8_t sub_type,
int trans, as_t as,
struct in_addr *ip,
eval->val[0] |= ECOMMUNITY_FLAG_NON_TRANSITIVE;
eval->val[1] = sub_type;
if (type == ECOMMUNITY_ENCODE_AS) {
- eval->val[2] = (as >> 8) & 0xff;
- eval->val[3] = as & 0xff;
- eval->val[4] = (val >> 24) & 0xff;
- eval->val[5] = (val >> 16) & 0xff;
- eval->val[6] = (val >> 8) & 0xff;
- eval->val[7] = val & 0xff;
+ encode_route_target_as(as, val, eval, trans);
} else if (type == ECOMMUNITY_ENCODE_IP) {
- memcpy(&eval->val[2], ip, sizeof(struct in_addr));
- eval->val[6] = (val >> 8) & 0xff;
- eval->val[7] = val & 0xff;
+ if (sub_type == ECOMMUNITY_NODE_TARGET)
+ encode_node_target(ip, eval, trans);
+ else
+ encode_route_target_ip(ip, val, eval, trans);
} else if (type == ECOMMUNITY_ENCODE_TRANS_EXP &&
sub_type == ECOMMUNITY_FLOWSPEC_REDIRECT_IPV6) {
memcpy(&eval6->val[2], ip6, sizeof(struct in6_addr));
eval6->val[18] = (val >> 8) & 0xff;
eval6->val[19] = val & 0xff;
} else {
- eval->val[2] = (as >> 24) & 0xff;
- eval->val[3] = (as >> 16) & 0xff;
- eval->val[4] = (as >> 8) & 0xff;
- eval->val[5] = as & 0xff;
- eval->val[6] = (val >> 8) & 0xff;
- eval->val[7] = val & 0xff;
+ encode_route_target_as4(as, val, eval, trans);
}
return 0;
}
/* Get next Extended Communities token from the string. */
-static const char *ecommunity_gettoken(const char *str,
- void *eval_ptr,
- enum ecommunity_token *token)
+static const char *ecommunity_gettoken(const char *str, void *eval_ptr,
+ enum ecommunity_token *token, int type)
{
int ret;
int dot = 0;
if (*p == '\0')
return NULL;
- /* "rt" and "soo" keyword parse. */
+ /* "rt", "nt", and "soo" keyword parse. */
if (!isdigit((unsigned char)*p)) {
/* "rt" match check. */
if (tolower((unsigned char)*p) == 'r') {
}
goto error;
}
+ /* "nt" match check. */
+ if (tolower((unsigned char)*p) == 'n') {
+ p++;
+ if (tolower((unsigned char)*p) == 't') {
+ p++;
+ *token = ecommunity_token_nt;
+ return p;
+ }
+ if (isspace((unsigned char)*p) || *p == '\0') {
+ *token = ecommunity_token_nt;
+ return p;
+ }
+ goto error;
+ }
/* "soo" match check. */
else if (tolower((unsigned char)*p) == 's') {
p++;
ecomm_type = ECOMMUNITY_ENCODE_AS4;
else
ecomm_type = ECOMMUNITY_ENCODE_AS;
- if (ecommunity_encode(ecomm_type, 0, 1, as, ip, val, eval))
+ if (ecommunity_encode(ecomm_type, type, 1, as, ip, val, eval))
goto error;
*token = ecommunity_token_val;
return p;
if (is_ipv6_extcomm)
token = ecommunity_token_rt6;
- while ((str = ecommunity_gettoken(str, (void *)&eval, &token))) {
+ while ((str = ecommunity_gettoken(str, (void *)&eval, &token, type))) {
switch (token) {
case ecommunity_token_rt:
+ case ecommunity_token_nt:
case ecommunity_token_rt6:
case ecommunity_token_soo:
if (!keyword_included || keyword) {
if (token == ecommunity_token_soo) {
type = ECOMMUNITY_SITE_ORIGIN;
}
+ if (token == ecommunity_token_nt) {
+ type = ECOMMUNITY_NODE_TARGET;
+ }
break;
case ecommunity_token_val:
if (keyword_included) {
ecommunity_lb_str(
encbuf, sizeof(encbuf), pnt,
ecom->disable_ieee_floating);
+ } else if (sub_type == ECOMMUNITY_NODE_TARGET &&
+ type == ECOMMUNITY_ENCODE_IP) {
+ ecommunity_node_target_str(
+ encbuf, sizeof(encbuf), pnt);
} else
unk_ecom = 1;
} else {
ecom->disable_ieee_floating);
else
unk_ecom = 1;
+ } else if (type == ECOMMUNITY_ENCODE_IP_NON_TRANS) {
+ sub_type = *pnt++;
+ if (sub_type == ECOMMUNITY_NODE_TARGET)
+ ecommunity_node_target_str(encbuf,
+ sizeof(encbuf), pnt);
+ else
+ unk_ecom = 1;
} else if (type == ECOMMUNITY_ENCODE_OPAQUE_NON_TRANS) {
sub_type = *pnt++;
if (sub_type == ECOMMUNITY_ORIGIN_VALIDATION_STATE)
/* Extended Community readable string length */
#define ECOMMUNITY_STRLEN 64
+/* Node Target Extended Communities */
+#define ECOMMUNITY_NODE_TARGET 0x09
+#define ECOMMUNITY_NODE_TARGET_RESERVED 0
+
/* Extended Communities attribute. */
struct ecommunity {
/* Reference counter. */
* Encode BGP Route Target AS:nn.
*/
static inline void encode_route_target_as(as_t as, uint32_t val,
- struct ecommunity_val *eval)
+ struct ecommunity_val *eval,
+ bool trans)
{
eval->val[0] = ECOMMUNITY_ENCODE_AS;
+ if (!trans)
+ eval->val[0] |= ECOMMUNITY_FLAG_NON_TRANSITIVE;
eval->val[1] = ECOMMUNITY_ROUTE_TARGET;
eval->val[2] = (as >> 8) & 0xff;
eval->val[3] = as & 0xff;
/*
* Encode BGP Route Target IP:nn.
*/
-static inline void encode_route_target_ip(struct in_addr ip, uint16_t val,
- struct ecommunity_val *eval)
+static inline void encode_route_target_ip(struct in_addr *ip, uint16_t val,
+ struct ecommunity_val *eval,
+ bool trans)
{
eval->val[0] = ECOMMUNITY_ENCODE_IP;
+ if (!trans)
+ eval->val[0] |= ECOMMUNITY_FLAG_NON_TRANSITIVE;
eval->val[1] = ECOMMUNITY_ROUTE_TARGET;
- memcpy(&eval->val[2], &ip, sizeof(struct in_addr));
+ memcpy(&eval->val[2], ip, sizeof(struct in_addr));
eval->val[6] = (val >> 8) & 0xff;
eval->val[7] = val & 0xff;
}
* Encode BGP Route Target AS4:nn.
*/
static inline void encode_route_target_as4(as_t as, uint16_t val,
- struct ecommunity_val *eval)
+ struct ecommunity_val *eval,
+ bool trans)
{
eval->val[0] = ECOMMUNITY_ENCODE_AS4;
+ if (!trans)
+ eval->val[0] |= ECOMMUNITY_FLAG_NON_TRANSITIVE;
eval->val[1] = ECOMMUNITY_ROUTE_TARGET;
eval->val[2] = (as >> 24) & 0xff;
eval->val[3] = (as >> 16) & 0xff;
eval->val[7] = ovs_state;
}
+/*
+ * Encode a Node Target extended community (IP type, sub-type 0x09)
+ * carrying the BGP Identifier `node_id`.  The two Reserved octets are
+ * always zero.  When `trans` is false the community is marked
+ * non-transitive.
+ */
+static inline void encode_node_target(struct in_addr *node_id,
+				      struct ecommunity_val *eval, bool trans)
+{
+	/*
+	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	 * | 0x01 or 0x41  | Sub-Type(0x09) |    Target BGP Identifier      |
+	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	 * | Target BGP Identifier (cont.)  |           Reserved            |
+	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	 */
+	memset(eval, 0, sizeof(*eval));
+	eval->val[0] = ECOMMUNITY_ENCODE_IP;
+	if (!trans)
+		/* NOTE(review): sibling encoders OR in
+		 * ECOMMUNITY_FLAG_NON_TRANSITIVE here; this only produces the
+		 * same byte if ECOMMUNITY_ENCODE_IP_NON_TRANS ==
+		 * (ECOMMUNITY_ENCODE_IP | ECOMMUNITY_FLAG_NON_TRANSITIVE)
+		 * (i.e. 0x41) — verify against the macro definitions.
+		 */
+		eval->val[0] |= ECOMMUNITY_ENCODE_IP_NON_TRANS;
+	eval->val[1] = ECOMMUNITY_NODE_TARGET;
+	memcpy(&eval->val[2], node_id, sizeof(*node_id));
+	eval->val[6] = ECOMMUNITY_NODE_TARGET_RESERVED;
+	eval->val[7] = ECOMMUNITY_NODE_TARGET_RESERVED;
+}
+
extern void ecommunity_init(void);
extern void ecommunity_finish(void);
extern void ecommunity_free(struct ecommunity **);
extern struct ecommunity *
ecommunity_add_origin_validation_state(enum rpki_states rpki_state,
struct ecommunity *ecom);
+extern struct ecommunity *ecommunity_add_node_target(struct in_addr *node_id,
+ struct ecommunity *old,
+ bool non_trans);
+extern bool ecommunity_node_target_match(struct ecommunity *ecomm,
+ struct in_addr *local_id);
#endif /* _QUAGGA_BGP_ECOMMUNITY_H */
if (bgp->advertise_autort_rfc8365)
vni |= EVPN_AUTORT_VXLAN;
- encode_route_target_as((bgp->as & 0xFFFF), vni, &eval);
+ encode_route_target_as((bgp->as & 0xFFFF), vni, &eval, true);
ecomadd = ecommunity_new();
ecommunity_add_val(ecomadd, &eval, false, false);
if (bgp->advertise_autort_rfc8365)
vni |= EVPN_AUTORT_VXLAN;
- encode_route_target_as((bgp->as & 0xFFFF), vni, &eval);
+ encode_route_target_as((bgp->as & 0xFFFF), vni, &eval, true);
ecom_auto = ecommunity_new();
ecommunity_add_val(ecom_auto, &eval, false, false);
struct bgp *bgp_vrf = VTY_GET_CONTEXT(bgp); /* bgp vrf instance */
struct bgp *bgp_evpn = NULL;
- if (EVPN_ENABLED(bgp_vrf)) {
+ if (!bgp_vrf || EVPN_ENABLED(bgp_vrf)) {
vty_out(vty,
"This command is supported under L3VNI BGP EVPN VRF\n");
return CMD_WARNING_CONFIG_FAILED;
}
} else
return SNMP_INTEGER(MPLSL3VPNVRFRTECIDRTYPEOTHER);
+ break;
case MPLSL3VPNVRFRTEINETCIDRPROTO:
switch (pi->type) {
case ZEBRA_ROUTE_CONNECT:
goto filtered;
}
+ /* If the route has Node Target Extended Communities, check
+ * if it's allowed to be installed locally.
+ */
+ if ((attr->flag & ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES))) {
+ struct ecommunity *ecomm = bgp_attr_get_ecommunity(attr);
+
+ if (ecommunity_lookup(ecomm, ECOMMUNITY_ENCODE_IP,
+ ECOMMUNITY_NODE_TARGET) &&
+ !ecommunity_node_target_match(ecomm, &peer->local_id)) {
+ reason =
+ "Node-Target Extended Communities do not contain own BGP Identifier;";
+ goto filtered;
+ }
+ }
+
/* RFC 8212 to prevent route leaks.
* This specification intends to improve this situation by requiring the
* explicit configuration of both BGP Import and Export Policies for any
bgp_static->label = label;
bgp_static->prd = prd;
- if (rd_str)
- bgp_static->prd_pretty = XSTRDUP(MTYPE_BGP, rd_str);
+ bgp_static->prd_pretty = XSTRDUP(MTYPE_BGP, rd_str);
+
if (rmap_str) {
XFREE(MTYPE_ROUTE_MAP_NAME, bgp_static->rmap.name);
route_map_counter_decrement(bgp_static->rmap.map);
route_set_ecommunity_free,
};
+/*
+ * Compile callback for "set extcommunity nt": parse the Node Target
+ * list in `arg` into an interned ecommunity attached to the route-map
+ * rule.  Returns NULL when the string cannot be parsed (the route-map
+ * core reports the error to the user).
+ */
+static void *route_set_ecommunity_nt_compile(const char *arg)
+{
+	struct rmap_ecom_set *rcs;
+	struct ecommunity *ecom;
+
+	ecom = ecommunity_str2com(arg, ECOMMUNITY_NODE_TARGET, 0);
+	if (!ecom)
+		return NULL;
+
+	rcs = XCALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(struct rmap_ecom_set));
+	/* Interned copy is owned by the rule; freed by
+	 * route_set_ecommunity_free. */
+	rcs->ecom = ecommunity_intern(ecom);
+	rcs->none = false;
+
+	return rcs;
+}
+
+/* Route-map rule table entry for "set extcommunity nt". */
+static const struct route_map_rule_cmd route_set_ecommunity_nt_cmd = {
+	"extcommunity nt",
+	route_set_ecommunity,
+	route_set_ecommunity_nt_compile,
+	route_set_ecommunity_free,
+};
+
/* `set extcommunity bandwidth' */
struct rmap_ecomm_lb_set {
"BGP extended community attribute\n"
"Link bandwidth extended community\n")
+DEFPY_YANG (set_ecommunity_nt,
+ set_ecommunity_nt_cmd,
+ "set extcommunity nt RTLIST...",
+ SET_STR
+ "BGP extended community attribute\n"
+ "Node Target extended community\n"
+ "Node Target ID\n")
+{
+ int idx_nt = 3;
+ char *str;
+ int ret;
+ const char *xpath =
+ "./set-action[action='frr-bgp-route-map:set-extcommunity-nt']";
+ char xpath_value[XPATH_MAXLEN];
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+
+ snprintf(xpath_value, sizeof(xpath_value),
+ "%s/rmap-set-action/frr-bgp-route-map:extcommunity-nt", xpath);
+ str = argv_concat(argv, argc, idx_nt);
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, str);
+ ret = nb_cli_apply_changes(vty, NULL);
+ XFREE(MTYPE_TMP, str);
+ return ret;
+}
+
+DEFPY_YANG (no_set_ecommunity_nt,
+ no_set_ecommunity_nt_cmd,
+ "no set extcommunity nt RTLIST...",
+ NO_STR
+ SET_STR
+ "BGP extended community attribute\n"
+ "Node Target extended community\n"
+ "Node Target ID\n")
+{
+ const char *xpath =
+ "./set-action[action='frr-bgp-route-map:set-extcommunity-nt']";
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+ALIAS_YANG (no_set_ecommunity_nt,
+ no_set_ecommunity_nt_short_cmd,
+ "no set extcommunity nt",
+ NO_STR
+ SET_STR
+ "BGP extended community attribute\n"
+ "Node Target extended community\n")
+
DEFUN_YANG (set_origin,
set_origin_cmd,
"set origin <egp|igp|incomplete>",
route_map_install_set(&route_set_vpnv6_nexthop_cmd);
route_map_install_set(&route_set_originator_id_cmd);
route_map_install_set(&route_set_ecommunity_rt_cmd);
+ route_map_install_set(&route_set_ecommunity_nt_cmd);
route_map_install_set(&route_set_ecommunity_soo_cmd);
route_map_install_set(&route_set_ecommunity_lb_cmd);
route_map_install_set(&route_set_ecommunity_none_cmd);
install_element(RMAP_NODE, &no_set_ecommunity_lb_short_cmd);
install_element(RMAP_NODE, &set_ecommunity_none_cmd);
install_element(RMAP_NODE, &no_set_ecommunity_none_cmd);
+ install_element(RMAP_NODE, &set_ecommunity_nt_cmd);
+ install_element(RMAP_NODE, &no_set_ecommunity_nt_cmd);
+ install_element(RMAP_NODE, &no_set_ecommunity_nt_short_cmd);
#ifdef KEEP_OLD_VPN_COMMANDS
install_element(RMAP_NODE, &set_vpn_nexthop_cmd);
install_element(RMAP_NODE, &no_set_vpn_nexthop_cmd);
.destroy = lib_route_map_entry_set_action_rmap_set_action_extcommunity_rt_destroy,
}
},
+ {
+ .xpath = "/frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:extcommunity-nt",
+ .cbs = {
+ .modify = lib_route_map_entry_set_action_rmap_set_action_extcommunity_nt_modify,
+ .destroy = lib_route_map_entry_set_action_rmap_set_action_extcommunity_nt_destroy,
+ }
+ },
{
.xpath = "/frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:extcommunity-soo",
.cbs = {
int lib_route_map_entry_set_action_rmap_set_action_distance_destroy(struct nb_cb_destroy_args *args);
int lib_route_map_entry_set_action_rmap_set_action_extcommunity_rt_modify(struct nb_cb_modify_args *args);
int lib_route_map_entry_set_action_rmap_set_action_extcommunity_rt_destroy(struct nb_cb_destroy_args *args);
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_nt_modify(
+ struct nb_cb_modify_args *args);
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_nt_destroy(
+ struct nb_cb_destroy_args *args);
int lib_route_map_entry_set_action_rmap_set_action_extcommunity_soo_modify(struct nb_cb_modify_args *args);
int lib_route_map_entry_set_action_rmap_set_action_extcommunity_soo_destroy(struct nb_cb_destroy_args *args);
int lib_route_map_entry_set_action_rmap_set_action_ipv4_address_modify(struct nb_cb_modify_args *args);
return NB_OK;
}
+/*
+ * XPath:
+ * /frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:extcommunity-nt
+ */
+/*
+ * Northbound MODIFY callback for the extcommunity-nt set-action leaf.
+ * On APPLY it registers the "extcommunity nt" rule with the route-map
+ * and records the destroy hook so the rule is removed when the config
+ * node is deleted.
+ */
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_nt_modify(
+	struct nb_cb_modify_args *args)
+{
+	struct routemap_hook_context *rhc;
+	const char *str;
+	int rv;
+
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		/* Add configuration. */
+		rhc = nb_running_get_entry(args->dnode, NULL, true);
+		str = yang_dnode_get_string(args->dnode, NULL);
+
+		/* Set destroy information. */
+		rhc->rhc_shook = generic_set_delete;
+		rhc->rhc_rule = "extcommunity nt";
+		rhc->rhc_event = RMAP_EVENT_SET_DELETED;
+
+		rv = generic_set_add(rhc->rhc_rmi, "extcommunity nt", str,
+				     args->errmsg, args->errmsg_len);
+		if (rv != CMD_SUCCESS) {
+			/* Registration failed: clear the destroy hook so a
+			 * later DESTROY does not delete a rule that was never
+			 * installed. */
+			rhc->rhc_shook = NULL;
+			return NB_ERR_INCONSISTENCY;
+		}
+	}
+
+	return NB_OK;
+}
+
+/* Northbound DESTROY callback for the extcommunity-nt set-action leaf. */
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_nt_destroy(
+	struct nb_cb_destroy_args *args)
+{
+	switch (args->event) {
+	case NB_EV_VALIDATE:
+	case NB_EV_PREPARE:
+	case NB_EV_ABORT:
+		break;
+	case NB_EV_APPLY:
+		/* NOTE(review): this is a *set* action, but the generic
+		 * *match* destroy helper is invoked here; sibling set-action
+		 * destroy callbacks use the set-destroy helper.  Looks like a
+		 * copy-paste slip — verify which helper is intended. */
+		return lib_route_map_entry_match_destroy(args);
+	}
+
+	return NB_OK;
+}
+
/*
* XPath:
* /frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:extcommunity-soo
.. code:: shell
cd test_to_be_run
- ./test_to_be_run.py
+ sudo -E pytest ./test_to_be_run.py
For example, and assuming you are inside the frr directory:
.. code:: shell
cd tests/topotests/bgp_l3vpn_to_bgp_vrf
- ./test_bgp_l3vpn_to_bgp_vrf.py
+ sudo -E pytest ./test_bgp_l3vpn_to_bgp_vrf.py
For further options, refer to pytest documentation.
.. _screen: https://www.gnu.org/software/screen/
.. _tmux: https://github.com/tmux/tmux/wiki
+Capturing Packets
+"""""""""""""""""
+
+One can view and capture packets on any of the networks or interfaces defined by
+the topotest by specifying the ``--pcap=NET|INTF|all[,NET|INTF,...]`` CLI option
+as shown in the examples below.
+
+.. code:: shell
+
+ # Capture on all networks in isis_topo1 test
+ sudo -E pytest isis_topo1 --pcap=all
+
+ # Capture on `sw1` network
+ sudo -E pytest isis_topo1 --pcap=sw1
+
+ # Capture on `sw1` network and on interface `eth0` on router `r2`
+ sudo -E pytest isis_topo1 --pcap=sw1,r2:r2-eth0
+
+For each capture a window is opened displaying a live summary of the captured
+packets. Additionally, the entire packet stream is captured in a pcap file in
+the tests log directory, e.g.:
+
+.. code:: console
+
+ $ sudo -E pytest isis_topo1 --pcap=sw1,r2:r2-eth0
+ ...
+ $ ls -l /tmp/topotests/isis_topo1.test_isis_topo1/
+ -rw------- 1 root root 45172 Apr 19 05:30 capture-r2-r2-eth0.pcap
+ -rw------- 1 root root 48412 Apr 19 05:30 capture-sw1.pcap
+ ...
+
+Viewing Live Daemon Logs
+""""""""""""""""""""""""
+
+One can live view daemon or the frr logs in separate windows using the
+``--logd`` CLI option as shown below.
+
+.. code:: shell
+
+ # View `ripd` logs on all routers in test
+ sudo -E pytest rip_allow_ecmp --logd=ripd
+
+ # View `ripd` logs on all routers and `mgmtd` log on `r1`
+ sudo -E pytest rip_allow_ecmp --logd=ripd --logd=mgmtd,r1
+
+For each selected daemon and router combination a window is opened displaying
+a live tail of that daemon's log file.
+
+When using a unified log file `frr.log` one substitutes `frr` for the daemon
+name in the ``--logd`` CLI option, e.g.,
+
+.. code:: shell
+
+ # View `frr` log on all routers in test
+ sudo -E pytest some_test_suite --logd=frr
+
Spawning Debugging CLI, ``vtysh`` or Shells on Routers on Test Failure
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
test_bgp_multiview_topo1/test_bgp_routingTable> help
- Commands:
- help :: this help
- sh [hosts] <shell-command> :: execute <shell-command> on <host>
- term [hosts] :: open shell terminals for hosts
- vtysh [hosts] :: open vtysh terminals for hosts
- [hosts] <vtysh-command> :: execute vtysh-command on hosts
+ Basic Commands:
+ cli :: open a secondary CLI window
+ help :: this help
+ hosts :: list hosts
+ quit :: quit the cli
+
+ HOST can be a host or one of the following:
+ - '*' for all hosts
+ - '.' for the parent munet
+ - a regex specified between '/' (e.g., '/rtr.*/')
+
+ New Window Commands:
+ logd HOST [HOST ...] DAEMON :: tail -f on the logfile of the given DAEMON for the given HOST[S]
+ pcap NETWORK :: capture packets from NETWORK into file capture-NETWORK.pcap the command is run within a new window which also shows packet summaries. NETWORK can also be an interface specified as HOST:INTF. To capture inside the host namespace.
+ stderr HOST [HOST ...] DAEMON :: tail -f on the stderr of the given DAEMON for the given HOST[S]
+ stdlog HOST [HOST ...] :: tail -f on the `frr.log` for the given HOST[S]
+ stdout HOST [HOST ...] DAEMON :: tail -f on the stdout of the given DAEMON for the given HOST[S]
+ term HOST [HOST ...] :: open terminal[s] (TMUX or XTerm) on HOST[S], * for all
+ vtysh ROUTER [ROUTER ...] ::
+ xterm HOST [HOST ...] :: open XTerm[s] on HOST[S], * for all
+ Inline Commands:
+ [ROUTER ...] COMMAND :: execute vtysh COMMAND on the router[s]
+ [HOST ...] sh <SHELL-COMMAND> :: execute <SHELL-COMMAND> on hosts
+ [HOST ...] shi <INTERACTIVE-COMMAND> :: execute <INTERACTIVE-COMMAND> on HOST[s]
test_bgp_multiview_topo1/test_bgp_routingTable> r1 show int br
------ Host: r1 ------
sudo -E pytest --valgrind-memleaks all-protocol-startup
+Collecting Performance Data using perf(1)
+"""""""""""""""""""""""""""""""""""""""""
+
+Topotest can automatically launch any daemon under ``perf(1)`` to collect
+performance data. The daemon is run in non-daemon mode with ``perf record -g``.
+The ``perf.data`` file will be saved in the router specific directory under the
+tests run directory.
+
+Here's an example of collecting performance data from ``mgmtd`` on router ``r1``
+during the config_timing test.
+
+.. code:: console
+
+ $ sudo -E pytest --perf=mgmtd,r1 config_timing
+ ...
+ $ find /tmp/topotests/ -name '*perf.data*'
+ /tmp/topotests/config_timing.test_config_timing/r1/perf.data
+
+To specify different arguments for ``perf record``, one can use the
+``--perf-options`` this will replace the ``-g`` used by default.
+
.. _topotests_docker:
Running Tests with Docker
* :rfc:`5880`
* :rfc:`5881`
+* :rfc:`5882`
* :rfc:`5883`
Currently, there are two implementations of the BFD commands in FRR:
that interface.
+.. _bfd-rip-peer-config:
+
+RIP BFD configuration
+---------------------
+
+The following commands are available inside the interface configuration node:
+
+.. clicmd:: ip rip bfd
+
+ Automatically create BFD session for each RIP peer discovered in this
+ interface. When the BFD session monitor signalize that the link is down
+ the RIP peer is removed and all the learned routes associated with that
+ peer are removed.
+
+
+.. clicmd:: ip rip bfd profile BFD_PROFILE_NAME
+
+ Selects a BFD profile for the BFD sessions created in this interface.
+
+
+The following command is available in the RIP router configuration node:
+
+.. clicmd:: bfd default-profile BFD_PROFILE_NAME
+
+ Selects a default BFD profile for all sessions without a profile specified.
+
+
.. _bfd-static-peer-config:
BFD Static Route Monitoring Configuration
.. clicmd:: set extcommunity rt EXTCOMMUNITY
- This command set Route Target value.
+ This command sets Route Target value.
+
+.. clicmd:: set extcommunity nt EXTCOMMUNITY
+
+ This command sets Node Target value.
+
+ If the receiving BGP router supports Node Target Extended Communities,
+ it will install the route with the community that contains its own
+ local BGP Identifier. Otherwise, the route is not installed.
.. clicmd:: set extcommunity soo EXTCOMMUNITY
- This command set Site of Origin value.
+ This command sets Site of Origin value.
.. clicmd:: set extcommunity bandwidth <(1-25600) | cumulative | num-multipaths> [non-transitive]
:t:`Bidirectional Forwarding Detection (BFD), D. Katz, D. Ward. June 2010`
- :rfc:`5881`
:t:`Bidirectional Forwarding Detection (BFD) for IPv4 and IPv6 (Single Hop), D. Katz, D. Ward. June 2010`
+- :rfc:`5882`
+ :t:`Generic Application of Bidirectional Forwarding Detection (BFD), D. Katz, D. Ward. June 2010`
- :rfc:`5883`
:t:`Bidirectional Forwarding Detection (BFD) for Multihop Paths, D. Katz, D. Ward. June 2010`
fd = open(filename, O_RDONLY | O_NOCTTY);
if (fd < 0 || fstat(fd, &st)) {
PyErr_SetFromErrnoWithFilename(PyExc_OSError, filename);
- close(fd);
+ if (fd > 0)
+ close(fd);
goto out;
}
w->len = st.st_size;
(strmatch(A, "frr-bgp-route-map:set-extcommunity-none"))
#define IS_SET_EXTCOMMUNITY_RT(A) \
(strmatch(A, "frr-bgp-route-map:set-extcommunity-rt"))
+#define IS_SET_EXTCOMMUNITY_NT(A) \
+ (strmatch(A, "frr-bgp-route-map:set-extcommunity-nt"))
#define IS_SET_EXTCOMMUNITY_SOO(A) \
(strmatch(A, "frr-bgp-route-map:set-extcommunity-soo"))
#define IS_SET_EXTCOMMUNITY_LB(A) \
yang_dnode_get_string(
dnode,
"./rmap-set-action/frr-bgp-route-map:extcommunity-rt"));
+ } else if (IS_SET_EXTCOMMUNITY_NT(action)) {
+ vty_out(vty, " set extcommunity nt %s\n",
+ yang_dnode_get_string(
+ dnode,
+ "./rmap-set-action/frr-bgp-route-map:extcommunity-nt"));
} else if (IS_SET_EXTCOMMUNITY_SOO(action)) {
vty_out(vty, " set extcommunity soo %s\n",
yang_dnode_get_string(
nodist_lib_libfrr_la_SOURCES = \
yang/frr-affinity-map.yang.c \
yang/frr-filter.yang.c \
+ yang/frr-if-rmap.yang.c \
yang/frr-interface.yang.c \
yang/frr-route-map.yang.c \
yang/frr-route-types.yang.c \
uint32_t newsize = head->count, i, j;
uint8_t newshift, delta;
+ /* note hash_grow is called after head->count++, so newsize is
+ * guaranteed to be >= 1. So the minimum argument to builtin_ctz
+ * below is 2, which returns 1, and that makes newshift >= 2.
+ *
+ * Calling hash_grow with a zero head->count would result in a
+ * malformed hash table that has tabshift == 1.
+ */
+ assert(head->count > 0);
+
hash_consistency_check(head);
newsize |= newsize >> 1;
struct thash_item **entries;
uint32_t count;
+ /* tabshift can be 0 if the hash table is empty and entries is NULL.
+ * otherwise it will always be 2 or larger because it contains
+ * the shift value *plus 1*. This is a trick to make HASH_SIZE return
+ * the correct value (with the >> 1) for tabshift == 0, without needing
+ * a conditional branch.
+ */
uint8_t tabshift;
uint8_t minshift, maxshift;
};
((1U << (tabshift)) >> 1)
#define HASH_SIZE(head) \
_HASH_SIZE((head).tabshift)
-#define _HASH_KEY(tabshift, val) \
- ((val) >> (33 - (tabshift)))
+#define _HASH_KEY(tabshift, val) \
+ ({ \
+ assume((tabshift) >= 2 && (tabshift) <= 33); \
+ (val) >> (33 - (tabshift)); \
+ })
#define HASH_KEY(head, val) \
_HASH_KEY((head).tabshift, val)
#define HASH_GROW_THRESHOLD(head) \
macro_pure bool prefix ## _member(const struct prefix##_head *h, \
const type *item) \
{ \
+ if (!h->hh.tabshift) \
+ return NULL; \
uint32_t hval = item->field.hi.hashval, hbits = HASH_KEY(h->hh, hval); \
const struct thash_item *hitem = h->hh.entries[hbits]; \
while (hitem && hitem->hashval < hval) \
static uint64_t mgmt_client_id_next;
static uint64_t mgmt_last_req_id = UINT64_MAX;
+static bool vty_debug;
+#define VTY_DBG(fmt, ...) \
+ do { \
+ if (vty_debug) \
+ zlog_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
PREDECL_DLIST(vtyservs);
struct vty_serv {
static void vty_mgmt_server_connected(uintptr_t lib_hndl, uintptr_t usr_data,
bool connected)
{
- zlog_err("%sGot %sconnected %s MGMTD Frontend Server",
- !connected ? "ERROR: " : "", !connected ? "dis: " : "",
- !connected ? "from" : "to");
+ VTY_DBG("%sGot %sconnected %s MGMTD Frontend Server",
+ !connected ? "ERROR: " : "", !connected ? "dis: " : "",
+ !connected ? "from" : "to");
mgmt_fe_connected = connected;
vty = (struct vty *)session_ctx;
if (!success) {
- zlog_err("%s session for client %llu failed!",
- create ? "Creating" : "Destroying",
- (unsigned long long)client_id);
+ zlog_err("%s session for client %" PRIu64 " failed!",
+ create ? "Creating" : "Destroying", client_id);
return;
}
- zlog_err("%s session for client %llu successfully!",
- create ? "Created" : "Destroyed",
- (unsigned long long)client_id);
+ VTY_DBG("%s session for client %" PRIu64 " successfully",
+ create ? "Created" : "Destroyed", client_id);
if (create)
vty->mgmt_session_id = session_id;
}
vty = (struct vty *)session_ctx;
if (!success) {
- zlog_err("%socking for DS %u failed! Err: '%s'",
+ zlog_err("%socking for DS %u failed, Err: '%s'",
lock_ds ? "L" : "Unl", ds_id, errmsg_if_any);
- vty_out(vty, "ERROR: %socking for DS %u failed! Err: '%s'\n",
+ vty_out(vty, "ERROR: %socking for DS %u failed, Err: '%s'\n",
lock_ds ? "L" : "Unl", ds_id, errmsg_if_any);
} else {
- zlog_err("%socked DS %u successfully!", lock_ds ? "L" : "Unl",
- ds_id);
+ VTY_DBG("%socked DS %u successfully", lock_ds ? "L" : "Unl",
+ ds_id);
}
vty_mgmt_resume_response(vty, success);
vty = (struct vty *)session_ctx;
if (!success) {
- zlog_err(
- "SET_CONFIG request for client 0x%llx failed! Error: '%s'",
- (unsigned long long)client_id,
- errmsg_if_any ? errmsg_if_any : "Unknown");
- vty_out(vty, "ERROR: SET_CONFIG request failed! Error: %s\n",
+ zlog_err("SET_CONFIG request for client 0x%" PRIx64
+ " failed, Error: '%s'",
+ client_id, errmsg_if_any ? errmsg_if_any : "Unknown");
+ vty_out(vty, "ERROR: SET_CONFIG request failed, Error: %s\n",
errmsg_if_any ? errmsg_if_any : "Unknown");
} else {
- zlog_err(
- "SET_CONFIG request for client 0x%llx req-id %llu was successfull!",
- (unsigned long long)client_id,
- (unsigned long long)req_id);
+ VTY_DBG("SET_CONFIG request for client 0x%" PRIx64
+ " req-id %" PRIu64 " was successfull",
+ client_id, req_id);
}
vty_mgmt_resume_response(vty, success);
vty = (struct vty *)session_ctx;
if (!success) {
- zlog_err(
- "COMMIT_CONFIG request for client 0x%llx failed! Error: '%s'",
- (unsigned long long)client_id,
- errmsg_if_any ? errmsg_if_any : "Unknown");
- vty_out(vty, "ERROR: COMMIT_CONFIG request failed! Error: %s\n",
+ zlog_err("COMMIT_CONFIG request for client 0x%" PRIx64
+ " failed, Error: '%s'",
+ client_id, errmsg_if_any ? errmsg_if_any : "Unknown");
+ vty_out(vty, "ERROR: COMMIT_CONFIG request failed, Error: %s\n",
errmsg_if_any ? errmsg_if_any : "Unknown");
} else {
- zlog_err(
- "COMMIT_CONFIG request for client 0x%llx req-id %llu was successfull!",
- (unsigned long long)client_id,
- (unsigned long long)req_id);
+ VTY_DBG("COMMIT_CONFIG request for client 0x%" PRIx64
+ " req-id %" PRIu64 " was successfull",
+ client_id, req_id);
if (errmsg_if_any)
vty_out(vty, "MGMTD: %s\n", errmsg_if_any);
}
vty = (struct vty *)session_ctx;
if (!success) {
- zlog_err(
- "GET_DATA request for client 0x%llx failed! Error: '%s'",
- (unsigned long long)client_id,
- errmsg_if_any ? errmsg_if_any : "Unknown");
- vty_out(vty, "ERROR: GET_DATA request failed! Error: %s\n",
+ zlog_err("GET_DATA request for client 0x%" PRIx64
+ " failed, Error: '%s'",
+ client_id, errmsg_if_any ? errmsg_if_any : "Unknown");
+ vty_out(vty, "ERROR: GET_DATA request failed, Error: %s\n",
errmsg_if_any ? errmsg_if_any : "Unknown");
vty_mgmt_resume_response(vty, success);
return MGMTD_INTERNAL_ERROR;
}
- zlog_debug(
- "GET_DATA request for client 0x%llx req-id %llu was successfull!",
- (unsigned long long)client_id, (unsigned long long)req_id);
+ VTY_DBG("GET_DATA request succeeded, client 0x%" PRIx64
+ " req-id %" PRIu64,
+ client_id, req_id);
if (req_id != mgmt_last_req_id) {
mgmt_last_req_id = req_id;
ret = mgmt_fe_lock_ds(mgmt_lib_hndl, vty->mgmt_session_id,
vty->mgmt_req_id, ds_id, lock);
if (ret != MGMTD_SUCCESS) {
- zlog_err(
- "Failed to send %sLOCK-DS-REQ to MGMTD for req-id %llu.",
- lock ? "" : "UN",
- (unsigned long long)vty->mgmt_req_id);
- vty_out(vty, "Failed to send %sLOCK-DS-REQ to MGMTD!",
+ zlog_err("Failed sending %sLOCK-DS-REQ req-id %" PRIu64,
+ lock ? "" : "UN", vty->mgmt_req_id);
+ vty_out(vty, "Failed to send %sLOCK-DS-REQ to MGMTD!\n",
lock ? "" : "UN");
return -1;
}
MGMTD_DS_RUNNING) != MGMTD_SUCCESS) {
zlog_err("Failed to send %d Config Xpaths to MGMTD!!",
(int)indx);
+ vty_out(vty, "Failed to send SETCFG-REQ to MGMTD!\n");
return -1;
}
MGMTD_DS_CANDIDATE, MGMTD_DS_RUNNING, validate_only,
abort);
if (ret != MGMTD_SUCCESS) {
- zlog_err(
- "Failed to send COMMIT-REQ to MGMTD for req-id %llu.",
- (unsigned long long)vty->mgmt_req_id);
- vty_out(vty, "Failed to send COMMIT-REQ to MGMTD!");
+ zlog_err("Failed sending COMMIT-REQ req-id %" PRIu64,
+ vty->mgmt_req_id);
+ vty_out(vty, "Failed to send COMMIT-REQ to MGMTD!\n");
return -1;
}
num_req);
if (ret != MGMTD_SUCCESS) {
- zlog_err("Failed to send GET-CONFIG to MGMTD for req-id %llu.",
- (unsigned long long)vty->mgmt_req_id);
- vty_out(vty, "Failed to send GET-CONFIG to MGMTD!");
+ zlog_err(
+ "Failed to send GET-CONFIG to MGMTD for req-id %" PRIu64
+ ".",
+ vty->mgmt_req_id);
+ vty_out(vty, "Failed to send GET-CONFIG to MGMTD!\n");
return -1;
}
vty->mgmt_req_id, datastore, getreq, num_req);
if (ret != MGMTD_SUCCESS) {
- zlog_err("Failed to send GET-DATA to MGMTD for req-id %llu.",
- (unsigned long long)vty->mgmt_req_id);
- vty_out(vty, "Failed to send GET-DATA to MGMTD!");
+ zlog_err("Failed to send GET-DATA to MGMTD for req-id %" PRIu64
+ ".",
+ vty->mgmt_req_id);
+ vty_out(vty, "Failed to send GET-DATA to MGMTD!\n");
return -1;
}
#define MGMTD_DS_DBG(fmt, ...) \
do { \
if (mgmt_debug_ds) \
- zlog_err("%s: " fmt, __func__, ##__VA_ARGS__); \
+ zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MGMTD_DS_ERR(fmt, ...) \
zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
#define MGMTD_TXN_ERR(fmt, ...) \
fprintf(stderr, "%s: ERROR, " fmt "\n", __func__, ##__VA_ARGS__)
#else /* REDIRECT_DEBUG_TO_STDERR */
-#define MGMTD_TXN_DBG(fmt, ...) \
+#define MGMTD_TXN_DBG(fmt, ...) \
do { \
- if (mgmt_debug_txn) \
- zlog_err("%s: " fmt, __func__, ##__VA_ARGS__); \
+ if (mgmt_debug_txn) \
+ zlog_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MGMTD_TXN_ERR(fmt, ...) \
zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
&nifp->afi[afi].nhslist_head, nhs)
nhrp_nhs_free(nifp, afi, nhs);
}
+ nhrp_peer_interface_del(ifp);
}
}
* everything else is thrown into pkt for creation of state in pass 2
*/
static void gm_handle_v2_pass1(struct gm_packet_state *pkt,
- struct mld_v2_rec_hdr *rechdr)
+ struct mld_v2_rec_hdr *rechdr, size_t n_src)
{
/* NB: pkt->subscriber can be NULL here if the subscriber was not
* previously seen!
struct gm_sg *grp;
struct gm_packet_sg *old_grp = NULL;
struct gm_packet_sg *item;
- size_t n_src = ntohs(rechdr->n_src);
size_t j;
bool is_excl = false;
return;
}
- /* errors after this may at least partially process the packet */
- gm_ifp->stats.rx_new_report++;
-
hdr = (struct mld_v2_report_hdr *)data;
data += sizeof(*hdr);
len -= sizeof(*hdr);
+ n_records = ntohs(hdr->n_records);
+ if (n_records > len / sizeof(struct mld_v2_rec_hdr)) {
+ /* note this is only an upper bound, records with source lists
+ * are larger. This is mostly here to make coverity happy.
+ */
+ zlog_warn(log_pkt_src(
+ "malformed MLDv2 report (infeasible record count)"));
+ gm_ifp->stats.rx_drop_malformed++;
+ return;
+ }
+
+ /* errors after this may at least partially process the packet */
+ gm_ifp->stats.rx_new_report++;
+
/* can't have more *,G and S,G items than there is space for ipv6
* addresses, so just use this to allocate temporary buffer
*/
pkt->iface = gm_ifp;
pkt->subscriber = gm_subscriber_findref(gm_ifp, pkt_src->sin6_addr);
- n_records = ntohs(hdr->n_records);
-
/* validate & remove state in v2_pass1() */
for (i = 0; i < n_records; i++) {
struct mld_v2_rec_hdr *rechdr;
data += record_size;
len -= record_size;
- gm_handle_v2_pass1(pkt, rechdr);
+ gm_handle_v2_pass1(pkt, rechdr, n_src);
}
if (!pkt->n_active) {
gm_handle_q_group(gm_ifp, &timers, hdr->grp);
gm_ifp->stats.rx_query_new_group++;
} else {
+ /* this is checked above:
+ * if (len >= sizeof(struct mld_v2_query_hdr)) {
+ * size_t src_space = ntohs(hdr->n_src) * sizeof(pim_addr);
+ * if (len < sizeof(struct mld_v2_query_hdr) + src_space) {
+ */
+ assume(ntohs(hdr->n_src) <=
+ (len - sizeof(struct mld_v2_query_hdr)) /
+ sizeof(pim_addr));
+
gm_handle_q_groupsrc(gm_ifp, &timers, hdr->grp, hdr->srcs,
ntohs(hdr->n_src));
gm_ifp->stats.rx_query_new_groupsrc++;
if (!pim_ifp->mld) {
changed = true;
gm_start(ifp);
+ assume(pim_ifp->mld != NULL);
}
gm_ifp = pim_ifp->mld;
struct gm_if *gm_ifp = pim_ifp->mld;
bool querier;
+ assume(js_if || tt);
+
querier = IPV6_ADDR_SAME(&gm_ifp->querier, &pim_ifp->ll_lowest);
if (js_if) {
if (js) {
js_if = json_object_new_object();
+ /*
+ * If we have js as true and detail as false
+ * and if Coverity thinks that js_if is NULL
+ * because of a failed call to new then
+ * when we call gm_show_if_one below
+ * the tt can be deref'ed and as such
+ * FRR will crash. But since we know
+ * that json_object_new_object never fails
+ * then let's tell Coverity that this assumption
+ * is true. I'm not worried about fast path
+ * here at all.
+ */
+ assert(js_if);
json_object_object_add(js_vrf, ifp->name, js_if);
}
vrf_id, new_vrf_id);
pim = pim_get_pim_instance(new_vrf_id);
+ if (!pim)
+ return 0;
if_update_to_new_vrf(ifp, new_vrf_id);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RIP BFD integration.
+ * Copyright (C) 2021-2023 Network Device Education Foundation, Inc. ("NetDEF")
+ */
+
+#include <zebra.h>
+
+#include "lib/zclient.h"
+#include "lib/bfd.h"
+
+#include "ripd/ripd.h"
+#include "ripd/rip_bfd.h"
+#include "ripd/rip_debug.h"
+
+extern struct zclient *zclient;
+
+/*
+ * Resolve the BFD profile name to use for sessions on this interface:
+ * the interface-specific profile takes precedence, then the
+ * instance-wide default; NULL when neither is configured.
+ */
+static const char *rip_bfd_interface_profile(struct rip_interface *ri)
+{
+	struct rip *rip = ri->rip;
+
+	if (ri->bfd.profile)
+		return ri->bfd.profile;
+
+	if (rip->default_bfd_profile)
+		return rip->default_bfd_profile;
+
+	return NULL;
+}
+
+/*
+ * BFD session state-change callback (registered via bfd_sess_new()).
+ *
+ * On an up->down transition the peer is considered dead immediately:
+ * withdraw every route learned from it and remove the peer, instead of
+ * waiting for the regular RIP timeout.  An up transition is only logged.
+ *
+ * Both struct bfd_session_status state fields are of type
+ * `enum bfd_session_state`, so compare against the BSS_* enumerators
+ * consistently (the original mixed BFD_STATUS_* macros and BSS_*
+ * enumerators between the two branches; the values are defined equal in
+ * lib/bfd.h, but the enum constants are the type-correct spelling).
+ */
+static void rip_bfd_session_change(struct bfd_session_params *bsp,
+				   const struct bfd_session_status *bss,
+				   void *arg)
+{
+	struct rip_peer *rp = arg;
+
+	/* BFD peer went down: flush its routes and drop the peer. */
+	if (bss->state == BSS_DOWN && bss->previous_state == BSS_UP) {
+		if (IS_RIP_DEBUG_EVENT)
+			zlog_debug("%s: peer %pI4: BFD Down", __func__,
+				   &rp->addr);
+
+		rip_peer_delete_routes(rp);
+		listnode_delete(rp->rip->peer_list, rp);
+		rip_peer_free(rp);
+		return;
+	}
+
+	/* BFD peer went up: nothing to do besides logging. */
+	if (bss->state == BSS_UP && bss->previous_state == BSS_DOWN)
+		if (IS_RIP_DEBUG_EVENT)
+			zlog_debug("%s: peer %pI4: BFD Up", __func__,
+				   &rp->addr);
+}
+
+/*
+ * Synchronize one peer's BFD session with the current configuration:
+ * free the session when BFD is disabled (or the peer has no interface),
+ * create it on first use, then (re)apply timers/profile and install.
+ */
+void rip_bfd_session_update(struct rip_peer *rp)
+{
+	struct rip_interface *ri = rp->ri;
+
+	/* BFD configuration was removed. */
+	if (ri == NULL || !ri->bfd.enabled) {
+		/* bfd_sess_free() is a no-op on a NULL session. */
+		bfd_sess_free(&rp->bfd_session);
+		return;
+	}
+
+	/* New BFD session: addresses/interface/VRF are set only once. */
+	if (rp->bfd_session == NULL) {
+		rp->bfd_session = bfd_sess_new(rip_bfd_session_change, rp);
+		bfd_sess_set_ipv4_addrs(rp->bfd_session, NULL, &rp->addr);
+		bfd_sess_set_interface(rp->bfd_session, ri->ifp->name);
+		bfd_sess_set_vrf(rp->bfd_session, rp->rip->vrf->vrf_id);
+	}
+
+	/* Set new configuration; re-applied on every update so profile
+	 * changes take effect on existing sessions too. */
+	bfd_sess_set_timers(rp->bfd_session, BFD_DEF_DETECT_MULT,
+			    BFD_DEF_MIN_RX, BFD_DEF_MIN_TX);
+	bfd_sess_set_profile(rp->bfd_session, rip_bfd_interface_profile(ri));
+
+	bfd_sess_install(rp->bfd_session);
+}
+
+/*
+ * Refresh the BFD sessions of every peer learned on this interface
+ * (e.g. after `ip rip bfd` or its profile changed).
+ */
+void rip_bfd_interface_update(struct rip_interface *ri)
+{
+	struct rip *rip;
+	struct rip_peer *rp;
+	struct listnode *node;
+
+	rip = ri->rip;
+	if (!rip)
+		return;
+
+	for (ALL_LIST_ELEMENTS_RO(rip->peer_list, node, rp)) {
+		/* Only peers attached to this interface. */
+		if (rp->ri != ri)
+			continue;
+
+		rip_bfd_session_update(rp);
+	}
+}
+
+/*
+ * Refresh BFD sessions for every interface of this RIP instance
+ * (used when the instance default BFD profile changes).
+ */
+void rip_bfd_instance_update(struct rip *rip)
+{
+	struct interface *ifp;
+
+	FOR_ALL_INTERFACES (rip->vrf, ifp) {
+		struct rip_interface *ri;
+
+		ri = ifp->info;
+		if (!ri)
+			continue;
+
+		rip_bfd_interface_update(ri);
+	}
+}
+
+/* Register the shared zclient with the library BFD integration layer. */
+void rip_bfd_init(struct event_loop *tm)
+{
+	bfd_protocol_integration_init(zclient, tm);
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RIP BFD integration.
+ * Copyright (C) 2021-2023 Network Device Education Foundation, Inc. ("NetDEF")
+ */
+
+#ifndef _RIP_BFD_
+#define _RIP_BFD_
+
+#include "frrevent.h"
+
+/* Forward declarations; full definitions live in ripd/ripd.h. */
+struct rip;
+struct rip_interface;
+struct rip_peer;
+
+/* Create/reconfigure/remove the BFD session of a single peer. */
+void rip_bfd_session_update(struct rip_peer *rp);
+/* Refresh BFD sessions of all peers on one interface. */
+void rip_bfd_interface_update(struct rip_interface *ri);
+/* Refresh BFD sessions for the whole RIP instance. */
+void rip_bfd_instance_update(struct rip *rip);
+/* Hook the BFD protocol integration into the main event loop. */
+void rip_bfd_init(struct event_loop *tm);
+
+#endif /* _RIP_BFD_ */
}
}
+/*
+ * XPath: /frr-ripd:ripd/instance/default-bfd-profile
+ */
+DEFPY_YANG(rip_bfd_default_profile, rip_bfd_default_profile_cmd,
+	   "bfd default-profile BFDPROF$profile",
+	   "Bidirectional Forwarding Detection\n"
+	   "BFD default profile\n"
+	   "Profile name\n")
+{
+	nb_cli_enqueue_change(vty, "./default-bfd-profile", NB_OP_MODIFY,
+			      profile);
+
+	return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFPY_YANG(no_rip_bfd_default_profile, no_rip_bfd_default_profile_cmd,
+	   "no bfd default-profile [BFDPROF]",
+	   NO_STR
+	   "Bidirectional Forwarding Detection\n"
+	   "BFD default profile\n"
+	   "Profile name\n")
+{
+	/* The optional profile argument is accepted for CLI symmetry but
+	 * not used: destroy removes whatever value is configured. */
+	nb_cli_enqueue_change(vty, "./default-bfd-profile", NB_OP_DESTROY,
+			      NULL);
+
+	return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Running-config output for the instance default BFD profile. */
+void cli_show_ripd_instance_default_bfd_profile(struct vty *vty,
+						const struct lyd_node *dnode,
+						bool show_defaults)
+{
+	vty_out(vty, " bfd default-profile %s\n",
+		yang_dnode_get_string(dnode, NULL));
+}
+
+
/*
* XPath: /frr-interface:lib/interface/frr-ripd:rip/split-horizon
*/
yang_dnode_get_string(dnode, NULL));
}
+/*
+ * XPath: /frr-interface:lib/interface/frr-ripd:rip/bfd-monitoring/enable
+ */
+DEFPY_YANG(ip_rip_bfd, ip_rip_bfd_cmd, "[no] ip rip bfd",
+	   NO_STR IP_STR
+	   "Routing Information Protocol\n"
+	   "Enable BFD support\n")
+{
+	nb_cli_enqueue_change(vty, "./bfd-monitoring/enable", NB_OP_MODIFY,
+			      no ? "false" : "true");
+
+	return nb_cli_apply_changes(vty, "./frr-ripd:rip");
+}
+
+/* Running-config output for per-interface BFD enablement. */
+void cli_show_ip_rip_bfd_enable(struct vty *vty, const struct lyd_node *dnode,
+				bool show_defaults)
+{
+	vty_out(vty, " ip rip bfd\n");
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-ripd:rip/bfd/profile
+ */
+DEFPY_YANG(ip_rip_bfd_profile, ip_rip_bfd_profile_cmd,
+	   "[no] ip rip bfd profile BFDPROF$profile",
+	   NO_STR IP_STR
+	   "Routing Information Protocol\n"
+	   "Enable BFD support\n"
+	   "Use a pre-configured profile\n"
+	   "Profile name\n")
+{
+	if (no)
+		nb_cli_enqueue_change(vty, "./bfd-monitoring/profile",
+				      NB_OP_DESTROY, NULL);
+	else
+		nb_cli_enqueue_change(vty, "./bfd-monitoring/profile",
+				      NB_OP_MODIFY, profile);
+
+	return nb_cli_apply_changes(vty, "./frr-ripd:rip");
+}
+
+/* Variant of the `no` form that omits the profile-name argument. */
+DEFPY_YANG(no_ip_rip_bfd_profile, no_ip_rip_bfd_profile_cmd,
+	   "no ip rip bfd profile",
+	   NO_STR IP_STR
+	   "Routing Information Protocol\n"
+	   "Enable BFD support\n"
+	   "Use a pre-configured profile\n")
+{
+	nb_cli_enqueue_change(vty, "./bfd-monitoring/profile", NB_OP_DESTROY,
+			      NULL);
+	return nb_cli_apply_changes(vty, "./frr-ripd:rip");
+}
+
+/* Running-config output for the per-interface BFD profile. */
+void cli_show_ip_rip_bfd_profile(struct vty *vty, const struct lyd_node *dnode,
+				 bool show_defaults)
+{
+	vty_out(vty, " ip rip bfd profile %s\n",
+		yang_dnode_get_string(dnode, NULL));
+}
+
+
/*
* XPath: /frr-ripd:clear-rip-route
*/
install_element(RIP_NODE, &no_rip_timers_cmd);
install_element(RIP_NODE, &rip_version_cmd);
install_element(RIP_NODE, &no_rip_version_cmd);
+ install_element(RIP_NODE, &rip_bfd_default_profile_cmd);
+ install_element(RIP_NODE, &no_rip_bfd_default_profile_cmd);
install_element(INTERFACE_NODE, &ip_rip_split_horizon_cmd);
install_element(INTERFACE_NODE, &ip_rip_v2_broadcast_cmd);
install_element(INTERFACE_NODE, &ip_rip_authentication_key_chain_cmd);
install_element(INTERFACE_NODE,
&no_ip_rip_authentication_key_chain_cmd);
+ install_element(INTERFACE_NODE, &ip_rip_bfd_cmd);
+ install_element(INTERFACE_NODE, &ip_rip_bfd_profile_cmd);
+ install_element(INTERFACE_NODE, &no_ip_rip_bfd_profile_cmd);
install_element(ENABLE_NODE, &clear_ip_rip_cmd);
}
ri->sent_updates = 0;
ri->passive = 0;
+ XFREE(MTYPE_TMP, ri->bfd.profile);
rip_interface_clean(ri);
}
struct rip_interface *ri;
ri = ifp->info;
+ ri->ifp = ifp;
if (ri)
ri->rip = ifp->vrf->info;
}
#include "if_rmap.h"
#include "libfrr.h"
#include "routemap.h"
+#include "bfd.h"
#include "ripd/ripd.h"
+#include "ripd/rip_bfd.h"
#include "ripd/rip_nb.h"
#include "ripd/rip_errors.h"
{
zlog_notice("Terminating on signal");
+ bfd_protocol_integration_set_shutdown(true);
rip_vrf_terminate();
if_rmap_terminate();
rip_zclient_stop();
rip_if_init();
rip_cli_init();
rip_zclient_init(master);
+ rip_bfd_init(master);
frr_config_fork();
frr_run(master);
.modify = ripd_instance_version_send_modify,
},
},
+ {
+ .xpath = "/frr-ripd:ripd/instance/default-bfd-profile",
+ .cbs = {
+ .modify = ripd_instance_default_bfd_profile_modify,
+ .destroy = ripd_instance_default_bfd_profile_destroy,
+ .cli_show = cli_show_ripd_instance_default_bfd_profile,
+ },
+ },
{
.xpath = "/frr-interface:lib/interface/frr-ripd:rip/split-horizon",
.cbs = {
.modify = lib_interface_rip_authentication_key_chain_modify,
},
},
+ {
+ .xpath = "/frr-interface:lib/interface/frr-ripd:rip/bfd-monitoring",
+ .cbs = {
+ .create = lib_interface_rip_bfd_create,
+ .destroy = lib_interface_rip_bfd_destroy,
+ },
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-ripd:rip/bfd-monitoring/enable",
+ .cbs = {
+ .cli_show = cli_show_ip_rip_bfd_enable,
+ .modify = lib_interface_rip_bfd_enable_modify,
+ },
+ },
+ {
+ .xpath = "/frr-interface:lib/interface/frr-ripd:rip/bfd-monitoring/profile",
+ .cbs = {
+ .cli_show = cli_show_ip_rip_bfd_profile,
+ .modify = lib_interface_rip_bfd_profile_modify,
+ .destroy = lib_interface_rip_bfd_profile_destroy,
+ },
+ },
{
.xpath = "/frr-ripd:ripd/instance/state/neighbors/neighbor",
.cbs = {
int ripd_instance_timers_update_interval_modify(struct nb_cb_modify_args *args);
int ripd_instance_version_receive_modify(struct nb_cb_modify_args *args);
int ripd_instance_version_send_modify(struct nb_cb_modify_args *args);
+int ripd_instance_default_bfd_profile_modify(struct nb_cb_modify_args *args);
+int ripd_instance_default_bfd_profile_destroy(struct nb_cb_destroy_args *args);
const void *ripd_instance_state_neighbors_neighbor_get_next(
struct nb_cb_get_next_args *args);
int ripd_instance_state_neighbors_neighbor_get_keys(
struct nb_cb_modify_args *args);
int lib_interface_rip_authentication_key_chain_destroy(
struct nb_cb_destroy_args *args);
+int lib_interface_rip_bfd_create(struct nb_cb_create_args *args);
+int lib_interface_rip_bfd_destroy(struct nb_cb_destroy_args *args);
+int lib_interface_rip_bfd_enable_modify(struct nb_cb_modify_args *args);
+int lib_interface_rip_bfd_enable_destroy(struct nb_cb_destroy_args *args);
+int lib_interface_rip_bfd_profile_modify(struct nb_cb_modify_args *args);
+int lib_interface_rip_bfd_profile_destroy(struct nb_cb_destroy_args *args);
/* Optional 'apply_finish' callbacks. */
void ripd_instance_redistribute_apply_finish(
bool show_defaults);
void cli_show_ip_rip_send_version(struct vty *vty, const struct lyd_node *dnode,
bool show_defaults);
+void cli_show_ripd_instance_default_bfd_profile(struct vty *vty,
+ const struct lyd_node *dnode,
+ bool show_defaults);
void cli_show_ip_rip_authentication_scheme(struct vty *vty,
const struct lyd_node *dnode,
bool show_defaults);
void cli_show_ip_rip_authentication_key_chain(struct vty *vty,
const struct lyd_node *dnode,
bool show_defaults);
+void cli_show_ip_rip_bfd_enable(struct vty *vty, const struct lyd_node *dnode,
+ bool show_defaults);
+void cli_show_ip_rip_bfd_profile(struct vty *vty, const struct lyd_node *dnode,
+ bool show_defaults);
/* Notifications. */
extern void ripd_notif_send_auth_type_failure(const char *ifname);
#include "ripd/rip_nb.h"
#include "ripd/rip_debug.h"
#include "ripd/rip_interface.h"
+#include "ripd/rip_bfd.h"
+
+DEFINE_MTYPE_STATIC(RIPD, RIP_BFD_PROFILE, "RIP BFD profile name");
/*
* XPath: /frr-ripd:ripd/instance
rip = nb_running_get_entry(args->dnode, NULL, true);
ifname = yang_dnode_get_string(args->dnode, NULL);
- return rip_passive_nondefault_unset(rip, ifname);
+ return rip_passive_nondefault_set(rip, ifname);
}
int ripd_instance_non_passive_interface_destroy(struct nb_cb_destroy_args *args)
rip = nb_running_get_entry(args->dnode, NULL, true);
ifname = yang_dnode_get_string(args->dnode, NULL);
- return rip_passive_nondefault_set(rip, ifname);
+ return rip_passive_nondefault_unset(rip, ifname);
}
/*
return NB_OK;
}
+/*
+ * XPath: /frr-ripd:ripd/instance/default-bfd-profile
+ */
+int ripd_instance_default_bfd_profile_modify(struct nb_cb_modify_args *args)
+{
+	struct rip *rip;
+
+	/* Configuration is only committed in the APPLY phase. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	rip = nb_running_get_entry(args->dnode, NULL, true);
+	/* Free any previous value so repeated modifies do not leak. */
+	XFREE(MTYPE_RIP_BFD_PROFILE, rip->default_bfd_profile);
+	rip->default_bfd_profile =
+		XSTRDUP(MTYPE_RIP_BFD_PROFILE,
+			yang_dnode_get_string(args->dnode, NULL));
+	/* Push the new default to all existing BFD sessions. */
+	rip_bfd_instance_update(rip);
+
+	return NB_OK;
+}
+
+int ripd_instance_default_bfd_profile_destroy(struct nb_cb_destroy_args *args)
+{
+	struct rip *rip;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	rip = nb_running_get_entry(args->dnode, NULL, true);
+	XFREE(MTYPE_RIP_BFD_PROFILE, rip->default_bfd_profile);
+	rip_bfd_instance_update(rip);
+
+	return NB_OK;
+}
+
+
/*
* XPath: /frr-interface:lib/interface/frr-ripd:rip/split-horizon
*/
return NB_OK;
}
+/*
+ * XPath: /frr-interface:lib/interface/frr-ripd:rip/bfd-monitoring
+ */
+int lib_interface_rip_bfd_create(struct nb_cb_create_args *args)
+{
+	struct interface *ifp;
+	struct rip_interface *ri;
+
+	/* Configuration is only committed in the APPLY phase. */
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	ifp = nb_running_get_entry(args->dnode, NULL, true);
+	ri = ifp->info;
+	/* Container create carries both children: read them directly. */
+	ri->bfd.enabled = yang_dnode_get_bool(args->dnode, "./enable");
+	XFREE(MTYPE_RIP_BFD_PROFILE, ri->bfd.profile);
+	if (yang_dnode_exists(args->dnode, "./profile"))
+		ri->bfd.profile = XSTRDUP(
+			MTYPE_RIP_BFD_PROFILE,
+			yang_dnode_get_string(args->dnode, "./profile"));
+
+	rip_bfd_interface_update(ri);
+
+	return NB_OK;
+}
+
+int lib_interface_rip_bfd_destroy(struct nb_cb_destroy_args *args)
+{
+	struct interface *ifp;
+	struct rip_interface *ri;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	ifp = nb_running_get_entry(args->dnode, NULL, true);
+	ri = ifp->info;
+	/* Disable + drop profile; the update below frees sessions. */
+	ri->bfd.enabled = false;
+	XFREE(MTYPE_RIP_BFD_PROFILE, ri->bfd.profile);
+	rip_bfd_interface_update(ri);
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-ripd:rip/bfd-monitoring/enable
+ */
+int lib_interface_rip_bfd_enable_modify(struct nb_cb_modify_args *args)
+{
+	struct interface *ifp;
+	struct rip_interface *ri;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	ifp = nb_running_get_entry(args->dnode, NULL, true);
+	ri = ifp->info;
+	ri->bfd.enabled = yang_dnode_get_bool(args->dnode, NULL);
+	rip_bfd_interface_update(ri);
+
+	return NB_OK;
+}
+
+/*
+ * XPath: /frr-interface:lib/interface/frr-ripd:rip/bfd-monitoring/profile
+ */
+int lib_interface_rip_bfd_profile_modify(struct nb_cb_modify_args *args)
+{
+	struct interface *ifp;
+	struct rip_interface *ri;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	ifp = nb_running_get_entry(args->dnode, NULL, true);
+	ri = ifp->info;
+	/* Free any previous value so repeated modifies do not leak. */
+	XFREE(MTYPE_RIP_BFD_PROFILE, ri->bfd.profile);
+	ri->bfd.profile = XSTRDUP(MTYPE_RIP_BFD_PROFILE,
+				  yang_dnode_get_string(args->dnode, NULL));
+	rip_bfd_interface_update(ri);
+
+	return NB_OK;
+}
+
+int lib_interface_rip_bfd_profile_destroy(struct nb_cb_destroy_args *args)
+{
+	struct interface *ifp;
+	struct rip_interface *ri;
+
+	if (args->event != NB_EV_APPLY)
+		return NB_OK;
+
+	ifp = nb_running_get_entry(args->dnode, NULL, true);
+	ri = ifp->info;
+	XFREE(MTYPE_RIP_BFD_PROFILE, ri->bfd.profile);
+	rip_bfd_interface_update(ri);
+
+	return NB_OK;
+}
+
+
/*
* XPath: /frr-interface:lib/interface/frr-ripd:rip/authentication-key-chain
*/
#include "linklist.h"
#include "frrevent.h"
#include "memory.h"
+#include "table.h"
#include "ripd/ripd.h"
+#include "ripd/rip_bfd.h"
DEFINE_MTYPE_STATIC(RIPD, RIP_PEER, "RIP peer");
return XCALLOC(MTYPE_RIP_PEER, sizeof(struct rip_peer));
}
-static void rip_peer_free(struct rip_peer *peer)
+void rip_peer_free(struct rip_peer *peer)
{
+ bfd_sess_free(&peer->bfd_session);
EVENT_OFF(peer->t_timeout);
XFREE(MTYPE_RIP_PEER, peer);
}
}
/* Get RIP peer. At the same time update timeout thread. */
-static struct rip_peer *rip_peer_get(struct rip *rip, struct in_addr *addr)
+static struct rip_peer *rip_peer_get(struct rip *rip, struct rip_interface *ri,
+ struct in_addr *addr)
{
struct rip_peer *peer;
} else {
peer = rip_peer_new();
peer->rip = rip;
+ peer->ri = ri;
peer->addr = *addr;
+ rip_bfd_session_update(peer);
listnode_add_sort(rip->peer_list, peer);
}
return peer;
}
-void rip_peer_update(struct rip *rip, struct sockaddr_in *from, uint8_t version)
+void rip_peer_update(struct rip *rip, struct rip_interface *ri,
+ struct sockaddr_in *from, uint8_t version)
{
struct rip_peer *peer;
- peer = rip_peer_get(rip, &from->sin_addr);
+ peer = rip_peer_get(rip, ri, &from->sin_addr);
peer->version = version;
}
-void rip_peer_bad_route(struct rip *rip, struct sockaddr_in *from)
+void rip_peer_bad_route(struct rip *rip, struct rip_interface *ri,
+ struct sockaddr_in *from)
{
struct rip_peer *peer;
- peer = rip_peer_get(rip, &from->sin_addr);
+ peer = rip_peer_get(rip, ri, &from->sin_addr);
peer->recv_badroutes++;
}
-void rip_peer_bad_packet(struct rip *rip, struct sockaddr_in *from)
+void rip_peer_bad_packet(struct rip *rip, struct rip_interface *ri,
+ struct sockaddr_in *from)
{
struct rip_peer *peer;
- peer = rip_peer_get(rip, &from->sin_addr);
+ peer = rip_peer_get(rip, ri, &from->sin_addr);
peer->recv_badpackets++;
}
char timebuf[RIP_UPTIME_LEN];
for (ALL_LIST_ELEMENTS(rip->peer_list, node, nnode, peer)) {
- vty_out(vty, " %-16pI4 %9d %9d %9d %s\n",
+ vty_out(vty, " %-17pI4 %9d %9d %9d %11s\n",
&peer->addr, peer->recv_badpackets,
peer->recv_badroutes, ZEBRA_RIP_DISTANCE_DEFAULT,
rip_peer_uptime(peer, timebuf, RIP_UPTIME_LEN));
{
rip_peer_free(arg);
}
+
+/*
+ * Remove from the RIP table every route learned from the given peer.
+ * Called when BFD declares the peer down, short-circuiting the normal
+ * route timeout.
+ */
+void rip_peer_delete_routes(const struct rip_peer *peer)
+{
+	struct route_node *route_node;
+
+	for (route_node = route_top(peer->rip->table); route_node;
+	     route_node = route_next(route_node)) {
+		struct rip_info *route_entry;
+		struct listnode *listnode;
+		struct listnode *listnode_next;
+		struct list *list;
+
+		list = route_node->info;
+		if (list == NULL)
+			continue;
+
+		for (ALL_LIST_ELEMENTS(list, listnode, listnode_next,
+				       route_entry)) {
+			/* Skip entries not learned via RTEs (e.g.
+			 * redistributed/self-originated routes). */
+			if (!rip_route_rte(route_entry))
+				continue;
+			if (route_entry->from.s_addr != peer->addr.s_addr)
+				continue;
+
+			if (listcount(list) == 1) {
+				/* Sole path for this prefix: cancel its
+				 * timers, unhook it and release the node. */
+				EVENT_OFF(route_entry->t_timeout);
+				EVENT_OFF(route_entry->t_garbage_collect);
+				listnode_delete(list, route_entry);
+				if (list_isempty(list)) {
+					list_delete((struct list **)&route_node
+							    ->info);
+					route_unlock_node(route_node);
+				}
+				rip_info_free(route_entry);
+
+				/* Signal the output process to trigger an
+				 * update (see section 2.5). */
+				rip_event(peer->rip, RIP_TRIGGERED_UPDATE, 0);
+			} else
+				rip_ecmp_delete(peer->rip, route_entry);
+			/* NOTE(review): stops after the first matching entry
+			 * per node — assumes at most one ECMP entry from a
+			 * given peer per prefix; confirm against
+			 * rip_ecmp_add() semantics. */
+			break;
+		}
+	}
+}
#include "zclient.h"
#include "log.h"
#include "vrf.h"
+#include "bfd.h"
#include "ripd/ripd.h"
#include "ripd/rip_debug.h"
#include "ripd/rip_interface.h"
vrf->name, vrf->vrf_id);
zclient_send_reg_requests(zclient, vrf->vrf_id);
+ bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER, vrf->vrf_id);
}
void rip_zebra_vrf_deregister(struct vrf *vrf)
vrf->name, vrf->vrf_id);
zclient_send_dereg_requests(zclient, vrf->vrf_id);
+ bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_DEREGISTER, vrf->vrf_id);
}
static void rip_zebra_connected(struct zclient *zclient)
{
zclient_send_reg_requests(zclient, VRF_DEFAULT);
+ bfd_client_sendmsg(zclient, ZEBRA_BFD_CLIENT_REGISTER, VRF_DEFAULT);
}
zclient_handler *const rip_handlers[] = {
if (from->sin_port != htons(RIP_PORT_DEFAULT)) {
zlog_info("response doesn't come from RIP port: %d",
from->sin_port);
- rip_peer_bad_packet(rip, from);
+ rip_peer_bad_packet(rip, ri, from);
return;
}
zlog_info(
"This datagram doesn't come from a valid neighbor: %pI4",
&from->sin_addr);
- rip_peer_bad_packet(rip, from);
+ rip_peer_bad_packet(rip, ri, from);
return;
}
; /* Alredy done in rip_read () */
/* Update RIP peer. */
- rip_peer_update(rip, from, packet->version);
+ rip_peer_update(rip, ri, from, packet->version);
/* Set RTE pointer. */
rte = packet->rte;
if (!rip_destination_check(rte->prefix)) {
zlog_info(
"Network is net 0 or net 127 or it is not unicast network");
- rip_peer_bad_route(rip, from);
+ rip_peer_bad_route(rip, ri, from);
continue;
}
/* - is the metric valid (i.e., between 1 and 16, inclusive) */
if (!(rte->metric >= 1 && rte->metric <= 16)) {
zlog_info("Route's metric is not in the 1-16 range.");
- rip_peer_bad_route(rip, from);
+ rip_peer_bad_route(rip, ri, from);
continue;
}
&& rte->nexthop.s_addr != INADDR_ANY) {
zlog_info("RIPv1 packet with nexthop value %pI4",
&rte->nexthop);
- rip_peer_bad_route(rip, from);
+ rip_peer_bad_route(rip, ri, from);
continue;
}
zlog_warn(
"RIPv2 address %pI4 is not mask /%d applied one",
&rte->prefix, ip_masklen(rte->mask));
- rip_peer_bad_route(rip, from);
+ rip_peer_bad_route(rip, ri, from);
continue;
}
return;
/* RIP peer update. */
- rip_peer_update(rip, from, packet->version);
+ rip_peer_update(rip, ri, from, packet->version);
lim = ((caddr_t)packet) + size;
rte = packet->rte;
socklen_t fromlen;
struct interface *ifp = NULL;
struct connected *ifc;
- struct rip_interface *ri;
+ struct rip_interface *ri = NULL;
struct prefix p;
/* Fetch socket then register myself. */
/* Which interface is this packet comes from. */
ifc = if_lookup_address((void *)&from.sin_addr, AF_INET,
rip->vrf->vrf_id);
- if (ifc)
+ if (ifc) {
ifp = ifc->ifp;
+ ri = ifp->info;
+ }
/* RIP packet received */
if (IS_RIP_DEBUG_EVENT)
ifp ? ifp->name : "unknown", rip->vrf_name);
/* If this packet come from unknown interface, ignore it. */
- if (ifp == NULL) {
+ if (ifp == NULL || ri == NULL) {
zlog_info(
"%s: cannot find interface for packet from %pI4 port %d (VRF %s)",
__func__, &from.sin_addr, ntohs(from.sin_port),
if (len < RIP_PACKET_MINSIZ) {
zlog_warn("packet size %d is smaller than minimum size %d", len,
RIP_PACKET_MINSIZ);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
return;
}
if (len > RIP_PACKET_MAXSIZ) {
zlog_warn("packet size %d is larger than max size %d", len,
RIP_PACKET_MAXSIZ);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
return;
}
if ((len - RIP_PACKET_MINSIZ) % 20) {
zlog_warn("packet size %d is wrong for RIP packet alignment",
len);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
return;
}
if (packet->version == 0) {
zlog_info("version 0 with command %d received.",
packet->command);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
return;
}
packet->version = RIPv2;
/* Is RIP running or is this RIP neighbor ?*/
- ri = ifp->info;
if (!ri->running && !rip_neighbor_lookup(rip, &from)) {
if (IS_RIP_DEBUG_EVENT)
zlog_debug("RIP is not enabled on interface %s.",
ifp->name);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
return;
}
zlog_debug(
" packet's v%d doesn't fit to if version spec",
packet->version);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
return;
}
"packet RIPv%d is dropped because authentication disabled",
packet->version);
ripd_notif_send_auth_type_failure(ifp->name);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
return;
}
zlog_debug(
"RIPv1 dropped because authentication enabled");
ripd_notif_send_auth_type_failure(ifp->name);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
return;
}
} else if (ri->auth_type != RIP_NO_AUTH) {
zlog_debug(
"RIPv2 authentication failed: no auth RTE in packet");
ripd_notif_send_auth_type_failure(ifp->name);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
return;
}
zlog_debug(
"RIPv2 dropped because authentication enabled");
ripd_notif_send_auth_type_failure(ifp->name);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
return;
}
zlog_debug("RIPv2 %s authentication failure",
auth_desc);
ripd_notif_send_auth_failure(ifp->name);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
return;
}
}
zlog_info(
"Obsolete command %s received, please sent it to routed",
lookup_msg(rip_msg, packet->command, NULL));
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
break;
case RIP_POLL_ENTRY:
zlog_info("Obsolete command %s received",
lookup_msg(rip_msg, packet->command, NULL));
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
break;
default:
zlog_info("Unknown RIP command %d received", packet->command);
- rip_peer_bad_packet(rip, &from);
+ rip_peer_bad_packet(rip, ri, &from);
break;
}
}
route_table_finish(rip->distance_table);
RB_REMOVE(rip_instance_head, &rip_instances, rip);
+ XFREE(MTYPE_TMP, rip->default_bfd_profile);
XFREE(MTYPE_RIP_VRF_NAME, rip->vrf_name);
XFREE(MTYPE_RIP, rip);
}
#include "nexthop.h"
#include "distribute.h"
#include "memory.h"
+#include "bfd.h"
/* RIP version number. */
#define RIPv1 1
/* RIP queries. */
long queries;
} counters;
+
+ /* Default BFD profile to use with BFD sessions. */
+ char *default_bfd_profile;
};
RB_HEAD(rip_instance_head, rip);
RB_PROTOTYPE(rip_instance_head, rip, entry, rip_instance_compare)
/* Parent routing instance. */
struct rip *rip;
+ /* Interface data from zebra. */
+ struct interface *ifp;
+
/* RIP is enabled on this interface. */
int enable_network;
int enable_interface;
/* Passive interface. */
int passive;
+
+ /* BFD information. */
+ struct {
+ bool enabled;
+ char *profile;
+ } bfd;
};
/* RIP peer information. */
/* Parent routing instance. */
struct rip *rip;
+ /* Back-pointer to RIP interface. */
+ struct rip_interface *ri;
+
/* Peer address. */
struct in_addr addr;
/* Timeout thread. */
struct event *t_timeout;
+
+ /* BFD information */
+ struct bfd_session_params *bfd_session;
};
struct rip_distance {
extern int rip_show_network_config(struct vty *vty, struct rip *rip);
extern void rip_show_redistribute_config(struct vty *vty, struct rip *rip);
-extern void rip_peer_update(struct rip *rip, struct sockaddr_in *from,
- uint8_t version);
-extern void rip_peer_bad_route(struct rip *rip, struct sockaddr_in *from);
-extern void rip_peer_bad_packet(struct rip *rip, struct sockaddr_in *from);
+extern void rip_peer_free(struct rip_peer *peer);
+extern void rip_peer_update(struct rip *rip, struct rip_interface *ri,
+ struct sockaddr_in *from, uint8_t version);
+extern void rip_peer_bad_route(struct rip *rip, struct rip_interface *ri,
+ struct sockaddr_in *from);
+extern void rip_peer_bad_packet(struct rip *rip, struct rip_interface *ri,
+ struct sockaddr_in *from);
extern void rip_peer_display(struct vty *vty, struct rip *rip);
extern struct rip_peer *rip_peer_lookup(struct rip *rip, struct in_addr *addr);
extern struct rip_peer *rip_peer_lookup_next(struct rip *rip,
struct in_addr *addr);
extern int rip_peer_list_cmp(struct rip_peer *p1, struct rip_peer *p2);
extern void rip_peer_list_del(void *arg);
+void rip_peer_delete_routes(const struct rip_peer *peer);
extern void rip_info_free(struct rip_info *);
extern struct rip *rip_info_get_instance(const struct rip_info *rinfo);
endif
ripd_ripd_SOURCES = \
+ ripd/rip_bfd.c \
ripd/rip_cli.c \
ripd/rip_debug.c \
ripd/rip_errors.c \
# end
clippy_scan += \
+ ripd/rip_bfd.c \
ripd/rip_cli.c \
# end
noinst_HEADERS += \
+ ripd/rip_bfd.h \
ripd/rip_debug.h \
ripd/rip_errors.h \
ripd/rip_interface.h \
ts_hash("init", "df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119");
+#if !IS_ATOMIC(REALTYPE)
+ assert(!list_member(&head, &itm[0]));
+ assert(!list_member(&head, &itm[1]));
+#endif
+
#if IS_SORTED(REALTYPE)
prng = prng_new(0);
k = 0;
reset_config_on_routers(tgen)
logger.info(
- "[Phase 1] : Test Setup " "[Disable Mode]R1-----R2[Restart Mode] initialized "
+ "[Phase 1] : Test Setup " "[Disable Mode]R1-----R2[Helper Mode] initialized "
)
# Configure graceful-restart
tc_name, result
)
- logger.info("[Phase 2] : R2 Goes from Disable to Restart Mode ")
+ logger.info("[Phase 2] : R1 Goes from Disable to Restart Mode ")
# Configure graceful-restart
input_dict = {
},
}
- # here the verify_graceful_restart fro the neighbor would be
- # "NotReceived" as the latest GR config is not yet applied.
- for addr_type in ADDR_TYPES:
- result = verify_graceful_restart(
- tgen, topo, addr_type, input_dict, dut="r1", peer="r2"
- )
- assert result is True, "Testcase {} : Failed \n Error {}".format(
- tc_name, result
- )
-
- for addr_type in ADDR_TYPES:
- # Verifying RIB routes
- next_hop = next_hop_per_address_family(
- tgen, dut, peer, addr_type, NEXT_HOP_IP_2
- )
- input_topo = {key: topo["routers"][key] for key in ["r2"]}
- result = verify_rib(tgen, addr_type, dut, input_topo, next_hop, protocol)
- assert result is True, "Testcase {} : Failed \n Error {}".format(
- tc_name, result
- )
-
- logger.info("[Phase 6] : R1 is about to come up now ")
- start_router_daemons(tgen, "r1", ["bgpd"])
-
- logger.info("[Phase 4] : R1 is UP now, so time to collect GR stats ")
+ logger.info("[Phase 4] : R1 is UP and GR state is correct ")
for addr_type in ADDR_TYPES:
result = verify_graceful_restart(
--- /dev/null
+!
+int r1-eth0
+ ip address 192.168.1.1/24
+!
+router bgp 65001
+ bgp router-id 192.168.1.1
+ no bgp ebgp-requires-policy
+ no bgp network import-check
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.3 remote-as external
+ neighbor 192.168.1.4 remote-as external
+ address-family ipv4 unicast
+ network 10.10.10.10/32
+ neighbor 192.168.1.2 route-map rmap out
+ neighbor 192.168.1.3 route-map rmap out
+ neighbor 192.168.1.4 route-map rmap out
+ exit-address-family
+!
+route-map rmap permit 10
+ set extcommunity nt 192.168.1.3:0 192.168.1.4:0
+exit
--- /dev/null
+!
+int r2-eth0
+ ip address 192.168.1.2/24
+!
+router bgp 65002
+ bgp router-id 192.168.1.2
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
--- /dev/null
+!
+int r3-eth0
+ ip address 192.168.1.3/24
+!
+router bgp 65003
+ bgp router-id 192.168.1.3
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
--- /dev/null
+!
+int r4-eth0
+ ip address 192.168.1.4/24
+!
+router bgp 65004
+ bgp router-id 192.168.1.4
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+# Copyright (c) 2023 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+"""
+Test if Node Target Extended Communities works.
+
+At r1 we set NT to 192.168.1.3 and 192.168.1.4 (this is the R3/R4 router-id),
+and that means 10.10.10.10/32 MUST be installed on R3 and R4, but not on R2,
+because this route does not have NT:192.168.1.2.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+    # Four routers (r1..r4) all attached to one shared switch "s1", i.e. a
+    # single LAN segment — matching the frr.conf addressing where every
+    # router has an address in 192.168.1.0/24.
+    for routern in range(1, 5):
+        tgen.add_router("r{}".format(routern))
+
+    switch = tgen.add_switch("s1")
+    switch.add_link(tgen.gears["r1"])
+    switch.add_link(tgen.gears["r2"])
+    switch.add_link(tgen.gears["r3"])
+    switch.add_link(tgen.gears["r4"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+    # `mod` is required by the pytest module-teardown hook signature but
+    # is not used here.
+    tgen = get_topogen()
+    tgen.stop_topology()
+
+
+def test_bgp_node_target_extended_communities():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+ r3 = tgen.gears["r3"]
+ r4 = tgen.gears["r4"]
+
+ def _bgp_converge():
+ output = json.loads(r1.vtysh_cmd("show bgp summary json"))
+ expected = {
+ "ipv4Unicast": {
+ "peers": {
+ "192.168.1.2": {
+ "pfxSnt": 1,
+ "state": "Established",
+ },
+ "192.168.1.3": {
+ "pfxSnt": 1,
+ "state": "Established",
+ },
+ "192.168.1.4": {
+ "pfxSnt": 1,
+ "state": "Established",
+ },
+ }
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "Failed announcing 10.10.10.10/32 to r2, r3, and r4"
+
+ def _bgp_check_route(router, exists):
+ output = json.loads(router.vtysh_cmd("show bgp ipv4 unicast json"))
+ if exists:
+ expected = {
+ "routes": {
+ "10.10.10.10/32": [
+ {
+ "valid": True,
+ }
+ ]
+ }
+ }
+ else:
+ expected = {
+ "routes": {
+ "10.10.10.10/32": None,
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_check_route, r3, True)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "10.10.10.10/32 is not installed, but SHOULD be"
+
+ test_func = functools.partial(_bgp_check_route, r4, True)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "10.10.10.10/32 is not installed, but SHOULD be"
+
+ test_func = functools.partial(_bgp_check_route, r2, False)
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ assert result is None, "10.10.10.10/32 is installed, but SHOULD NOT be"
+
+
+if __name__ == "__main__":
+    # Allow running this test file directly (outside the topotest runner).
+    args = ["-s"] + sys.argv[1:]
+    sys.exit(pytest.main(args))
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
"""
Topotest conftest.py file.
"""
# pylint: disable=consider-using-f-string
import glob
+import logging
import os
-import pdb
import re
import resource
import subprocess
import sys
import time
-import pytest
-
import lib.fixtures
-from lib import topolog
+import pytest
from lib.micronet_compat import Mininet
from lib.topogen import diagnose_env, get_topogen
-from lib.topolog import logger
-from lib.topotest import g_extra_config as topotest_extra_config
+from lib.topolog import get_test_logdir, logger
from lib.topotest import json_cmp_result
+from munet import cli
from munet.base import Commander, proc_error
from munet.cleanup import cleanup_current, cleanup_previous
-from munet import cli
+from munet.config import ConfigOptionsProxy
+from munet.testing.util import pause_test
+
+from lib import topolog, topotest
+
+try:
+ # Used by munet native tests
+ from munet.testing.fixtures import event_loop, unet # pylint: disable=all # noqa
+
+ @pytest.fixture(scope="module")
+ def rundir_module(pytestconfig):
+ d = os.path.join(pytestconfig.option.rundir, get_test_logdir())
+ logging.debug("rundir_module: test module rundir %s", d)
+ return d
+
+except (AttributeError, ImportError):
+ pass
def pytest_addoption(parser):
help="Comma-separated list of routers to spawn gdb on, or 'all'",
)
+ parser.addoption(
+ "--logd",
+ action="append",
+ metavar="DAEMON[,ROUTER[,...]",
+ help=(
+ "Tail-F the DAEMON log file on all or a subset of ROUTERs."
+ " Option can be given multiple times."
+ ),
+ )
+
parser.addoption(
"--pause",
action="store_true",
help="Pause after each test",
)
+ parser.addoption(
+ "--pause-at-end",
+ action="store_true",
+ help="Pause before taking munet down",
+ )
+
parser.addoption(
"--pause-on-error",
action="store_true",
help="Do not pause after (disables default when --shell or -vtysh given)",
)
+ parser.addoption(
+ "--pcap",
+ default="",
+ metavar="NET[,NET...]",
+ help="Comma-separated list of networks to capture packets on, or 'all'",
+ )
+
+ parser.addoption(
+ "--perf",
+ action="append",
+ metavar="DAEMON[,ROUTER[,...]",
+ help=(
+ "Collect performance data from given DAEMON on all or a subset of ROUTERs."
+ " Option can be given multiple times."
+ ),
+ )
+
+ parser.addoption(
+ "--perf-options",
+ metavar="OPTS",
+ default="-g",
+ help="Options to pass to `perf record`.",
+ )
+
rundir_help = "directory for running in and log files"
parser.addini("rundir", rundir_help, default="/tmp/topotests")
parser.addoption("--rundir", metavar="DIR", help=rundir_help)
def check_for_memleaks():
- assert topotest_extra_config["valgrind_memleaks"]
+ assert topotest.g_pytest_config.option.valgrind_memleaks
leaks = []
tgen = get_topogen() # pylint: disable=redefined-outer-name
@pytest.fixture(autouse=True, scope="module")
def module_check_memtest(request):
- del request # disable unused warning
yield
- if topotest_extra_config["valgrind_memleaks"]:
+ if request.config.option.valgrind_memleaks:
if get_topogen() is not None:
check_for_memleaks()
def pytest_runtest_logstart(nodeid, location):
# location is (filename, lineno, testname)
- topolog.logstart(nodeid, location, topotest_extra_config["rundir"])
+ topolog.logstart(nodeid, location, topotest.g_pytest_config.option.rundir)
def pytest_runtest_logfinish(nodeid, location):
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item: pytest.Item) -> None:
"Hook the function that is called to execute the test."
- del item # disable unused warning
# For topology only run the CLI then exit
- if topotest_extra_config["topology_only"]:
+ if item.config.option.topology_only:
get_topogen().cli()
pytest.exit("exiting after --topology-only")
yield
# Check for leaks if requested
- if topotest_extra_config["valgrind_memleaks"]:
+ if item.config.option.valgrind_memleaks:
check_for_memleaks()
"""
Assert that the environment is correctly configured, and get extra config.
"""
+ topotest.g_pytest_config = ConfigOptionsProxy(config)
if config.getoption("--collect-only"):
return
# Set some defaults for the pytest.ini [pytest] section
# ---------------------------------------------------
- rundir = config.getoption("--rundir")
+ rundir = config.option.rundir
if not rundir:
rundir = config.getini("rundir")
if not rundir:
rundir = "/tmp/topotests"
+ config.option.rundir = rundir
+
if not config.getoption("--junitxml"):
config.option.xmlpath = os.path.join(rundir, "topotests.xml")
xmlpath = config.option.xmlpath
mv_path = commander.get_exec_path("mv")
commander.cmd_status([mv_path, xmlpath, xmlpath + suffix])
- topotest_extra_config["rundir"] = rundir
-
# Set the log_file (exec) to inside the rundir if not specified
if not config.getoption("--log-file") and not config.getini("log_file"):
config.option.log_file = os.path.join(rundir, "exec.log")
elif b and not is_xdist and not have_windows:
pytest.exit("{} use requires byobu/TMUX/SCREEN/XTerm".format(feature))
- # ---------------------------------------
- # Record our options in global dictionary
- # ---------------------------------------
-
- topotest_extra_config["rundir"] = rundir
-
- asan_abort = config.getoption("--asan-abort")
- topotest_extra_config["asan_abort"] = asan_abort
-
- gdb_routers = config.getoption("--gdb-routers")
- gdb_routers = gdb_routers.split(",") if gdb_routers else []
- topotest_extra_config["gdb_routers"] = gdb_routers
-
- gdb_daemons = config.getoption("--gdb-daemons")
- gdb_daemons = gdb_daemons.split(",") if gdb_daemons else []
- topotest_extra_config["gdb_daemons"] = gdb_daemons
- assert_feature_windows(gdb_routers or gdb_daemons, "GDB")
-
- gdb_breakpoints = config.getoption("--gdb-breakpoints")
- gdb_breakpoints = gdb_breakpoints.split(",") if gdb_breakpoints else []
- topotest_extra_config["gdb_breakpoints"] = gdb_breakpoints
-
- cli_on_error = config.getoption("--cli-on-error")
- topotest_extra_config["cli_on_error"] = cli_on_error
- assert_feature_windows(cli_on_error, "--cli-on-error")
-
- shell = config.getoption("--shell")
- topotest_extra_config["shell"] = shell.split(",") if shell else []
- assert_feature_windows(shell, "--shell")
-
- strace = config.getoption("--strace-daemons")
- topotest_extra_config["strace_daemons"] = strace.split(",") if strace else []
-
- shell_on_error = config.getoption("--shell-on-error")
- topotest_extra_config["shell_on_error"] = shell_on_error
- assert_feature_windows(shell_on_error, "--shell-on-error")
-
- topotest_extra_config["valgrind_extra"] = config.getoption("--valgrind-extra")
- topotest_extra_config["valgrind_memleaks"] = config.getoption("--valgrind-memleaks")
-
- vtysh = config.getoption("--vtysh")
- topotest_extra_config["vtysh"] = vtysh.split(",") if vtysh else []
- assert_feature_windows(vtysh, "--vtysh")
-
- vtysh_on_error = config.getoption("--vtysh-on-error")
- topotest_extra_config["vtysh_on_error"] = vtysh_on_error
- assert_feature_windows(vtysh_on_error, "--vtysh-on-error")
-
- pause_on_error = vtysh or shell or config.getoption("--pause-on-error")
- if config.getoption("--no-pause-on-error"):
- pause_on_error = False
-
- topotest_extra_config["pause_on_error"] = pause_on_error
- assert_feature_windows(pause_on_error, "--pause-on-error")
-
- pause = config.getoption("--pause")
- topotest_extra_config["pause"] = pause
- assert_feature_windows(pause, "--pause")
-
- topology_only = config.getoption("--topology-only")
- if topology_only and is_xdist:
+ #
+ # Check for window capability if given options that require window
+ #
+ assert_feature_windows(config.option.gdb_routers, "GDB")
+ assert_feature_windows(config.option.gdb_daemons, "GDB")
+ assert_feature_windows(config.option.cli_on_error, "--cli-on-error")
+ assert_feature_windows(config.option.shell, "--shell")
+ assert_feature_windows(config.option.shell_on_error, "--shell-on-error")
+ assert_feature_windows(config.option.vtysh, "--vtysh")
+ assert_feature_windows(config.option.vtysh_on_error, "--vtysh-on-error")
+
+ if config.option.topology_only and is_xdist:
pytest.exit("Cannot use --topology-only with distributed test mode")
- topotest_extra_config["topology_only"] = topology_only
# Check environment now that we have config
if not diagnose_env(rundir):
# We want to pause, if requested, on any error not just test cases
# (e.g., call.when == "setup")
if not pause:
- pause = (
- topotest_extra_config["pause_on_error"]
- or topotest_extra_config["pause"]
- )
+ pause = item.config.option.pause_on_error or item.config.option.pause
# (topogen) Set topology error to avoid advancing in the test.
tgen = get_topogen() # pylint: disable=redefined-outer-name
isatty = sys.stdout.isatty()
error_cmd = None
- if error and topotest_extra_config["vtysh_on_error"]:
+ if error and item.config.option.vtysh_on_error:
error_cmd = commander.get_exec_path(["vtysh"])
- elif error and topotest_extra_config["shell_on_error"]:
+ elif error and item.config.option.shell_on_error:
error_cmd = os.getenv("SHELL", commander.get_exec_path(["bash"]))
if error_cmd:
if p.wait():
logger.warning("xterm proc failed: %s:", proc_error(p, o, e))
- if error and topotest_extra_config["cli_on_error"]:
+ if error and item.config.option.cli_on_error:
# Really would like something better than using this global here.
# Not all tests use topogen though so get_topogen() won't work.
if Mininet.g_mnet_inst:
else:
logger.error("Could not launch CLI b/c no mininet exists yet")
- while pause and isatty:
- try:
- user = raw_input(
- 'PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: '
- )
- except NameError:
- user = input('PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: ')
- user = user.strip()
-
- if user == "cli":
- cli.cli(Mininet.g_mnet_inst)
- elif user == "pdb":
- pdb.set_trace() # pylint: disable=forgotten-debug-statement
- elif user:
- print('Unrecognized input: "%s"' % user)
- else:
- break
+ if pause and isatty:
+ pause_test()
#
--- /dev/null
+version: 1
+topology:
+ ipv6-enable: true
+ networks-autonumber: true
+ networks:
+ - name: net1
+ - name: net2
+ nodes:
+ - name: r1
+ kind: frr
+ connections: ["net1"]
+ - name: r2
+ kind: frr
+ connections: ["net1", "net2"]
+ - name: r3
+ kind: frr
+ connections: ["net2"]
--- /dev/null
+zebra=1
+staticd=1
+vtysh_enable=1
+watchfrr_enable=1
+zebra_options="-d -F traditional --log=file:/var/log/frr/zebra.log"
+staticd_options="-d -F traditional --log=file:/var/log/frr/staticd.log"
--- /dev/null
+log file /var/log/frr/frr.log
+service integrated-vtysh-config
+
+interface eth0
+ ip address 10.0.1.1/24
+
+ip route 10.0.0.0/8 blackhole
--- /dev/null
+service integrated-vtysh-config
\ No newline at end of file
--- /dev/null
+zebra=1
+staticd=1
+vtysh_enable=1
+watchfrr_enable=1
+zebra_options="-d -F traditional --log=file:/var/log/frr/zebra.log"
+staticd_options="-d -F traditional --log=file:/var/log/frr/staticd.log"
--- /dev/null
+log file /var/log/frr/frr.log
+service integrated-vtysh-config
+
+interface eth0
+ ip address 10.0.1.2/24
+
+interface eth1
+ ip address 10.0.2.2/24
+
+ip route 10.0.0.0/8 blackhole
--- /dev/null
+service integrated-vtysh-config
\ No newline at end of file
--- /dev/null
+zebra=1
+staticd=1
+vtysh_enable=1
+watchfrr_enable=1
+zebra_options="-d -F traditional --log=file:/var/log/frr/zebra.log"
+staticd_options="-d -F traditional --log=file:/var/log/frr/staticd.log"
--- /dev/null
+log file /var/log/frr/frr.log
+service integrated-vtysh-config
+
+interface eth0
+ ip address 10.0.2.3/24
+
+ip route 10.0.0.0/8 blackhole
--- /dev/null
+service integrated-vtysh-config
\ No newline at end of file
--- /dev/null
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# April 23 2023, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2023, LabN Consulting, L.L.C.
+#
+async def test_native_test(unet):
+    # Minimal munet-native smoke test: dump r1's interface addresses to
+    # confirm the node is up and reachable via the test harness.
+    o = unet.hosts["r1"].cmd_nostatus("ip addr")
+    print(o)
--- /dev/null
+version: 1
+kinds:
+ - name: frr
+ cmd: |
+ chown frr:frr -R /var/run/frr
+ chown frr:frr -R /var/log/frr
+ /usr/lib/frr/frrinit.sh start
+ tail -F /var/log/frr/frr.log
+ cleanup-cmd: |
+ /usr/lib/frr/frrinit.sh stop
+ volumes:
+ - "./%NAME%:/etc/frr"
+ - "%RUNDIR%/var.log.frr:/var/log/frr"
+ - "%RUNDIR%/var.run.frr:/var/run/frr"
+ cap-add:
+ - SYS_ADMIN
+ - AUDIT_WRITE
+ merge: ["volumes"]
+cli:
+ commands:
+ - name: ""
+ exec: "vtysh -c '{}'"
+ format: "[ROUTER ...] COMMAND"
+ help: "execute vtysh COMMAND on the router[s]"
+ kinds: ["frr"]
+ - name: "vtysh"
+ exec: "/usr/bin/vtysh"
+ format: "vtysh ROUTER [ROUTER ...]"
+ new-window: true
+ kinds: ["frr"]
"""Node (mininet compat)."""
def __init__(self, name, rundir=None, **kwargs):
-
nkwargs = {}
if "unet" in kwargs:
nkwargs["unet"] = kwargs["unet"]
if "private_mounts" in kwargs:
nkwargs["private_mounts"] = kwargs["private_mounts"]
+ if "logger" in kwargs:
+ nkwargs["logger"] = kwargs["logger"]
# This is expected by newer munet CLI code
self.config_dirname = ""
g_mnet_inst = None
- def __init__(self, rundir=None):
+ def __init__(self, rundir=None, pytestconfig=None):
"""
Create a Micronet.
"""
# to set permissions to root:frr 770 to make this unneeded in that case
# os.umask(0)
- super(Mininet, self).__init__(pid=False, rundir=rundir)
+ super(Mininet, self).__init__(
+ pid=False, rundir=rundir, pytestconfig=pytestconfig
+ )
# From munet/munet/native.py
with open(os.path.join(self.rundir, "nspid"), "w", encoding="ascii") as f:
cli.add_cli_config(self, cdict)
- # shellopt = (
- # self.pytest_config.getoption("--shell") if self.pytest_config else None
- # )
- # shellopt = shellopt if shellopt is not None else ""
- # if shellopt == "all" or "." in shellopt.split(","):
- # self.run_in_window("bash")
+ shellopt = self.cfgopt.get_option_list("--shell")
+ if "all" in shellopt or "." in shellopt:
+ self.run_in_window("bash")
# This is expected by newer munet CLI code
self.config_dirname = ""
def start(self):
"""Start the micronet topology."""
+ pcapopt = self.cfgopt.get_option_list("--pcap")
+ if "all" in pcapopt:
+ pcapopt = self.switches.keys()
+ for pcap in pcapopt:
+ if ":" in pcap:
+ host, intf = pcap.split(":")
+ pcap = f"{host}-{intf}"
+ host = self.hosts[host]
+ else:
+ host = self
+ intf = pcap
+ pcapfile = f"{self.rundir}/capture-{pcap}.pcap"
+ host.run_in_window(
+ f"tshark -s 9200 -i {intf} -P -w {pcapfile}",
+ background=True,
+ title=f"cap:{pcap}",
+ )
+
self.logger.debug("%s: Starting (no-op).", self)
def stop(self):
* After running stop Mininet with: tgen.stop_topology()
"""
+import configparser
import grp
import inspect
import json
import sys
from collections import OrderedDict
-if sys.version_info[0] > 2:
- import configparser
-else:
- import ConfigParser as configparser
-
import lib.topolog as topolog
from lib.micronet import Commander
from lib.micronet_compat import Mininet
from lib.topolog import logger
-from lib.topotest import g_extra_config
+from munet.testing.util import pause_test
from lib import topotest
self._load_config()
# Create new log directory
- self.logdir = topotest.get_logs_path(g_extra_config["rundir"])
+ self.logdir = topotest.get_logs_path(topotest.g_pytest_config.option.rundir)
subprocess.check_call(
"mkdir -p {0} && chmod 1777 {0}".format(self.logdir), shell=True
)
# Mininet(Micronet) to build the actual topology.
assert not inspect.isclass(topodef)
- self.net = Mininet(rundir=self.logdir)
+ self.net = Mininet(rundir=self.logdir, pytestconfig=topotest.g_pytest_config)
# Adjust the parent namespace
topotest.fix_netns_limits(self.net)
first is a simple kill with no sleep, the second will sleep if not
killed and try with a different signal.
"""
+ pause = bool(self.net.cfgopt.get_option("--pause-at-end"))
+ pause = pause or bool(self.net.cfgopt.get_option("--pause"))
+ if pause:
+ try:
+ pause_test("Before MUNET delete")
+ except KeyboardInterrupt:
+ print("^C...continuing")
+ except Exception as error:
+ self.logger.error("\n...continuing after error: %s", error)
+
logger.info("stopping topology: {}".format(self.modname))
+
errors = ""
for gear in self.gears.values():
errors += gear.stop()
# Network Device Education Foundation, Inc. ("NetDEF")
#
+import configparser
import difflib
import errno
import functools
import glob
import json
import os
-import pdb
import platform
import re
import resource
import sys
import tempfile
import time
+from collections.abc import Mapping
from copy import deepcopy
import lib.topolog as topolog
+from lib.micronet_compat import Node
from lib.topolog import logger
-
-if sys.version_info[0] > 2:
- import configparser
- from collections.abc import Mapping
-else:
- import ConfigParser as configparser
- from collections import Mapping
+from munet.base import Timeout
from lib import micronet
-from lib.micronet_compat import Node
-g_extra_config = {}
+g_pytest_config = None
def get_logs_path(rundir):
)
-def pid_exists(pid):
- "Check whether pid exists in the current process table."
-
- if pid <= 0:
- return False
- try:
- os.waitpid(pid, os.WNOHANG)
- except:
- pass
- try:
- os.kill(pid, 0)
- except OSError as err:
- if err.errno == errno.ESRCH:
- # ESRCH == No such process
- return False
- elif err.errno == errno.EPERM:
- # EPERM clearly means there's a process to deny access to
- return True
- else:
- # According to "man 2 kill" possible error values are
- # (EINVAL, EPERM, ESRCH)
- raise
- else:
- return True
-
-
def get_textdiff(text1, text2, title1="", title2="", **opts):
"Returns empty string if same or formatted diff"
# No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
if logdir:
- filepattern = logdir + "/" + router + "/" + component + ".asan.*"
+ filepattern = logdir + "/" + router + ".asan." + component + ".*"
logger.debug(
"Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
)
def fix_netns_limits(ns):
-
# Maximum read and write socket buffer sizes
- sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
- sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])
+ sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2**20])
+ sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2**20])
sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)
# Maximum read and write socket buffer sizes
- sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
- sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)
+ sysctl_atleast(None, "net.core.rmem_max", 16 * 2**20)
+ sysctl_atleast(None, "net.core.wmem_max", 16 * 2**20)
# Garbage Collection Settings for ARP and Neighbors
sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
def setup_node_tmpdir(logdir, name):
# Cleanup old log, valgrind, and core files.
subprocess.check_call(
- "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir, name), shell=True
+ "rm -rf {0}/{1}.valgrind.* {0}/{1}.asan.* {0}/{1}/".format(logdir, name),
+ shell=True,
)
# Setup the per node directory.
"A Node with IPv4/IPv6 forwarding enabled"
def __init__(self, name, *posargs, **params):
-
# Backward compatibility:
# Load configuration defaults like topogen.
self.config_defaults = configparser.ConfigParser(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
)
+ self.perf_daemons = {}
+
# If this topology is using old API and doesn't have logdir
# specified, then attempt to generate an unique logdir.
self.logdir = params.get("logdir")
if self.logdir is None:
- self.logdir = get_logs_path(g_extra_config["rundir"])
+ self.logdir = get_logs_path(g_pytest_config.getoption("--rundir"))
if not params.get("logger"):
# If logger is present topogen has already set this up
)
except Exception as ex:
logger.error("%s can't remove IPs %s", self, str(ex))
- # pdb.set_trace()
+ # breakpoint()
# assert False, "can't remove IPs %s" % str(ex)
def checkCapability(self, daemon, param):
if (daemon == "zebra") and (self.daemons["mgmtd"] == 0):
# Add mgmtd with zebra - if it exists
- try:
- mgmtd_path = os.path.join(self.daemondir, "mgmtd")
- except:
- pdb.set_trace()
+ mgmtd_path = os.path.join(self.daemondir, "mgmtd")
if os.path.isfile(mgmtd_path):
self.daemons["mgmtd"] = 1
self.daemons_options["mgmtd"] = ""
if (daemon == "zebra") and (self.daemons["staticd"] == 0):
# Add staticd with zebra - if it exists
- try:
- staticd_path = os.path.join(self.daemondir, "staticd")
- except:
- pdb.set_trace()
-
+ staticd_path = os.path.join(self.daemondir, "staticd")
if os.path.isfile(staticd_path):
self.daemons["staticd"] = 1
self.daemons_options["staticd"] = ""
# used
self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
- shell_routers = g_extra_config["shell"]
- if "all" in shell_routers or self.name in shell_routers:
+ if g_pytest_config.name_in_option_list(self.name, "--shell"):
self.run_in_window(os.getenv("SHELL", "bash"), title="sh-%s" % self.name)
if self.daemons["eigrpd"] == 1:
status = self.startRouterDaemons(tgen=tgen)
- vtysh_routers = g_extra_config["vtysh"]
- if "all" in vtysh_routers or self.name in vtysh_routers:
+ if g_pytest_config.name_in_option_list(self.name, "--vtysh"):
self.run_in_window("vtysh", title="vt-%s" % self.name)
if self.unified_config:
def startRouterDaemons(self, daemons=None, tgen=None):
"Starts FRR daemons for this router."
- asan_abort = g_extra_config["asan_abort"]
- gdb_breakpoints = g_extra_config["gdb_breakpoints"]
- gdb_daemons = g_extra_config["gdb_daemons"]
- gdb_routers = g_extra_config["gdb_routers"]
- valgrind_extra = g_extra_config["valgrind_extra"]
- valgrind_memleaks = g_extra_config["valgrind_memleaks"]
- strace_daemons = g_extra_config["strace_daemons"]
+ asan_abort = bool(g_pytest_config.option.asan_abort)
+ gdb_breakpoints = g_pytest_config.get_option_list("--gdb-breakpoints")
+ gdb_daemons = g_pytest_config.get_option_list("--gdb-daemons")
+ gdb_routers = g_pytest_config.get_option_list("--gdb-routers")
+ valgrind_extra = bool(g_pytest_config.option.valgrind_extra)
+ valgrind_memleaks = bool(g_pytest_config.option.valgrind_memleaks)
+ strace_daemons = g_pytest_config.get_option_list("--strace-daemons")
# Get global bundle data
if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
self.reportCores = True
# XXX: glue code forward ported from removed function.
- if self.version == None:
+ if self.version is None:
self.version = self.cmd(
os.path.join(self.daemondir, "bgpd") + " -v"
).split()[2]
logger.info("{}: running version: {}".format(self.name, self.version))
+
+ perfds = {}
+ perf_options = g_pytest_config.get_option("--perf-options", "-g")
+ for perf in g_pytest_config.get_option("--perf", []):
+ if "," in perf:
+ daemon, routers = perf.split(",", 1)
+ perfds[daemon] = routers.split(",")
+ else:
+ daemon = perf
+ perfds[daemon] = ["all"]
+
+ logd_options = {}
+ for logd in g_pytest_config.get_option("--logd", []):
+ if "," in logd:
+ daemon, routers = logd.split(",", 1)
+ logd_options[daemon] = routers.split(",")
+ else:
+ daemon = logd
+ logd_options[daemon] = ["all"]
+
# If `daemons` was specified then some upper API called us with
# specific daemons, otherwise just use our own configuration.
daemons_list = []
if self.daemons[daemon] == 1:
daemons_list.append(daemon)
+ tail_log_files = []
+ check_daemon_files = []
+
def start_daemon(daemon, extra_opts=None):
daemon_opts = self.daemons_options.get(daemon, "")
+
+ # get pid and vty filenames and remove the files
+ m = re.match(r"(.* |^)-n (\d+)( ?.*|$)", daemon_opts)
+ dfname = daemon if not m else "{}-{}".format(daemon, m.group(2))
+ runbase = "/var/run/{}/{}".format(self.routertype, dfname)
+ # If this is a new system bring-up remove the pid/vty files, otherwise
+ # do not since apparently presence of the pidfile impacts BGP GR
+ self.cmd_status("rm -f {0}.pid {0}.vty".format(runbase))
+
rediropt = " > {0}.out 2> {0}.err".format(daemon)
if daemon == "snmpd":
binary = "/usr/sbin/snmpd"
cmdenv = ""
cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
daemon_opts
- ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
+ ) + "{}.pid -x /etc/frr/agentx".format(runbase)
+ # check_daemon_files.append(runbase + ".pid")
else:
binary = os.path.join(self.daemondir, daemon)
+ check_daemon_files.extend([runbase + ".pid", runbase + ".vty"])
cmdenv = "ASAN_OPTIONS="
if asan_abort:
- cmdenv = "abort_on_error=1:"
- cmdenv += "log_path={0}/{1}.{2}.asan ".format(
+ cmdenv += "abort_on_error=1:"
+ cmdenv += "log_path={0}/{1}.asan.{2} ".format(
self.logdir, self.name, daemon
)
daemon, self.logdir, self.name
)
- cmdopt = "{} --command-log-always --log file:{}.log --log-level debug".format(
- daemon_opts, daemon
- )
+ cmdopt = "{} --command-log-always ".format(daemon_opts)
+ cmdopt += "--log file:{}.log --log-level debug".format(daemon)
+
+ if daemon in logd_options:
+ logdopt = logd_options[daemon]
+ if "all" in logdopt or self.name in logdopt:
+ tail_log_files.append(
+ "{}/{}/{}.log".format(self.logdir, self.name, daemon)
+ )
if extra_opts:
cmdopt += " " + extra_opts
logger.info(
"%s: %s %s launched in gdb window", self, self.routertype, daemon
)
- # Need better check for daemons running.
- time.sleep(5)
+ elif daemon in perfds and (self.name in perfds[daemon] or "all" in perfds[daemon]):
+ cmdopt += rediropt
+ cmd = " ".join(["perf record {} --".format(perf_options), binary, cmdopt])
+ p = self.popen(cmd)
+ self.perf_daemons[daemon] = p
+ if p.poll() and p.returncode:
+ self.logger.error(
+ '%s: Failed to launch "%s" (%s) with perf using: %s',
+ self,
+ daemon,
+ p.returncode,
+ cmd,
+ )
+ else:
+ logger.debug(
+ "%s: %s %s started with perf", self, self.routertype, daemon
+ )
else:
if daemon != "snmpd":
cmdopt += " -d "
start_daemon(daemon)
# Check if daemons are running.
- rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
- if re.search(r"No such file or directory", rundaemons):
- return "Daemons are not running"
+ wait_time = 30 if (gdb_routers or gdb_daemons) else 10
+ timeout = Timeout(wait_time)
+ for remaining in timeout:
+ if not check_daemon_files:
+ break
+ check = check_daemon_files[0]
+ if self.path_exists(check):
+ check_daemon_files.pop(0)
+ continue
+ self.logger.debug("Waiting {}s for {} to appear".format(remaining, check))
+ time.sleep(0.5)
+
+ if check_daemon_files:
+ assert False, "Timeout({}) waiting for {} to appear on {}".format(
+ wait_time, check_daemon_files[0], self.name
+ )
# Update the permissions on the log files
self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))
+ if "frr" in logd_options:
+ logdopt = logd_options["frr"]
+ if "all" in logdopt or self.name in logdopt:
+ tail_log_files.append("{}/{}/frr.log".format(self.logdir, self.name))
+
+ for tailf in tail_log_files:
+ self.run_in_window("tail -f " + tailf, title=tailf, background=True)
+
return ""
+    def pid_exists(self, pid):
+        """Return True if `pid` still exists from this node's point of view.
+
+        Runs `kill -0` inside the node (rather than os.kill in the test
+        process) so the check works when the daemon lives in a separate
+        PID namespace.
+        """
+        if pid <= 0:
+            return False
+        try:
+            # If we are not using PID namespaces then we will be a parent of the pid,
+            # otherwise the init process of the PID namespace will have reaped the proc.
+            os.waitpid(pid, os.WNOHANG)
+        except Exception:
+            pass
+
+        # kill -0 succeeding means the pid exists; if it fails for any
+        # reason other than "No such process" (e.g. permissions), assume
+        # the process still exists.
+        rc, o, e = self.cmd_status("kill -0 " + str(pid), warn=False)
+        return rc == 0 or "No such process" not in e
+
def killRouterDaemons(
self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
):
if re.search(r"%s" % daemon, d):
daemonpidfile = d.rstrip()
daemonpid = self.cmd("cat %s" % daemonpidfile).rstrip()
- if daemonpid.isdigit() and pid_exists(int(daemonpid)):
+ if daemonpid.isdigit() and self.pid_exists(int(daemonpid)):
logger.debug(
"{}: killing {}".format(
self.name,
os.path.basename(daemonpidfile.rsplit(".", 1)[0]),
)
)
- os.kill(int(daemonpid), signal.SIGKILL)
- if pid_exists(int(daemonpid)):
+ self.cmd_status("kill -KILL {}".format(daemonpid))
+ if self.pid_exists(int(daemonpid)):
numRunning += 1
while wait and numRunning > 0:
sleep(
for d in dmns[:-1]:
if re.search(r"%s" % daemon, d):
daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
- if daemonpid.isdigit() and pid_exists(
+ if daemonpid.isdigit() and self.pid_exists(
int(daemonpid)
):
logger.info(
),
)
)
- os.kill(int(daemonpid), signal.SIGKILL)
- if daemonpid.isdigit() and not pid_exists(
+ self.cmd_status(
+ "kill -KILL {}".format(daemonpid)
+ )
+ if daemonpid.isdigit() and not self.pid_exists(
int(daemonpid)
):
numRunning -= 1
from pathlib import Path
from typing import Union
+from . import config as munet_config
from . import linux
if unet and unet.nsenter_fork:
assert not unet.unshare_inline
# Need child pid of p.pid
- pgrep = roothost.get_exec_path("pgrep")
+ pgrep = unet.rootcmd.get_exec_path("pgrep")
+ # a single fork was done
- child_pid = roothost.cmd_raises([pgrep, "-o", "-P", str(p.pid)])
+ child_pid = unet.rootcmd.cmd_raises([pgrep, "-o", "-P", str(p.pid)])
self.pid = int(child_pid.strip())
self.logger.debug("%s: child of namespace process: %s", self, pid)
# this will fail if running inside the namespace with PID
if pid:
- o = self.cmd_status_nsonly("ls -l /proc/1/ns")
+ o = self.cmd_nostatus_nsonly("ls -l /proc/1/ns")
else:
- o = self.cmd_nostatus_nsonly(cmd=shlex.split("/usr/bin/ls -l /proc/self"))
- o = self.cmd_nostatus_nsonly(cmd=shlex.split("ls -l /proc/self/ns"))
+ o = self.cmd_nostatus_nsonly("ls -l /proc/self/ns")
self.logger.debug("namespaces:\n %s", o)
class BaseMunet(LinuxNamespace):
"""Munet."""
- def __init__(self, name="munet", isolated=True, pid=True, rundir=None, **kwargs):
+ def __init__(
+ self,
+ name="munet",
+ isolated=True,
+ pid=True,
+ rundir=None,
+ pytestconfig=None,
+ **kwargs,
+ ):
"""Create a Munet."""
# logging.warning("BaseMunet: %s", name)
if not self.isolated:
self.rootcmd = commander
+ elif not pid:
+ nsflags = (
+ f"--mount={self.proc_path / '1/ns/mnt'}",
+ f"--net={self.proc_path / '1/ns/net'}",
+ f"--uts={self.proc_path / '1/ns/uts'}",
+ # f"--ipc={self.proc_path / '1/ns/ipc'}",
+ # f"--time={self.proc_path / '1/ns/time'}",
+ # f"--cgroup={self.proc_path / '1/ns/cgroup'}",
+ )
+ self.rootcmd = SharedNamespace("root", pid=1, nsflags=nsflags)
else:
# XXX user
nsflags = (
- f"--pid={self.proc_path / '1/ns/pid_for_children'}",
+ # XXX Backing up PID namespace just doesn't work.
+ # f"--pid={self.proc_path / '1/ns/pid_for_children'}",
f"--mount={self.proc_path / '1/ns/mnt'}",
f"--net={self.proc_path / '1/ns/net'}",
f"--uts={self.proc_path / '1/ns/uts'}",
roothost = self.rootcmd
+ self.cfgopt = munet_config.ConfigOptionsProxy(pytestconfig)
+
super().__init__(
name, mount=True, net=isolated, uts=isolated, pid=pid, unet=None, **kwargs
)
import termios
import tty
-from . import linux
-from .config import list_to_dict_with_key
+
+try:
+ from . import linux
+ from .config import list_to_dict_with_key
+except ImportError:
+ # We cannot use relative imports and still run this module directly as a script, and
+ # there are some use cases where we want to run this file as a script.
+ sys.path.append(os.path.dirname(os.path.realpath(__file__)))
+ import linux
+
+ from config import list_to_dict_with_key
ENDMARKER = b"\x00END\x00"
return True
await run_command(
- unet, outf, nline, execfmt, banner, hosts, toplevel, kinds, ns_only, interactive
+ unet,
+ outf,
+ nline,
+ execfmt,
+ banner,
+ hosts,
+ toplevel,
+ kinds,
+ ns_only,
+ interactive,
)
return True
async def local_cli(unet, outf, prompt, histfile, background):
"""Implement the user-side CLI for local munet."""
- if unet:
- completer = Completer(unet)
- readline.parse_and_bind("tab: complete")
- readline.set_completer(completer.complete)
+ assert unet is not None
+ completer = Completer(unet)
+ readline.parse_and_bind("tab: complete")
+ readline.set_completer(completer.complete)
print("\n--- Munet CLI Starting ---\n\n")
while True:
if line is None:
return
- assert unet is not None
-
if not await doline(unet, line, outf, background):
return
except KeyboardInterrupt:
break
line = line.decode("utf-8").strip()
- # def writef(x):
- # writer.write(x.encode("utf-8"))
+ class EncodingFile:
+ """Wrap a writer to encode in utf-8."""
+
+ def __init__(self, writer):
+ self.writer = writer
+
+ def write(self, x):
+ self.writer.write(x.encode("utf-8"))
+
+ def flush(self):
+ self.writer.flush()
- if not await doline(unet, line, writer, background, notty=True):
+ if not await doline(unet, line, EncodingFile(writer), background, notty=True):
logging.debug("server closing cli connection")
return
class PytestConfig:
"""Pytest config duck-type-compatible object using argprase args."""
+ class Namespace:
+ """A namespace defined by a dictionary of values."""
+
+ def __init__(self, args):
+ self.args = args
+
+ def __getattr__(self, attr):
+ return self.args[attr] if attr in self.args else None
+
def __init__(self, args):
self.args = vars(args)
+ self.option = PytestConfig.Namespace(self.args)
def getoption(self, name, default=None, skip=False):
assert not skip
if k not in new:
new[k] = config[k]
return new
+
+
+def cli_opt_list(option_list):
+ if not option_list:
+ return []
+ if isinstance(option_list, str):
+ return [x for x in option_list.split(",") if x]
+ return [x for x in option_list if x]
+
+
+def name_in_cli_opt_str(name, option_list):
+ ol = cli_opt_list(option_list)
+ return name in ol or "all" in ol
+
+
+class ConfigOptionsProxy:
+ """Proxy options object to fill in for any missing pytest config."""
+
+ class DefNoneObject:
+ """An object that returns None for any attribute access."""
+
+ def __getattr__(self, attr):
+ return None
+
+ def __init__(self, pytestconfig=None):
+ if isinstance(pytestconfig, ConfigOptionsProxy):
+ self.config = pytestconfig.config
+ self.option = self.config.option
+ else:
+ self.config = pytestconfig
+ if self.config:
+ self.option = self.config.option
+ else:
+ self.option = ConfigOptionsProxy.DefNoneObject()
+
+ def getoption(self, opt, default=None):
+ if not self.config:
+ return default
+
+ try:
+ value = self.config.getoption(opt)
+ return value if value is not None else default
+ except ValueError:
+ return default
+
+ def get_option(self, opt, default=None):
+ return self.getoption(opt, default)
+
+ def get_option_list(self, opt):
+ value = self.get_option(opt, "")
+ return cli_opt_list(value)
+
+ def name_in_option_list(self, name, opt):
+ optlist = self.get_option_list(opt)
+ return "all" in optlist or name in optlist
# No exec so we are the "child".
new_process_group()
+ # Reap children as init process
+ vdebug("installing local handler for SIGCHLD")
+ signal.signal(signal.SIGCHLD, sig_sigchld)
+
while True:
- logging.info("parent: waiting to reap zombies")
+ logging.info("init: waiting to reap zombies")
linux.pause()
# NOTREACHED
stdout: file-like object with a ``name`` attribute, or a path to a file.
stderr: file-like object with a ``name`` attribute, or a path to a file.
"""
- if not self.unet or not self.unet.pytest_config:
+ if not self.unet:
return
- outopt = self.unet.pytest_config.getoption("--stdout")
+ outopt = self.unet.cfgopt.getoption("--stdout")
outopt = outopt if outopt is not None else ""
if outopt == "all" or self.name in outopt.split(","):
outname = stdout.name if hasattr(stdout, "name") else stdout
self.run_in_window(f"tail -F {outname}", title=f"O:{self.name}")
if stderr:
- erropt = self.unet.pytest_config.getoption("--stderr")
+ erropt = self.unet.cfgopt.getoption("--stderr")
erropt = erropt if erropt is not None else ""
if erropt == "all" or self.name in erropt.split(","):
errname = stderr.name if hasattr(stderr, "name") else stderr
self.run_in_window(f"tail -F {errname}", title=f"E:{self.name}")
def pytest_hook_open_shell(self):
- if not self.unet or not self.unet.pytest_config:
+ if not self.unet:
return
gdbcmd = self.config.get("gdb-cmd")
- shellopt = self.unet.pytest_config.getoption("--gdb", "")
+ shellopt = self.unet.cfgopt.getoption("--gdb", "")
should_gdb = gdbcmd and (shellopt == "all" or self.name in shellopt.split(","))
- use_emacs = self.unet.pytest_config.getoption("--gdb-use-emacs", False)
+ use_emacs = self.unet.cfgopt.getoption("--gdb-use-emacs", False)
if should_gdb and not use_emacs:
cmds = self.config.get("gdb-target-cmds", [])
for cmd in cmds:
gdbcmd += f" '-ex={cmd}'"
- bps = self.unet.pytest_config.getoption("--gdb-breakpoints", "").split(",")
+ bps = self.unet.cfgopt.getoption("--gdb-breakpoints", "").split(",")
for bp in bps:
gdbcmd += f" '-ex=b {bp}'"
]
)
- bps = self.unet.pytest_config.getoption("--gdb-breakpoints", "").split(",")
+ bps = self.unet.cfgopt.getoption("--gdb-breakpoints", "").split(",")
for bp in bps:
cmd = f"br {bp}"
self.cmd_raises(
)
gdbcmd += f" '-ex={cmd}'"
- shellopt = self.unet.pytest_config.getoption("--shell")
- shellopt = shellopt if shellopt is not None else ""
+ shellopt = self.unet.cfgopt.getoption("--shell")
+ shellopt = shellopt if shellopt else ""
if shellopt == "all" or self.name in shellopt.split(","):
self.run_in_window("bash")
con.cmd_raises(f"ip -6 route add default via {switch.ip6_address}")
con.cmd_raises("ip link set lo up")
- if self.unet.pytest_config and self.unet.pytest_config.getoption("--coverage"):
+ if self.unet.cfgopt.getoption("--coverage"):
con.cmd_raises("mount -t debugfs none /sys/kernel/debug")
async def gather_coverage_data(self):
self,
rundir=None,
config=None,
- pytestconfig=None,
pid=True,
logger=None,
**kwargs,
self.config_pathname = ""
self.config_dirname = ""
- self.pytest_config = pytestconfig
-
# Done in BaseMunet now
# # We need some way to actually get back to the root namespace
# if not self.isolated:
# # Let's hide podman details
# self.tmpfs_mount("/var/lib/containers/storage/overlay-containers")
- shellopt = (
- self.pytest_config.getoption("--shell") if self.pytest_config else None
- )
- shellopt = shellopt if shellopt is not None else ""
+ shellopt = self.cfgopt.getoption("--shell")
+ shellopt = shellopt if shellopt else ""
if shellopt == "all" or "." in shellopt.split(","):
self.run_in_window("bash")
x for x in hosts if hasattr(x, "has_ready_cmd") and x.has_ready_cmd()
]
- if not self.pytest_config:
- pcapopt = ""
- else:
- pcapopt = self.pytest_config.getoption("--pcap")
- pcapopt = pcapopt if pcapopt else ""
+ pcapopt = self.cfgopt.getoption("--pcap")
+ pcapopt = pcapopt if pcapopt else ""
if pcapopt == "all":
pcapopt = self.switches.keys()
if pcapopt:
self.logger.debug("%s: deleting.", self)
- if self.pytest_config and self.pytest_config.getoption("--coverage"):
+ if self.cfgopt.getoption("--coverage"):
nodes = (
x for x in self.hosts.values() if hasattr(x, "gather_coverage_data")
)
except Exception as error:
logging.warning("Error gathering coverage data: %s", error)
- if not self.pytest_config:
- pause = False
- else:
- pause = bool(self.pytest_config.getoption("--pause-at-end"))
- pause = pause or bool(self.pytest_config.getoption("--pause"))
+ pause = bool(self.cfgopt.getoption("--pause-at-end"))
+ pause = pause or bool(self.cfgopt.getoption("--pause"))
if pause:
try:
await async_pause_test("Before MUNET delete")
if search is None:
search = [cwd]
with importlib.resources.path("munet", "kinds.yaml") as datapath:
- search.append(str(datapath.parent))
+ search.insert(0, str(datapath.parent))
configs = []
if args_config:
# Skip pytests example directory
[pytest]
+asyncio_mode = auto
+
# We always turn this on inside conftest.py, default shown
# addopts = --junitxml=<rundir>/topotests.xml
junit_logging = all
junit_log_passing_tests = true
-norecursedirs = .git example_test example_topojson_test lib munet docker
+norecursedirs = .git example_munet example_test example_topojson_test lib munet docker
# Directory to store test results and run logs in, default shown
# rundir = /tmp/topotests
--- /dev/null
+bfd
+ profile slow
+ receive-interval 1000
+ transmit-interval 1000
+ exit
+exit
--- /dev/null
+interface r1-eth0
+ ip rip bfd
+ ip rip bfd profile slow
+exit
+!
+interface r1-eth1
+ ip rip bfd
+ ip rip bfd profile slow
+exit
+!
+router rip
+ allow-ecmp
+ network 192.168.0.1/24
+ network 192.168.1.1/24
+ redistribute connected
+ timers basic 10 40 30
+exit
--- /dev/null
+interface r1-eth0
+ ip address 192.168.0.1/24
+exit
+!
+interface r1-eth1
+ ip address 192.168.1.1/24
+exit
+!
+interface lo
+ ip address 10.254.254.1/32
+exit
--- /dev/null
+bfd
+ profile slow
+ receive-interval 1000
+ transmit-interval 1000
+ exit
+exit
--- /dev/null
+interface r2-eth0
+ ip rip bfd
+exit
+!
+router rip
+ bfd default-profile slow
+ network 192.168.0.2/24
+ redistribute connected
+ redistribute static
+ timers basic 10 40 30
+exit
--- /dev/null
+ip route 10.254.254.100/32 lo
--- /dev/null
+interface r2-eth0
+ ip address 192.168.0.2/24
+exit
+!
+interface lo
+ ip address 10.254.254.2/32
+exit
+
--- /dev/null
+bfd
+ profile slow
+ receive-interval 1000
+ transmit-interval 1000
+ exit
+exit
--- /dev/null
+interface r3-eth0
+ ip rip bfd
+ ip rip bfd profile slow
+exit
+!
+router rip
+ network 192.168.1.2/24
+ redistribute connected
+ redistribute static
+ timers basic 10 40 30
+exit
--- /dev/null
+ip route 10.254.254.100/32 lo
--- /dev/null
+interface r3-eth0
+ ip address 192.168.1.2/24
+exit
+!
+interface lo
+ ip address 10.254.254.3/32
+exit
--- /dev/null
+## Color coding:
+#########################
+## Main FRR: #f08080 red
+## Switches: #d0e0d0 gray
+## RIP: #19e3d9 Cyan
+## RIPng: #fcb314 dark yellow
+## OSPFv2: #32b835 Green
+## OSPFv3: #19e3d9 Cyan
+## ISIS IPv4 #fcb314 dark yellow
+## ISIS IPv6 #9a81ec purple
+## BGP IPv4 #eee3d3 beige
+## BGP IPv6 #fdff00 yellow
+##### Colors (see http://www.color-hex.com/)
+
+graph template {
+ label="rip_bfd_topo1";
+
+ # Routers
+ r1 [
+ shape=doubleoctagon,
+ label="r1",
+ fillcolor="#f08080",
+ style=filled,
+ ];
+ r2 [
+ shape=doubleoctagon
+ label="r2",
+ fillcolor="#f08080",
+ style=filled,
+ ];
+ r3 [
+ shape=doubleoctagon
+ label="r3",
+ fillcolor="#f08080",
+ style=filled,
+ ];
+
+ # Switches
+ s1 [
+ shape=oval,
+ label="s1\n192.168.0.0/24",
+ fillcolor="#d0e0d0",
+ style=filled,
+ ];
+ s2 [
+ shape=oval,
+ label="s2\n192.168.1.0/24",
+ fillcolor="#d0e0d0",
+ style=filled,
+ ];
+
+ # Connections
+ r1 -- s1 [label="r1-eth0\n.1"];
+ r2 -- s1 [label="r2-eth0\n.2"];
+
+ r1 -- s2 [label="r1-eth1\n.1"];
+ r3 -- s2 [label="r3-eth0\n.2"];
+}
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+#
+# test_rip_bfd_topo1.py
+# Part of NetDEF Topology Tests
+#
+# Copyright (c) 2023 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+
+"""
+test_rip_bfd_topo1.py: Test RIP BFD integration.
+"""
+
+import sys
+import re
+import pytest
+
+from functools import partial
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter
+from lib.topolog import logger
+
+pytestmark = [
+ pytest.mark.bfdd,
+ pytest.mark.ripd,
+]
+
+
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
+
+ topodef = {
+ "s1": ("r1", "r2"),
+ "s2": ("r1", "r3")
+ }
+ tgen = Topogen(topodef, request.module.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for router_name, router in router_list.items():
+ router.load_config(TopoRouter.RD_BFD, "bfdd.conf")
+ router.load_config(TopoRouter.RD_RIP, "ripd.conf")
+ router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ if router_name in ["r2", "r3"]:
+ router.load_config(TopoRouter.RD_STATIC, "staticd.conf")
+
+ tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
+
+
+@pytest.fixture(autouse=True)
+def skip_on_failure(tgen):
+ "Test if routers is still running otherwise skip tests"
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of previous test failure")
+
+
+def show_rip_json(router):
+ "Get router 'show ip rip' JSON output"
+ output = router.vtysh_cmd("show ip rip")
+ routes = output.splitlines()[6:]
+ json = {}
+
+ for route in routes:
+ match = re.match(
+ r"(.)\((.)\)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)", route)
+ if match is None:
+ continue
+
+ route_entry = {
+ "code": match[1],
+ "subCode": match[2],
+ "nextHop": match[4],
+ "metric": int(match[5]),
+ "from": match[6],
+ }
+
+ if json.get(match[3]) is None:
+ json[match[3]] = []
+
+ json[match[3]].append(route_entry)
+
+ return json
+
+
+def expect_routes(router, routes, time_amount):
+ "Expect 'routes' in 'router'."
+
+ def test_function():
+ "Internal test function."
+ return topotest.json_cmp(show_rip_json(router), routes)
+
+ _, result = topotest.run_and_expect(test_function,
+ None,
+ count=time_amount,
+ wait=1)
+ assert result is None, "Unexpected routing table in {}".format(
+ router.name)
+
+
+def expect_bfd_peers(router, peers):
+ "Expect 'peers' in 'router' BFD status."
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show bfd peers json",
+ peers,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=1)
+ assert result is None, "{} BFD peer status mismatch".format(router)
+
+
+def test_rip_convergence(tgen):
+ "Test that RIP learns the neighbor routes."
+
+ expect_routes(
+ tgen.gears["r1"], {
+ "10.254.254.2/32": [{
+ "code": "R",
+ "subCode": "n",
+ "from": "192.168.0.2"
+ }],
+ "10.254.254.3/32": [{
+ "code": "R",
+ "subCode": "n",
+ "from": "192.168.1.2"
+ }],
+ "10.254.254.100/32": [{
+ "code": "R",
+ "subCode": "n",
+ "from": "192.168.0.2",
+ }, {
+ "code": "R",
+ "subCode": "n",
+ "from": "192.168.1.2",
+ }]
+ }, 40)
+
+ expect_bfd_peers(tgen.gears["r1"], [{
+ "peer": "192.168.0.2",
+ "status": "up",
+ "receive-interval": 1000,
+ "transmit-interval": 1000,
+ }, {
+ "peer": "192.168.1.2",
+ "status": "up",
+ "receive-interval": 1000,
+ "transmit-interval": 1000,
+ }])
+
+ expect_routes(
+ tgen.gears["r2"], {
+ "10.254.254.1/32": [{
+ "code": "R",
+ "subCode": "n",
+ "from": "192.168.0.1"
+ }],
+ "10.254.254.3/32": [{
+ "code": "R",
+ "subCode": "n",
+ "from": "192.168.0.1"
+ }],
+ "10.254.254.100/32": [{
+ "code": "S",
+ "subCode": "r",
+ "from": "self"
+ }]
+ }, 40)
+
+ expect_bfd_peers(tgen.gears["r2"], [{
+ "peer": "192.168.0.1",
+ "status": "up",
+ "receive-interval": 1000,
+ "transmit-interval": 1000,
+ }])
+
+ expect_routes(
+ tgen.gears["r3"], {
+ "10.254.254.1/32": [{
+ "code": "R",
+ "subCode": "n",
+ "from": "192.168.1.1"
+ }],
+ "10.254.254.2/32": [{
+ "code": "R",
+ "subCode": "n",
+ "from": "192.168.1.1"
+ }],
+ "10.254.254.100/32": [{
+ "code": "S",
+ "subCode": "r",
+ "from": "self"
+ }]
+ }, 40)
+
+ expect_bfd_peers(tgen.gears["r3"], [{
+ "peer": "192.168.1.1",
+ "status": "up",
+ "receive-interval": 1000,
+ "transmit-interval": 1000,
+ }])
+
+
+def test_rip_bfd_convergence(tgen):
+ "Test that RIP drop the gone neighbor routes."
+
+ tgen.gears["r3"].link_enable("r3-eth0", False)
+
+ expect_routes(
+ tgen.gears["r1"], {
+ "10.254.254.2/32": [{
+ "code": "R",
+ "subCode": "n",
+ "from": "192.168.0.2"
+ }],
+ "10.254.254.3/32": None,
+ "10.254.254.100/32": [{
+ "code": "R",
+ "subCode": "n",
+ "from": "192.168.0.2",
+ }]
+ }, 6)
+
+ expect_routes(
+ tgen.gears["r3"], {
+ "10.254.254.1/32": None,
+ "10.254.254.2/32": None,
+ "10.254.254.100/32": [{
+ "code": "S",
+ "subCode": "r",
+ "from": "self"
+ }]
+ }, 6)
+
+
+def test_memory_leak(tgen):
+ "Run the memory leak test and report results."
+
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+!
+int r1-eth0
+ ip address 192.168.1.1/24
+!
+router rip
+ allow-ecmp
+ network 192.168.1.0/24
+ timers basic 5 15 10
+exit
--- /dev/null
+!
+int lo
+ ip address 10.10.10.1/32
+!
+int r2-eth0
+ ip address 192.168.1.2/24
+!
+router rip
+ network 192.168.1.0/24
+ network 10.10.10.1/32
+ timers basic 5 15 10
+exit
+
--- /dev/null
+!
+int lo
+ ip address 10.10.10.1/32
+!
+int r3-eth0
+ ip address 192.168.1.3/24
+!
+router rip
+ network 192.168.1.0/24
+ network 10.10.10.1/32
+ timers basic 5 15 10
+exit
+
--- /dev/null
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+
+# Copyright (c) 2023 by
+# Donatas Abraitis <donatas@opensourcerouting.org>
+#
+
+"""
+Test if RIP `passive-interface default` and `no passive-interface IFNAME`
+combination works as expected.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.ripd]
+
+
+def setup_module(mod):
+ topodef = {"s1": ("r1", "r2", "r3")}
+ tgen = Topogen(topodef, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for _, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)))
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_rip_passive_interface():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+
+ def _show_routes(nh_num):
+ output = json.loads(r1.vtysh_cmd("show ip route 10.10.10.1/32 json"))
+ expected = {
+ "10.10.10.1/32": [
+ {
+ "internalNextHopNum": nh_num,
+ "internalNextHopActiveNum": nh_num,
+ }
+ ]
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_show_routes, 2)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assert result is None, "Got 10.10.10.1/32, but the next-hop count is not 2"
+
+ step("Configure `passive-interface default` on r2")
+ r2.vtysh_cmd(
+ """
+ configure terminal
+ router rip
+ passive-interface default
+ """
+ )
+
+ test_func = functools.partial(_show_routes, 1)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assert result is None, "Got 10.10.10.1/32, but the next-hop count is not 1"
+
+ step("Configure `no passive-interface r2-eth0` on r2 towards r1")
+ r2.vtysh_cmd(
+ """
+ configure terminal
+ router rip
+ no passive-interface r2-eth0
+ """
+ )
+
+ test_func = functools.partial(_show_routes, 2)
+ _, result = topotest.run_and_expect(test_func, None, count=60, wait=1)
+ assert result is None, "Got 10.10.10.1/32, but the next-hop count is not 2"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
r1-eth3
Routing Information Sources:
Gateway BadPackets BadRoutes Distance Last Update
- 193.1.1.2 0 0 120 XX:XX:XX
+ 193.1.1.2 0 0 120 XX:XX:XX
Distance: (default is 120)
193.1.2.0/24
Routing Information Sources:
Gateway BadPackets BadRoutes Distance Last Update
- 193.1.1.1 0 0 120 XX:XX:XX
- 193.1.2.2 0 0 120 XX:XX:XX
+ 193.1.1.1 0 0 120 XX:XX:XX
+ 193.1.2.2 0 0 120 XX:XX:XX
Distance: (default is 120)
193.1.2.0/24
Routing Information Sources:
Gateway BadPackets BadRoutes Distance Last Update
- 193.1.2.1 0 0 120 XX:XX:XX
+ 193.1.2.1 0 0 120 XX:XX:XX
Distance: (default is 120)
"Set BGP extended community attribute";
}
+ identity set-extcommunity-nt {
+ base frr-route-map:rmap-set-type;
+ description
 "Set BGP extended community node-target attribute";
+ }
+
identity set-extcommunity-soo {
base frr-route-map:rmap-set-type;
description
}
}
+ case extcommunity-nt {
+ when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:set-extcommunity-nt')";
+ description
+ "Value of the ext-community";
+ leaf extcommunity-nt {
+ type string;
+ description
+ "Set BGP ext-community node-target attribute";
+ }
+ }
+
case extcommunity-soo {
when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:set-extcommunity-soo')";
description
import frr-if-rmap {
prefix frr-if-rmap;
}
+ import frr-bfdd {
+ prefix frr-bfdd;
+ }
import frr-interface {
prefix frr-interface;
}
}
}
+ leaf default-bfd-profile {
+ description
+ "Use this BFD profile for all peers by default.";
+ type frr-bfdd:profile-ref;
+ }
+
/*
* Operational data.
*/
"Key-chain name.";
}
}
+
+ container bfd-monitoring {
+ presence
+ "Present if BFD is configured for RIP peers in this interface.";
+
+ leaf enable {
+ type boolean;
+ description
+ "Enable/disable BFD monitoring.";
+ default false;
+ }
+
+ leaf profile {
+ type frr-bfdd:profile-ref;
+ description
+ "BFD profile to use.";
+ }
+ }
}
}
atomic_fetch_add_explicit(
&zdplane_info.dg_gre_set_errors, 1,
memory_order_relaxed);
- if (ctx)
- dplane_ctx_free(&ctx);
+ dplane_ctx_free(&ctx);
result = ZEBRA_DPLANE_REQUEST_FAILURE;
}
return result;
char flag_buf[MACIP_BUF_SIZE];
zlog_debug(
- "Send MACIP %s f %s MAC %pEA IP %pIA seq %u L2-VNI %u ESI %s to %s",
+ "Send MACIP %s f %s state %u MAC %pEA IP %pIA seq %u L2-VNI %u ESI %s to %s",
(cmd == ZEBRA_MACIP_ADD) ? "Add" : "Del",
zclient_evpn_dump_macip_flags(flags, flag_buf,
sizeof(flag_buf)),
- macaddr, ip, seq, vni,
- es ? es->esi_str : "-",
+ state, macaddr, ip, seq, vni, es ? es->esi_str : "-",
zebra_route_string(client->proto));
}
int zebra_evpn_mac_send_del_to_client(vni_t vni, const struct ethaddr *macaddr,
uint32_t flags, bool force)
{
+ int state = ZEBRA_NEIGH_ACTIVE;
+
if (!force) {
if (CHECK_FLAG(flags, ZEBRA_MAC_LOCAL_INACTIVE)
&& !CHECK_FLAG(flags, ZEBRA_MAC_ES_PEER_ACTIVE))
/* the host was not advertised - nothing to delete */
return 0;
+
+ /* MAC is LOCAL and DUP_DETECTED, this local mobility event
+ * is not known to bgpd. Upon receiving local delete
+ * ask bgp to reinstall the best route (remote entry).
+ */
+ if (CHECK_FLAG(flags, ZEBRA_MAC_LOCAL) &&
+ CHECK_FLAG(flags, ZEBRA_MAC_DUPLICATE))
+ state = ZEBRA_NEIGH_INACTIVE;
}
return zebra_evpn_macip_send_msg_to_client(
- vni, macaddr, NULL, 0 /* flags */, 0 /* seq */,
- ZEBRA_NEIGH_ACTIVE, NULL, ZEBRA_MACIP_DEL);
+ vni, macaddr, NULL, 0 /* flags */, 0 /* seq */, state, NULL,
+ ZEBRA_MACIP_DEL);
}
/*
/* Remove MAC from BGP. */
zebra_evpn_mac_send_del_to_client(zevpn->vni, &mac->macaddr, mac->flags,
- false /* force */);
+ clear_static /* force */);
zebra_evpn_es_mac_deref_entry(mac);
(protocol) == ZEBRA_ROUTE_OSPF6 || (protocol) == ZEBRA_ROUTE_ISIS || \
(protocol) == ZEBRA_ROUTE_PIM || \
(protocol) == ZEBRA_ROUTE_OPENFABRIC || \
- (protocol) == ZEBRA_ROUTE_STATIC)
+ (protocol) == ZEBRA_ROUTE_STATIC || (protocol) == ZEBRA_ROUTE_RIP)
void zebra_ptm_init(void);
void zebra_ptm_finish(void);