((c-mode . ((indent-tabs-mode . t)
(show-trailing-whitespace . t)
- (c-basic-offset . 8)
- )))
+ (c-basic-offset . 8)))
+ (json-mode . ((js-indent-level 4))))
--- /dev/null
+[MASTER]
+init-hook="import sys; sys.path.insert(0, '..')"
+signature-mutators=common_config.retry,retry
+
+[MESSAGES CONTROL]
+disable=I,C,R,W
expat fakeroot flex fortify-headers gdbm git gmp isl json-c-dev kmod
lddtree libacl libatomic libattr libblkid libburn libbz2 libc-dev
libcap-dev libcurl libedit libffi libgcc libgomp libisoburn libisofs
- libltdl libressl libssh2 libstdc++ libtool libuuid libyang-dev
+ libltdl libressl libssh2 libstdc++ libtool libuuid
linux-headers lzip lzo m4 make mkinitfs mpc1 mpfr4 mtools musl-dev
ncurses-libs ncurses-terminfo ncurses-terminfo-base patch pax-utils pcre
perl pkgconf python3 python3-dev readline readline-dev sqlite-libs
cd "$builddir"
make DESTDIR="$pkgdir" install
- install -Dm644 "$builddir"/tools/etc/frr/daemons "$pkgdir"$_sysconfdir
+ install -Dm644 "$builddir"/tools/etc/frr/daemons "$pkgdir"$_sysconfdir/daemons
install -d "$pkgdir"/etc/init.d
ln -s ${_sbindir}/frr "$pkgdir"/etc/init.d/frr
}
write++;
}
}
- vty_endframe (vty, "!\n");
+ vty_endframe (vty, "exit\n!\n");
write++;
}
return write;
case 0:
break;
default:
- frr_help_exit (1);
- break;
+ frr_help_exit(1);
}
}
lines += config_write_distribute (vty, babel_routing_process->distribute_ctx);
+ vty_out (vty, "exit\n");
+
return lines;
}
install_element(BABEL_NODE, &babel_ipv6_distribute_list_cmd);
install_element(BABEL_NODE, &babel_no_ipv6_distribute_list_cmd);
- vrf_cmd_init(NULL, &babeld_privs);
+ vrf_cmd_init(NULL);
babel_if_init();
default:
frr_help_exit(1);
- break;
}
}
void bfd_cli_show_header_end(struct vty *vty,
struct lyd_node *dnode __attribute__((__unused__)))
{
+ vty_out(vty, "exit\n");
vty_out(vty, "!\n");
}
void bfd_cli_show_peer_end(struct vty *vty,
struct lyd_node *dnode __attribute__((__unused__)))
{
+ vty_out(vty, " exit\n");
vty_out(vty, " !\n");
}
key = jhash(attr->mp_nexthop_local.s6_addr, IPV6_MAX_BYTELEN, key);
MIX3(attr->nh_ifindex, attr->nh_lla_ifindex, attr->distance);
MIX(attr->rmap_table_id);
+ MIX(attr->nh_type);
+ MIX(attr->bh_type);
return key;
}
&& attr1->distance == attr2->distance
&& srv6_l3vpn_same(attr1->srv6_l3vpn, attr2->srv6_l3vpn)
&& srv6_vpn_same(attr1->srv6_vpn, attr2->srv6_vpn)
- && attr1->srte_color == attr2->srte_color)
+ && attr1->srte_color == attr2->srte_color
+ && attr1->nh_type == attr2->nh_type
+ && attr1->bh_type == attr2->bh_type)
return true;
}
args->total);
}
- attr->ecommunity =
- ecommunity_parse(stream_pnt(peer->curr), length);
+ attr->ecommunity = ecommunity_parse(
+ stream_pnt(peer->curr), length,
+ CHECK_FLAG(peer->flags,
+ PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE));
/* XXX: fix ecommunity_parse to use stream API */
stream_forward_getp(peer->curr, length);
args->total);
}
- ipv6_ecomm = ecommunity_parse_ipv6(stream_pnt(peer->curr), length);
+ ipv6_ecomm = ecommunity_parse_ipv6(
+ stream_pnt(peer->curr), length,
+ CHECK_FLAG(peer->flags,
+ PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE));
bgp_attr_set_ipv6_ecommunity(attr, ipv6_ecomm);
/* XXX: fix ecommunity_parse to use stream API */
/* EVPN DF preference and algorithm for DF election on local ESs */
uint16_t df_pref;
uint8_t df_alg;
+
+ /* Nexthop type */
+ enum nexthop_types_t nh_type;
+
+ /* If NEXTHOP_TYPE_BLACKHOLE, then blackhole type */
+ enum blackhole_type bh_type;
};
/* rmap_change_flags definition */
frr_each (bmp_actives, &bt->actives, ba)
vty_out(vty, " bmp connect %s port %u min-retry %u max-retry %u\n",
ba->hostname, ba->port, ba->minretry, ba->maxretry);
+
+ vty_out(vty, " exit\n");
}
return 0;
new = ecommunity_new();
new->unit_size = ecom_size;
+ new->disable_ieee_floating = ecom->disable_ieee_floating;
for (i = 0; i < ecom->size; i++) {
eval = (void *)(ecom->val + (i * ecom_size));
/* Parse Extended Communites Attribute in BGP packet. */
static struct ecommunity *ecommunity_parse_internal(uint8_t *pnt,
- unsigned short length,
- unsigned short size_ecom)
+ unsigned short length,
+ unsigned short size_ecom,
+ bool disable_ieee_floating)
{
struct ecommunity tmp;
struct ecommunity *new;
Attribute. */
tmp.size = length / size_ecom;
tmp.val = pnt;
+ tmp.disable_ieee_floating = disable_ieee_floating;
/* Create a new Extended Communities Attribute by uniq and sort each
Extended Communities value */
return ecommunity_intern(new);
}
-struct ecommunity *ecommunity_parse(uint8_t *pnt,
- unsigned short length)
+struct ecommunity *ecommunity_parse(uint8_t *pnt, unsigned short length,
+ bool disable_ieee_floating)
{
- return ecommunity_parse_internal(pnt, length, ECOMMUNITY_SIZE);
+ return ecommunity_parse_internal(pnt, length, ECOMMUNITY_SIZE,
+ disable_ieee_floating);
}
-struct ecommunity *ecommunity_parse_ipv6(uint8_t *pnt,
- unsigned short length)
+struct ecommunity *ecommunity_parse_ipv6(uint8_t *pnt, unsigned short length,
+ bool disable_ieee_floating)
{
- return ecommunity_parse_internal(pnt, length,
- IPV6_ECOMMUNITY_SIZE);
+ return ecommunity_parse_internal(pnt, length, IPV6_ECOMMUNITY_SIZE,
+ disable_ieee_floating);
}
/* Duplicate the Extended Communities Attribute structure. */
ECOMMUNITY_SIZE);
}
-static int ecommunity_lb_str(char *buf, size_t bufsz, const uint8_t *pnt)
+/* Helper function to convert IEEE-754 Floating Point to uint32 */
+static uint32_t ieee_float_uint32_to_uint32(uint32_t u)
+{
+ union {
+ float r;
+ uint32_t d;
+ } f = {.d = u};
+
+ return (uint32_t)f.r;
+}
+
+static int ecommunity_lb_str(char *buf, size_t bufsz, const uint8_t *pnt,
+ bool disable_ieee_floating)
{
int len = 0;
as_t as;
- uint32_t bw;
+ uint32_t bw_tmp, bw;
char bps_buf[20] = {0};
#define ONE_GBPS_BYTES (1000 * 1000 * 1000 / 8)
as = (*pnt++ << 8);
as |= (*pnt++);
- (void)ptr_get_be32(pnt, &bw);
+ (void)ptr_get_be32(pnt, &bw_tmp);
+
+ bw = disable_ieee_floating ? bw_tmp
+ : ieee_float_uint32_to_uint32(bw_tmp);
+
if (bw >= ONE_GBPS_BYTES)
snprintf(bps_buf, sizeof(bps_buf), "%.3f Gbps",
(float)(bw / ONE_GBPS_BYTES));
} else if (sub_type ==
ECOMMUNITY_LINK_BANDWIDTH &&
type == ECOMMUNITY_ENCODE_AS) {
- ecommunity_lb_str(encbuf,
- sizeof(encbuf), pnt);
+ ecommunity_lb_str(
+ encbuf, sizeof(encbuf), pnt,
+ ecom->disable_ieee_floating);
} else
unk_ecom = 1;
} else {
} else if (type == ECOMMUNITY_ENCODE_AS_NON_TRANS) {
sub_type = *pnt++;
if (sub_type == ECOMMUNITY_LINK_BANDWIDTH)
- ecommunity_lb_str(encbuf, sizeof(encbuf), pnt);
+ ecommunity_lb_str(encbuf, sizeof(encbuf), pnt,
+ ecom->disable_ieee_floating);
else
unk_ecom = 1;
} else {
pnt = ptr_get_be32(pnt, &bwval);
(void)pnt; /* consume value */
if (bw)
- *bw = bwval;
+ *bw = ecom->disable_ieee_floating
+ ? bwval
+ : ieee_float_uint32_to_uint32(
+ bwval);
return eval;
}
}
}
-struct ecommunity *ecommunity_replace_linkbw(as_t as,
- struct ecommunity *ecom,
- uint64_t cum_bw)
+struct ecommunity *ecommunity_replace_linkbw(as_t as, struct ecommunity *ecom,
+ uint64_t cum_bw,
+ bool disable_ieee_floating)
{
struct ecommunity *new;
struct ecommunity_val lb_eval;
*/
if (cum_bw > 0xFFFFFFFF)
cum_bw = 0xFFFFFFFF;
- encode_lb_extcomm(as > BGP_AS_MAX ? BGP_AS_TRANS : as, cum_bw,
- false, &lb_eval);
+ encode_lb_extcomm(as > BGP_AS_MAX ? BGP_AS_TRANS : as, cum_bw, false,
+ &lb_eval, disable_ieee_floating);
new = ecommunity_dup(ecom);
ecommunity_add_val(new, &lb_eval, true, true);
/* Human readable format string. */
char *str;
+
+ /* Disable IEEE floating-point encoding for extended community */
+ bool disable_ieee_floating;
};
struct ecommunity_as {
char val[IPV6_ECOMMUNITY_SIZE];
};
-enum ecommunity_lb_type {
- EXPLICIT_BANDWIDTH,
- CUMULATIVE_BANDWIDTH,
- COMPUTED_BANDWIDTH
-};
-
#define ecom_length_size(X, Y) ((X)->size * (Y))
/*
eval->val[7] = val & 0xff;
}
+/* Helper function to convert uint32 to IEEE-754 Floating Point */
+static uint32_t uint32_to_ieee_float_uint32(uint32_t u)
+{
+ union {
+ float r;
+ uint32_t d;
+ } f = {.r = (float)u};
+
+ return f.d;
+}
+
/*
* Encode BGP Link Bandwidth extended community
* bandwidth (bw) is in bytes-per-sec
*/
static inline void encode_lb_extcomm(as_t as, uint32_t bw, bool non_trans,
- struct ecommunity_val *eval)
+ struct ecommunity_val *eval,
+ bool disable_ieee_floating)
{
+ uint32_t bandwidth =
+ disable_ieee_floating ? bw : uint32_to_ieee_float_uint32(bw);
+
memset(eval, 0, sizeof(*eval));
eval->val[0] = ECOMMUNITY_ENCODE_AS;
if (non_trans)
eval->val[1] = ECOMMUNITY_LINK_BANDWIDTH;
eval->val[2] = (as >> 8) & 0xff;
eval->val[3] = as & 0xff;
- eval->val[4] = (bw >> 24) & 0xff;
- eval->val[5] = (bw >> 16) & 0xff;
- eval->val[6] = (bw >> 8) & 0xff;
- eval->val[7] = bw & 0xff;
+ eval->val[4] = (bandwidth >> 24) & 0xff;
+ eval->val[5] = (bandwidth >> 16) & 0xff;
+ eval->val[6] = (bandwidth >> 8) & 0xff;
+ eval->val[7] = bandwidth & 0xff;
}
extern void ecommunity_init(void);
extern void ecommunity_finish(void);
extern void ecommunity_free(struct ecommunity **);
-extern struct ecommunity *ecommunity_parse(uint8_t *, unsigned short);
+extern struct ecommunity *ecommunity_parse(uint8_t *, unsigned short,
+ bool disable_ieee_floating);
extern struct ecommunity *ecommunity_parse_ipv6(uint8_t *pnt,
- unsigned short length);
+ unsigned short length,
+ bool disable_ieee_floating);
extern struct ecommunity *ecommunity_dup(struct ecommunity *);
extern struct ecommunity *ecommunity_merge(struct ecommunity *,
struct ecommunity *);
extern const uint8_t *ecommunity_linkbw_present(struct ecommunity *ecom,
uint32_t *bw);
extern struct ecommunity *ecommunity_replace_linkbw(as_t as,
- struct ecommunity *ecom, uint64_t cum_bw);
+ struct ecommunity *ecom,
+ uint64_t cum_bw,
+ bool disable_ieee_floating);
static inline void ecommunity_strip_rts(struct ecommunity *ecom)
{
originator_ip, sizeof(originator_ip)));
json_object_string_add(json, "advertiseGatewayMacip", "n/a");
json_object_string_add(json, "advertiseSviMacIp", "n/a");
- json_object_to_json_string_ext(json,
- JSON_C_TO_STRING_NOSLASHESCAPE);
json_object_string_add(json, "advertisePip",
bgp_vrf->evpn_info->advertise_pip ?
"Enabled" : "Disabled");
json_object_string_add(json_vni, "advertiseGatewayMacip",
"n/a");
json_object_string_add(json_vni, "advertiseSviMacIp", "n/a");
- json_object_to_json_string_ext(json_vni,
- JSON_C_TO_STRING_NOSLASHESCAPE);
json_object_string_add(
json_vni, "advertisePip",
bgp->evpn_info->advertise_pip ? "Enabled" : "Disabled");
}
if (uj) {
- vty_out(vty, "%s\n", json_object_to_json_string_ext(
- json, JSON_C_TO_STRING_PRETTY));
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json,
+ JSON_C_TO_STRING_PRETTY
+ | JSON_C_TO_STRING_NOSLASHESCAPE));
json_object_free(json);
}
status start timer is on unless peer is shutdown or peer is
inactive. All other timer must be turned off */
if (BGP_PEER_START_SUPPRESSED(peer) || !peer_active(peer)
- || (peer->bgp->inst_type != BGP_INSTANCE_TYPE_VIEW &&
- peer->bgp->vrf_id == VRF_UNKNOWN)) {
+ || peer->bgp->vrf_id == VRF_UNKNOWN) {
BGP_TIMER_OFF(peer->t_start);
} else {
BGP_TIMER_ON(peer->t_start, bgp_start_timer,
"No AFI/SAFI activated for peer",
"AS Set config change",
"Waiting for peer OPEN",
- "Reached received prefix count"};
+ "Reached received prefix count",
+ "Socket Error"};
static int bgp_graceful_restart_timer_expire(struct thread *thread)
{
return 0;
}
- if (peer->bgp->inst_type != BGP_INSTANCE_TYPE_VIEW &&
- peer->bgp->vrf_id == VRF_UNKNOWN) {
+ if (peer->bgp->vrf_id == VRF_UNKNOWN) {
if (bgp_debug_neighbor_events(peer))
flog_err(
EC_BGP_FSM,
case OpenConfirm:
case Established:
if (!has_valid_nexthops
- && (peer->gtsm_hops == BGP_GTSM_HOPS_CONNECTED))
+ && (peer->gtsm_hops == BGP_GTSM_HOPS_CONNECTED
+ || peer->bgp->fast_convergence))
BGP_EVENT_ADD(peer, TCP_fatal_error);
case Clearing:
case Deleted:
break;
default:
frr_help_exit(1);
- break;
}
}
if (skip_runas)
prev_mpath = cur_mpath;
mpath_count++;
if (ecommunity_linkbw_present(
- cur_mpath->attr->ecommunity, &bwval))
+ cur_mpath->attr->ecommunity,
+ &bwval))
cum_bw += bwval;
else
all_paths_lb = false;
mpath_changed = 1;
mpath_count++;
if (ecommunity_linkbw_present(
- new_mpath->attr->ecommunity, &bwval))
+ new_mpath->attr->ecommunity,
+ &bwval))
cum_bw += bwval;
else
all_paths_lb = false;
if (new_best) {
bgp_path_info_mpath_count_set(new_best, mpath_count - 1);
- if (mpath_count <= 1 ||
- !ecommunity_linkbw_present(
- new_best->attr->ecommunity, &bwval))
+ if (mpath_count <= 1
+ || !ecommunity_linkbw_present(new_best->attr->ecommunity,
+ &bwval))
all_paths_lb = false;
else
cum_bw += bwval;
* must be the default vrf or a view instance
*/
if (!listener->bgp) {
- if (peer->bgp->vrf_id != VRF_DEFAULT
- && peer->bgp->inst_type
- != BGP_INSTANCE_TYPE_VIEW)
+ if (peer->bgp->vrf_id != VRF_DEFAULT)
continue;
} else if (listener->bgp != peer->bgp)
continue;
/* BGP socket bind. */
static char *bgp_get_bound_name(struct peer *peer)
{
- char *name = NULL;
-
if (!peer)
return NULL;
* takes precedence over VRF. For IPv4 peering, explicit interface or
* VRF are the situations to bind.
*/
- if (peer->su.sa.sa_family == AF_INET6)
- name = (peer->conf_if ? peer->conf_if
- : (peer->ifname ? peer->ifname
- : peer->bgp->name));
- else
- name = peer->ifname ? peer->ifname : peer->bgp->name;
+ if (peer->su.sa.sa_family == AF_INET6 && peer->conf_if)
+ return peer->conf_if;
+
+ if (peer->ifname)
+ return peer->ifname;
+
+ if (peer->bgp->inst_type == BGP_INSTANCE_TYPE_VIEW)
+ return NULL;
- return name;
+ return peer->bgp->name;
}
static int bgp_update_address(struct interface *ifp, const union sockunion *dst,
ifindex_t ifindex = 0;
if (peer->conf_if && BGP_PEER_SU_UNSPEC(peer)) {
- zlog_debug("Peer address not learnt: Returning from connect");
+ if (bgp_debug_neighbor_events(peer))
+ zlog_debug("Peer address not learnt: Returning from connect");
return 0;
}
frr_with_privs(&bgpd_privs) {
peer->fd = vrf_sockunion_socket(&peer->su, peer->bgp->vrf_id,
bgp_get_bound_name(peer));
}
- if (peer->fd < 0)
+ if (peer->fd < 0) {
+ peer->last_reset = PEER_DOWN_SOCKET_ERROR;
+ if (bgp_debug_neighbor_events(peer))
+ zlog_debug("%s: Failure to create socket for connection to %s, error received: %s(%d)",
+ __func__, peer->host, safe_strerror(errno),
+ errno);
return -1;
+ }
set_nonblocking(peer->fd);
bgp_socket_set_buffer_size(peer->fd);
- if (bgp_set_socket_ttl(peer, peer->fd) < 0)
+ if (bgp_set_socket_ttl(peer, peer->fd) < 0) {
+ peer->last_reset = PEER_DOWN_SOCKET_ERROR;
+ if (bgp_debug_neighbor_events(peer))
+ zlog_debug("%s: Failure to set socket ttl for connection to %s, error received: %s(%d)",
+ __func__, peer->host, safe_strerror(errno),
+ errno);
return -1;
+ }
sockopt_reuseaddr(peer->fd);
sockopt_reuseport(peer->fd);
/* Update source bind. */
if (bgp_update_source(peer) < 0) {
+ peer->last_reset = PEER_DOWN_SOCKET_ERROR;
return connect_error;
}
listener->name = XSTRDUP(MTYPE_BGP_LISTENER, bgp->name);
/* this socket is in a vrf record bgp back pointer */
- if (bgp->vrf_id != VRF_DEFAULT
- && bgp->inst_type != BGP_INSTANCE_TYPE_VIEW)
+ if (bgp->vrf_id != VRF_DEFAULT)
listener->bgp = bgp;
memcpy(&listener->su, sa, salen);
sock = vrf_socket(ainfo->ai_family,
ainfo->ai_socktype,
ainfo->ai_protocol,
- (bgp->inst_type
- != BGP_INSTANCE_TYPE_VIEW
- ? bgp->vrf_id : VRF_DEFAULT),
+ bgp->vrf_id,
(bgp->inst_type
== BGP_INSTANCE_TYPE_VRF
? bgp->name : NULL));
FOREACH_AFI_SAFI (afi, safi) {
if (peer->afc[afi][safi]) {
+ bool adv_addpath_rx =
+ !CHECK_FLAG(peer->af_flags[afi][safi],
+ PEER_FLAG_DISABLE_ADDPATH_RX);
+ uint8_t flags = 0;
+
/* Convert AFI, SAFI to values for packet. */
bgp_map_afi_safi_int2iana(afi, safi, &pkt_afi,
&pkt_safi);
stream_putw(s, pkt_afi);
stream_putc(s, pkt_safi);
- if (adv_addpath_tx) {
- stream_putc(s, BGP_ADDPATH_RX | BGP_ADDPATH_TX);
+ if (adv_addpath_rx) {
+ SET_FLAG(flags, BGP_ADDPATH_RX);
SET_FLAG(peer->af_cap[afi][safi],
PEER_CAP_ADDPATH_AF_RX_ADV);
+ } else {
+ UNSET_FLAG(peer->af_cap[afi][safi],
+ PEER_CAP_ADDPATH_AF_RX_ADV);
+ }
+
+ if (adv_addpath_tx) {
+ SET_FLAG(flags, BGP_ADDPATH_TX);
SET_FLAG(peer->af_cap[afi][safi],
PEER_CAP_ADDPATH_AF_TX_ADV);
} else {
- stream_putc(s, BGP_ADDPATH_RX);
- SET_FLAG(peer->af_cap[afi][safi],
- PEER_CAP_ADDPATH_AF_RX_ADV);
UNSET_FLAG(peer->af_cap[afi][safi],
PEER_CAP_ADDPATH_AF_TX_ADV);
}
+
+ stream_putc(s, flags);
}
}
return BGP_Stop;
}
+ /* Send notification message when Hold Time received in the OPEN message
+ * is smaller than configured minimum Hold Time. */
+ if (holdtime < peer->bgp->default_min_holdtime
+ && peer->bgp->default_min_holdtime != 0) {
+ bgp_notify_send_with_data(peer, BGP_NOTIFY_OPEN_ERR,
+ BGP_NOTIFY_OPEN_UNACEP_HOLDTIME,
+ (uint8_t *)holdtime_ptr, 2);
+ return BGP_Stop;
+ }
+
/* From the rfc: A reasonable maximum time between KEEPALIVE messages
would be one third of the Hold Time interval. KEEPALIVE messages
MUST NOT be sent more frequently than one per second. An
(cum_bw = bgp_path_info_mpath_cumbw(pi)) != 0 &&
!CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_LINK_BW_SET))
attr->ecommunity = ecommunity_replace_linkbw(
- bgp->as, attr->ecommunity, cum_bw);
+ bgp->as, attr->ecommunity, cum_bw,
+ CHECK_FLAG(peer->flags,
+ PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE));
return true;
}
void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
const union g_addr *nexthop, ifindex_t ifindex,
enum nexthop_types_t nhtype, uint8_t distance,
- uint32_t metric, uint8_t type,
- unsigned short instance, route_tag_t tag)
+ enum blackhole_type bhtype, uint32_t metric,
+ uint8_t type, unsigned short instance,
+ route_tag_t tag)
{
struct bgp_path_info *new;
struct bgp_path_info *bpi;
attr.mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL;
break;
}
+ attr.bh_type = bhtype;
break;
}
+ attr.nh_type = nhtype;
attr.nh_ifindex = ifindex;
attr.med = metric;
extern void bgp_redistribute_add(struct bgp *bgp, struct prefix *p,
const union g_addr *nexthop, ifindex_t ifindex,
enum nexthop_types_t nhtype, uint8_t distance,
- uint32_t metric, uint8_t type,
- unsigned short instance, route_tag_t tag);
+ enum blackhole_type bhtype, uint32_t metric,
+ uint8_t type, unsigned short instance,
+ route_tag_t tag);
extern void bgp_redistribute_delete(struct bgp *, struct prefix *, uint8_t,
unsigned short);
extern void bgp_redistribute_withdraw(struct bgp *, afi_t, int, unsigned short);
/* `set extcommunity rt COMMUNITY' */
+struct rmap_ecom_set {
+ struct ecommunity *ecom;
+ bool none;
+};
+
/* For community set mechanism. Used by _rt and _soo. */
static enum route_map_cmd_result_t
route_set_ecommunity(void *rule, const struct prefix *prefix, void *object)
{
- struct ecommunity *ecom;
+ struct rmap_ecom_set *rcs;
struct ecommunity *new_ecom;
struct ecommunity *old_ecom;
struct bgp_path_info *path;
+ struct attr *attr;
- ecom = rule;
+ rcs = rule;
path = object;
+ attr = path->attr;
- if (!ecom)
+ if (rcs->none) {
+ attr->flag &= ~(ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES));
+ attr->ecommunity = NULL;
+ return RMAP_OKAY;
+ }
+
+ if (!rcs->ecom)
return RMAP_OKAY;
/* We assume additive for Extended Community. */
old_ecom = path->attr->ecommunity;
if (old_ecom) {
- new_ecom = ecommunity_merge(ecommunity_dup(old_ecom), ecom);
+ new_ecom =
+ ecommunity_merge(ecommunity_dup(old_ecom), rcs->ecom);
/* old_ecom->refcnt = 1 => owned elsewhere, e.g.
* bgp_update_receive()
if (!old_ecom->refcnt)
ecommunity_free(&old_ecom);
} else
- new_ecom = ecommunity_dup(ecom);
+ new_ecom = ecommunity_dup(rcs->ecom);
/* will be intern()'d or attr_flush()'d by bgp_update_main() */
path->attr->ecommunity = new_ecom;
return RMAP_OKAY;
}
-/* Compile function for set community. */
+static void *route_set_ecommunity_none_compile(const char *arg)
+{
+ struct rmap_ecom_set *rcs;
+ bool none = false;
+
+ if (strncmp(arg, "none", 4) == 0)
+ none = true;
+
+ rcs = XCALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(struct rmap_ecom_set));
+ rcs->ecom = NULL;
+ rcs->none = none;
+
+ return rcs;
+}
+
static void *route_set_ecommunity_rt_compile(const char *arg)
{
+ struct rmap_ecom_set *rcs;
struct ecommunity *ecom;
ecom = ecommunity_str2com(arg, ECOMMUNITY_ROUTE_TARGET, 0);
if (!ecom)
return NULL;
- return ecommunity_intern(ecom);
+
+ rcs = XCALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(struct rmap_ecom_set));
+ rcs->ecom = ecommunity_intern(ecom);
+ rcs->none = false;
+
+ return rcs;
}
/* Free function for set community. Used by _rt and _soo */
static void route_set_ecommunity_free(void *rule)
{
- struct ecommunity *ecom = rule;
- ecommunity_unintern(&ecom);
+ struct rmap_ecom_set *rcs = rule;
+
+ if (rcs->ecom)
+ ecommunity_unintern(&rcs->ecom);
+
+ XFREE(MTYPE_ROUTE_MAP_COMPILED, rcs);
}
+static const struct route_map_rule_cmd route_set_ecommunity_none_cmd = {
+ "extcommunity",
+ route_set_ecommunity,
+ route_set_ecommunity_none_compile,
+ route_set_ecommunity_free,
+};
+
/* Set community rule structure. */
static const struct route_map_rule_cmd route_set_ecommunity_rt_cmd = {
"extcommunity rt",
/* Compile function for set community. */
static void *route_set_ecommunity_soo_compile(const char *arg)
{
+ struct rmap_ecom_set *rcs;
struct ecommunity *ecom;
ecom = ecommunity_str2com(arg, ECOMMUNITY_SITE_ORIGIN, 0);
if (!ecom)
return NULL;
- return ecommunity_intern(ecom);
+ rcs = XCALLOC(MTYPE_ROUTE_MAP_COMPILED, sizeof(struct rmap_ecom_set));
+ rcs->ecom = ecommunity_intern(ecom);
+ rcs->none = false;
+
+ return rcs;
}
/* Set community rule structure. */
bw_bytes *= mpath_count;
}
- encode_lb_extcomm(as, bw_bytes, rels->non_trans, &lb_eval);
+ encode_lb_extcomm(as, bw_bytes, rels->non_trans, &lb_eval,
+ CHECK_FLAG(peer->flags,
+ PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE));
/* add to route or merge with existing */
old_ecom = path->attr->ecommunity;
"GP extended community attribute\n"
"Site-of-Origin extended community\n")
+DEFUN_YANG(set_ecommunity_none, set_ecommunity_none_cmd,
+ "set extcommunity none",
+ SET_STR
+ "BGP extended community attribute\n"
+ "No extended community attribute\n")
+{
+ const char *xpath =
+ "./set-action[action='frr-bgp-route-map:set-extcommunity-none']";
+ char xpath_value[XPATH_MAXLEN];
+
+ nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL);
+
+ snprintf(xpath_value, sizeof(xpath_value),
+ "%s/rmap-set-action/frr-bgp-route-map:extcommunity-none",
+ xpath);
+ nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, "true");
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+DEFUN_YANG(no_set_ecommunity_none, no_set_ecommunity_none_cmd,
+ "no set extcommunity none",
+ NO_STR SET_STR
+ "BGP extended community attribute\n"
+ "No extended community attribute\n")
+{
+ const char *xpath =
+ "./set-action[action='frr-bgp-route-map:set-extcommunity-none']";
+ nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL);
+ return nb_cli_apply_changes(vty, NULL);
+}
+
DEFUN_YANG (set_ecommunity_lb,
set_ecommunity_lb_cmd,
"set extcommunity bandwidth <(1-25600)|cumulative|num-multipaths> [non-transitive]",
route_map_install_set(&route_set_ecommunity_rt_cmd);
route_map_install_set(&route_set_ecommunity_soo_cmd);
route_map_install_set(&route_set_ecommunity_lb_cmd);
+ route_map_install_set(&route_set_ecommunity_none_cmd);
route_map_install_set(&route_set_tag_cmd);
route_map_install_set(&route_set_label_index_cmd);
install_element(RMAP_NODE, &set_ecommunity_lb_cmd);
install_element(RMAP_NODE, &no_set_ecommunity_lb_cmd);
install_element(RMAP_NODE, &no_set_ecommunity_lb_short_cmd);
+ install_element(RMAP_NODE, &set_ecommunity_none_cmd);
+ install_element(RMAP_NODE, &no_set_ecommunity_none_cmd);
#ifdef KEEP_OLD_VPN_COMMANDS
install_element(RMAP_NODE, &set_vpn_nexthop_cmd);
install_element(RMAP_NODE, &no_set_vpn_nexthop_cmd);
.destroy = lib_route_map_entry_set_action_rmap_set_action_comm_list_name_destroy,
}
},
+ {
+ .xpath = "/frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:extcommunity-none",
+ .cbs = {
+ .modify = lib_route_map_entry_set_action_rmap_set_action_extcommunity_none_modify,
+ .destroy = lib_route_map_entry_set_action_rmap_set_action_extcommunity_none_destroy,
+ }
+ },
{
.xpath = "/frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:extcommunity-lb",
.cbs = {
int lib_route_map_entry_set_action_rmap_set_action_extcommunity_lb_bandwidth_destroy(struct nb_cb_destroy_args *args);
int lib_route_map_entry_set_action_rmap_set_action_extcommunity_lb_two_octet_as_specific_modify(struct nb_cb_modify_args *args);
int lib_route_map_entry_set_action_rmap_set_action_extcommunity_lb_two_octet_as_specific_destroy(struct nb_cb_destroy_args *args);
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_none_modify(
+ struct nb_cb_modify_args *args);
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_none_destroy(
+ struct nb_cb_destroy_args *args);
int lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv4_modify(
struct nb_cb_modify_args *args);
int lib_route_map_entry_set_action_rmap_set_action_evpn_gateway_ip_ipv4_destroy(
#include "lib/routemap.h"
#include "bgpd/bgpd.h"
#include "bgpd/bgp_routemap_nb.h"
-#include "bgpd/bgp_ecommunity.h"
/* Add bgp route map rule. */
static int bgp_route_match_add(struct route_map_index *index,
return lib_route_map_entry_set_destroy(args);
}
+/*
+ * XPath:
+ * /frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:extcommunity-none
+ */
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_none_modify(
+ struct nb_cb_modify_args *args)
+{
+ struct routemap_hook_context *rhc;
+ bool none = false;
+ int rv;
+
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ /* Add configuration. */
+ rhc = nb_running_get_entry(args->dnode, NULL, true);
+ none = yang_dnode_get_bool(args->dnode, NULL);
+
+ /* Set destroy information. */
+ rhc->rhc_shook = generic_set_delete;
+ rhc->rhc_rule = "extcommunity";
+ rhc->rhc_event = RMAP_EVENT_SET_DELETED;
+
+ if (none) {
+ rv = generic_set_add(rhc->rhc_rmi, "extcommunity",
+ "none", args->errmsg,
+ args->errmsg_len);
+ if (rv != CMD_SUCCESS) {
+ rhc->rhc_shook = NULL;
+ return NB_ERR_INCONSISTENCY;
+ }
+ return NB_OK;
+ }
+
+ return NB_ERR_INCONSISTENCY;
+ }
+
+ return NB_OK;
+}
+
+int lib_route_map_entry_set_action_rmap_set_action_extcommunity_none_destroy(
+ struct nb_cb_destroy_args *args)
+{
+ switch (args->event) {
+ case NB_EV_VALIDATE:
+ case NB_EV_PREPARE:
+ case NB_EV_ABORT:
+ break;
+ case NB_EV_APPLY:
+ return lib_route_map_entry_set_destroy(args);
+ }
+
+ return NB_OK;
+}
+
/*
* XPath:
* /frr-route-map:lib/route-map/entry/set-action/rmap-set-action/frr-bgp-route-map:evpn-gateway-ip-ipv4
const char *username, const char *client_privkey_path,
const char *client_pubkey_path,
const char *server_pubkey_path,
- const uint8_t preference);
+ const uint8_t preference, const char *bindaddr);
#endif
static struct rtr_socket *create_rtr_socket(struct tr_socket *tr_socket);
static struct cache *find_cache(const uint8_t preference);
static int add_tcp_cache(const char *host, const char *port,
- const uint8_t preference);
+ const uint8_t preference, const char *bindaddr);
static void print_record(const struct pfx_record *record, struct vty *vty);
static int is_synchronized(void);
static int is_running(void);
}
static int add_tcp_cache(const char *host, const char *port,
- const uint8_t preference)
+ const uint8_t preference, const char *bindaddr)
{
struct rtr_socket *rtr_socket;
struct tr_tcp_config *tcp_config =
tcp_config->host = XSTRDUP(MTYPE_BGP_RPKI_CACHE, host);
tcp_config->port = XSTRDUP(MTYPE_BGP_RPKI_CACHE, port);
- tcp_config->bindaddr = NULL;
+ if (bindaddr)
+ tcp_config->bindaddr = XSTRDUP(MTYPE_BGP_RPKI_CACHE, bindaddr);
+ else
+ tcp_config->bindaddr = NULL;
rtr_socket = create_rtr_socket(tr_socket);
const char *username, const char *client_privkey_path,
const char *client_pubkey_path,
const char *server_pubkey_path,
- const uint8_t preference)
+ const uint8_t preference, const char *bindaddr)
{
struct tr_ssh_config *ssh_config =
XCALLOC(MTYPE_BGP_RPKI_CACHE, sizeof(struct tr_ssh_config));
ssh_config->port = port;
ssh_config->host = XSTRDUP(MTYPE_BGP_RPKI_CACHE, host);
- ssh_config->bindaddr = NULL;
+ if (bindaddr)
+ ssh_config->bindaddr = XSTRDUP(MTYPE_BGP_RPKI_CACHE, bindaddr);
+ else
+ ssh_config->bindaddr = NULL;
ssh_config->username = XSTRDUP(MTYPE_BGP_RPKI_CACHE, username);
ssh_config->client_privkey_path =
if (cache->type == TCP) {
XFREE(MTYPE_BGP_RPKI_CACHE, cache->tr_config.tcp_config->host);
XFREE(MTYPE_BGP_RPKI_CACHE, cache->tr_config.tcp_config->port);
+ if (cache->tr_config.tcp_config->bindaddr)
+ XFREE(MTYPE_BGP_RPKI_CACHE,
+ cache->tr_config.tcp_config->bindaddr);
XFREE(MTYPE_BGP_RPKI_CACHE, cache->tr_config.tcp_config);
}
#if defined(FOUND_SSH)
cache->tr_config.ssh_config->client_privkey_path);
XFREE(MTYPE_BGP_RPKI_CACHE,
cache->tr_config.ssh_config->server_hostkey_path);
+ if (cache->tr_config.ssh_config->bindaddr)
+ XFREE(MTYPE_BGP_RPKI_CACHE,
+ cache->tr_config.ssh_config->bindaddr);
XFREE(MTYPE_BGP_RPKI_CACHE, cache->tr_config.ssh_config);
}
#endif
tcp_config = cache->tr_config.tcp_config;
vty_out(vty, " rpki cache %s %s ", tcp_config->host,
tcp_config->port);
+ if (tcp_config->bindaddr)
+ vty_out(vty, "source %s ",
+ tcp_config->bindaddr);
break;
#if defined(FOUND_SSH)
case SSH:
ssh_config->server_hostkey_path != NULL
? ssh_config->server_hostkey_path
: " ");
+ if (ssh_config->bindaddr)
+ vty_out(vty, "source %s ",
+ ssh_config->bindaddr);
break;
#endif
default:
vty_out(vty, "preference %hhu\n", cache->preference);
}
- vty_out(vty, " exit\n");
+ vty_out(vty, "exit\n");
return 1;
}
return CMD_SUCCESS;
}
-DEFPY (rpki_cache,
- rpki_cache_cmd,
- "rpki cache <A.B.C.D|WORD><TCPPORT|(1-65535)$sshport SSH_UNAME SSH_PRIVKEY SSH_PUBKEY [SERVER_PUBKEY]> preference (1-255)",
- RPKI_OUTPUT_STRING
- "Install a cache server to current group\n"
- "IP address of cache server\n Hostname of cache server\n"
- "TCP port number\n"
- "SSH port number\n"
- "SSH user name\n"
- "Path to own SSH private key\n"
- "Path to own SSH public key\n"
- "Path to Public key of cache server\n"
- "Preference of the cache server\n"
- "Preference value\n")
+DEFPY(rpki_cache, rpki_cache_cmd,
+ "rpki cache <A.B.C.D|WORD> <TCPPORT|(1-65535)$sshport SSH_UNAME SSH_PRIVKEY SSH_PUBKEY [SERVER_PUBKEY]> [source <A.B.C.D>$bindaddr] preference (1-255)",
+ RPKI_OUTPUT_STRING
+ "Install a cache server to current group\n"
+ "IP address of cache server\n Hostname of cache server\n"
+ "TCP port number\n"
+ "SSH port number\n"
+ "SSH user name\n"
+ "Path to own SSH private key\n"
+ "Path to own SSH public key\n"
+ "Path to Public key of cache server\n"
+ "Configure source IP address of RPKI connection\n"
+ "Define a Source IP Address\n"
+ "Preference of the cache server\n"
+ "Preference value\n")
{
int return_value;
struct listnode *cache_node;
// use ssh connection
if (ssh_uname) {
#if defined(FOUND_SSH)
- return_value =
- add_ssh_cache(cache, sshport, ssh_uname, ssh_privkey,
- ssh_pubkey, server_pubkey, preference);
+ return_value = add_ssh_cache(
+ cache, sshport, ssh_uname, ssh_privkey, ssh_pubkey,
+ server_pubkey, preference, bindaddr_str);
#else
return_value = SUCCESS;
vty_out(vty,
"ssh sockets are not supported. Please recompile rtrlib and frr with ssh support. If you want to use it\n");
#endif
} else { // use tcp connection
- return_value = add_tcp_cache(cache, tcpport, preference);
+ return_value =
+ add_tcp_cache(cache, tcpport, preference, bindaddr_str);
}
if (return_value == ERROR) {
}
if (peer->default_rmap[afi][safi].name) {
+ struct bgp_path_info tmp_pi = {0};
+
+ tmp_pi.peer = bgp->peer_self;
+
SET_FLAG(bgp->peer_self->rmap_type, PEER_RMAP_TYPE_DEFAULT);
/* Iterate over the RIB to see if we can announce
for (pi = bgp_dest_get_bgp_path_info(dest); pi;
pi = pi->next) {
- struct attr tmp_attr;
- struct bgp_path_info tmp_pi;
- struct bgp_path_info_extra tmp_pie;
+ struct attr tmp_attr = attr;
- tmp_attr = *pi->attr;
- tmp_attr.aspath = attr.aspath;
+ tmp_pi.attr = &tmp_attr;
- prep_for_rmap_apply(&tmp_pi, &tmp_pie, dest, pi,
- pi->peer, &tmp_attr);
-
- ret = route_map_apply(
+ ret = route_map_apply_ext(
peer->default_rmap[afi][safi].map,
- bgp_dest_get_prefix(dest), &tmp_pi);
+ bgp_dest_get_prefix(dest), pi, &tmp_pi);
if (ret == RMAP_DENYMATCH) {
- /* The aspath belongs to 'attr' */
- tmp_attr.aspath = NULL;
- bgp_attr_flush(&tmp_attr);
+ bgp_attr_undup(&tmp_attr, &attr);
continue;
} else {
new_attr = bgp_attr_intern(&tmp_attr);
subgroup_default_update_packet(subgrp, new_attr, from);
}
}
+
+ aspath_unintern(&attr.aspath);
}
/*
} else {
as = strtoul(argv[idx_asn]->arg, NULL, 10);
- if (argc > 4)
+ if (argc > 4) {
name = argv[idx_vrf]->arg;
+ if (strmatch(argv[idx_vrf - 1]->text, "vrf")
+ && strmatch(name, VRF_DEFAULT_NAME))
+ name = NULL;
+ }
/* Lookup bgp structure. */
bgp = bgp_lookup(as, name);
DEFUN (bgp_confederation_identifier,
bgp_confederation_identifier_cmd,
"bgp confederation identifier (1-4294967295)",
- "BGP specific commands\n"
+ BGP_STR
"AS confederation parameters\n"
"AS number\n"
"Set routing domain confederation AS\n")
no_bgp_confederation_identifier_cmd,
"no bgp confederation identifier [(1-4294967295)]",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"AS confederation parameters\n"
"AS number\n"
"Set routing domain confederation AS\n")
DEFUN (bgp_confederation_peers,
bgp_confederation_peers_cmd,
"bgp confederation peers (1-4294967295)...",
- "BGP specific commands\n"
+ BGP_STR
"AS confederation parameters\n"
"Peer ASs in BGP confederation\n"
AS_STR)
no_bgp_confederation_peers_cmd,
"no bgp confederation peers (1-4294967295)...",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"AS confederation parameters\n"
"Peer ASs in BGP confederation\n"
AS_STR)
return CMD_SUCCESS;
}
+/* BGP minimum holdtime. */
+
+DEFUN(bgp_minimum_holdtime, bgp_minimum_holdtime_cmd,
+ "bgp minimum-holdtime (1-65535)",
+ "BGP specific commands\n"
+ "BGP minimum holdtime\n"
+ "Seconds\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ int idx_number = 2;
+ unsigned long min_holdtime;
+
+ min_holdtime = strtoul(argv[idx_number]->arg, NULL, 10);
+
+ bgp->default_min_holdtime = min_holdtime;
+
+ return CMD_SUCCESS;
+}
+
+DEFUN(no_bgp_minimum_holdtime, no_bgp_minimum_holdtime_cmd,
+ "no bgp minimum-holdtime [(1-65535)]",
+ NO_STR
+ "BGP specific commands\n"
+ "BGP minimum holdtime\n"
+ "Seconds\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+
+ bgp->default_min_holdtime = 0;
+
+ return CMD_SUCCESS;
+}
DEFUN (bgp_client_to_client_reflection,
bgp_client_to_client_reflection_cmd,
"bgp client-to-client reflection",
- "BGP specific commands\n"
+ BGP_STR
"Configure client to client route reflection\n"
"reflection of routes allowed\n")
{
no_bgp_client_to_client_reflection_cmd,
"no bgp client-to-client reflection",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Configure client to client route reflection\n"
"reflection of routes allowed\n")
{
DEFUN (bgp_always_compare_med,
bgp_always_compare_med_cmd,
"bgp always-compare-med",
- "BGP specific commands\n"
+ BGP_STR
"Allow comparing MED from different neighbors\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
no_bgp_always_compare_med_cmd,
"no bgp always-compare-med",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Allow comparing MED from different neighbors\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
DEFUN(bgp_ebgp_requires_policy, bgp_ebgp_requires_policy_cmd,
"bgp ebgp-requires-policy",
- "BGP specific commands\n"
+ BGP_STR
"Require in and out policy for eBGP peers (RFC8212)\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
DEFUN(no_bgp_ebgp_requires_policy, no_bgp_ebgp_requires_policy_cmd,
"no bgp ebgp-requires-policy",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Require in and out policy for eBGP peers (RFC8212)\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
DEFUN(bgp_suppress_duplicates, bgp_suppress_duplicates_cmd,
"bgp suppress-duplicates",
- "BGP specific commands\n"
+ BGP_STR
"Suppress duplicate updates if the route actually not changed\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
DEFUN(no_bgp_suppress_duplicates, no_bgp_suppress_duplicates_cmd,
"no bgp suppress-duplicates",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Suppress duplicate updates if the route actually not changed\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
DEFUN(bgp_reject_as_sets, bgp_reject_as_sets_cmd,
"bgp reject-as-sets",
- "BGP specific commands\n"
+ BGP_STR
"Reject routes with AS_SET or AS_CONFED_SET flag\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
DEFUN(no_bgp_reject_as_sets, no_bgp_reject_as_sets_cmd,
"no bgp reject-as-sets",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Reject routes with AS_SET or AS_CONFED_SET flag\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
DEFUN (bgp_deterministic_med,
bgp_deterministic_med_cmd,
"bgp deterministic-med",
- "BGP specific commands\n"
+ BGP_STR
"Pick the best-MED path among paths advertised from the neighboring AS\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
no_bgp_deterministic_med_cmd,
"no bgp deterministic-med",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Pick the best-MED path among paths advertised from the neighboring AS\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
DEFUN (bgp_graceful_restart,
bgp_graceful_restart_cmd,
"bgp graceful-restart",
- "BGP specific commands\n"
+ BGP_STR
GR_CMD
)
{
no_bgp_graceful_restart_cmd,
"no bgp graceful-restart",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
NO_GR_CMD
)
{
DEFUN (bgp_graceful_restart_stalepath_time,
bgp_graceful_restart_stalepath_time_cmd,
"bgp graceful-restart stalepath-time (1-4095)",
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart capability parameters\n"
"Set the max time to hold onto restarting peer's stale paths\n"
"Delay value (seconds)\n")
DEFUN (bgp_graceful_restart_restart_time,
bgp_graceful_restart_restart_time_cmd,
"bgp graceful-restart restart-time (1-4095)",
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart capability parameters\n"
"Set the time to wait to delete stale routes before a BGP open message is received\n"
"Delay value (seconds)\n")
DEFUN (bgp_graceful_restart_select_defer_time,
bgp_graceful_restart_select_defer_time_cmd,
"bgp graceful-restart select-defer-time (0-3600)",
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart capability parameters\n"
"Set the time to defer the BGP route selection after restart\n"
"Delay value (seconds, 0 - disable)\n")
no_bgp_graceful_restart_stalepath_time_cmd,
"no bgp graceful-restart stalepath-time [(1-4095)]",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart capability parameters\n"
"Set the max time to hold onto restarting peer's stale paths\n"
"Delay value (seconds)\n")
no_bgp_graceful_restart_restart_time_cmd,
"no bgp graceful-restart restart-time [(1-4095)]",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart capability parameters\n"
"Set the time to wait to delete stale routes before a BGP open message is received\n"
"Delay value (seconds)\n")
no_bgp_graceful_restart_select_defer_time_cmd,
"no bgp graceful-restart select-defer-time [(0-3600)]",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart capability parameters\n"
"Set the time to defer the BGP route selection after restart\n"
"Delay value (seconds)\n")
DEFUN (bgp_graceful_restart_preserve_fw,
bgp_graceful_restart_preserve_fw_cmd,
"bgp graceful-restart preserve-fw-state",
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart capability parameters\n"
"Sets F-bit indication that fib is preserved while doing Graceful Restart\n")
{
no_bgp_graceful_restart_preserve_fw_cmd,
"no bgp graceful-restart preserve-fw-state",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart capability parameters\n"
"Unsets F-bit indication that fib is preserved while doing Graceful Restart\n")
{
DEFUN (bgp_graceful_restart_disable,
bgp_graceful_restart_disable_cmd,
"bgp graceful-restart-disable",
- "BGP specific commands\n"
+ BGP_STR
GR_DISABLE)
{
int ret = BGP_GR_FAILURE;
no_bgp_graceful_restart_disable_cmd,
"no bgp graceful-restart-disable",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
NO_GR_DISABLE
)
{
DEFUN_HIDDEN (bgp_graceful_restart_disable_eor,
bgp_graceful_restart_disable_eor_cmd,
"bgp graceful-restart disable-eor",
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart configuration parameters\n"
"Disable EOR Check\n")
{
no_bgp_graceful_restart_disable_eor_cmd,
"no bgp graceful-restart disable-eor",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart configuration parameters\n"
"Disable EOR Check\n")
{
DEFUN (bgp_graceful_restart_rib_stale_time,
bgp_graceful_restart_rib_stale_time_cmd,
"bgp graceful-restart rib-stale-time (1-3600)",
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart configuration parameters\n"
"Specify the stale route removal timer in rib\n"
"Delay value (seconds)\n")
no_bgp_graceful_restart_rib_stale_time_cmd,
"no bgp graceful-restart rib-stale-time [(1-3600)]",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Graceful restart configuration parameters\n"
"Specify the stale route removal timer in rib\n"
"Delay value (seconds)\n")
DEFUN (bgp_bestpath_compare_router_id,
bgp_bestpath_compare_router_id_cmd,
"bgp bestpath compare-routerid",
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"Compare router-id for identical EBGP paths\n")
{
no_bgp_bestpath_compare_router_id_cmd,
"no bgp bestpath compare-routerid",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"Compare router-id for identical EBGP paths\n")
{
DEFUN (bgp_bestpath_aspath_ignore,
bgp_bestpath_aspath_ignore_cmd,
"bgp bestpath as-path ignore",
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"AS-path attribute\n"
"Ignore as-path length in selecting a route\n")
no_bgp_bestpath_aspath_ignore_cmd,
"no bgp bestpath as-path ignore",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"AS-path attribute\n"
"Ignore as-path length in selecting a route\n")
DEFUN (bgp_bestpath_aspath_confed,
bgp_bestpath_aspath_confed_cmd,
"bgp bestpath as-path confed",
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"AS-path attribute\n"
"Compare path lengths including confederation sets & sequences in selecting a route\n")
no_bgp_bestpath_aspath_confed_cmd,
"no bgp bestpath as-path confed",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"AS-path attribute\n"
"Compare path lengths including confederation sets & sequences in selecting a route\n")
DEFUN (bgp_bestpath_aspath_multipath_relax,
bgp_bestpath_aspath_multipath_relax_cmd,
"bgp bestpath as-path multipath-relax [<as-set|no-as-set>]",
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"AS-path attribute\n"
"Allow load sharing across routes that have different AS paths (but same length)\n"
no_bgp_bestpath_aspath_multipath_relax_cmd,
"no bgp bestpath as-path multipath-relax [<as-set|no-as-set>]",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"AS-path attribute\n"
"Allow load sharing across routes that have different AS paths (but same length)\n"
DEFUN (bgp_log_neighbor_changes,
bgp_log_neighbor_changes_cmd,
"bgp log-neighbor-changes",
- "BGP specific commands\n"
+ BGP_STR
"Log neighbor up/down and reset reason\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
no_bgp_log_neighbor_changes_cmd,
"no bgp log-neighbor-changes",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Log neighbor up/down and reset reason\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
DEFUN (bgp_bestpath_med,
bgp_bestpath_med_cmd,
"bgp bestpath med <confed [missing-as-worst]|missing-as-worst [confed]>",
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"MED attribute\n"
"Compare MED among confederation paths\n"
no_bgp_bestpath_med_cmd,
"no bgp bestpath med <confed [missing-as-worst]|missing-as-worst [confed]>",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"MED attribute\n"
"Compare MED among confederation paths\n"
DEFPY (bgp_bestpath_bw,
bgp_bestpath_bw_cmd,
"bgp bestpath bandwidth <ignore|skip-missing|default-weight-for-missing>$bw_cfg",
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"Link Bandwidth attribute\n"
"Ignore link bandwidth (i.e., do regular ECMP, not weighted)\n"
no_bgp_bestpath_bw_cmd,
"no bgp bestpath bandwidth [<ignore|skip-missing|default-weight-for-missing>$bw_cfg]",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Change the default bestpath selection\n"
"Link Bandwidth attribute\n"
"Ignore link bandwidth (i.e., do regular ECMP, not weighted)\n"
"ipv6-flowspec|"
"l2vpn-evpn>$afi_safi",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Configure BGP defaults\n"
"Activate ipv4-unicast for a peer by default\n"
"Activate ipv4-multicast for a peer by default\n"
DEFUN (bgp_default_show_hostname,
bgp_default_show_hostname_cmd,
"bgp default show-hostname",
- "BGP specific commands\n"
+ BGP_STR
"Configure BGP defaults\n"
"Show hostname in certain command outputs\n")
{
no_bgp_default_show_hostname_cmd,
"no bgp default show-hostname",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Configure BGP defaults\n"
"Show hostname in certain command outputs\n")
{
DEFUN (bgp_default_show_nexthop_hostname,
bgp_default_show_nexthop_hostname_cmd,
"bgp default show-nexthop-hostname",
- "BGP specific commands\n"
+ BGP_STR
"Configure BGP defaults\n"
"Show hostname for nexthop in certain command outputs\n")
{
no_bgp_default_show_nexthop_hostname_cmd,
"no bgp default show-nexthop-hostname",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Configure BGP defaults\n"
"Show hostname for nexthop in certain command outputs\n")
{
DEFUN (bgp_network_import_check,
bgp_network_import_check_cmd,
"bgp network import-check",
- "BGP specific commands\n"
+ BGP_STR
"BGP network command\n"
"Check BGP network route exists in IGP\n")
{
ALIAS_HIDDEN(bgp_network_import_check, bgp_network_import_check_exact_cmd,
"bgp network import-check exact",
- "BGP specific commands\n"
+ BGP_STR
"BGP network command\n"
"Check BGP network route exists in IGP\n"
"Match route precisely\n")
no_bgp_network_import_check_cmd,
"no bgp network import-check",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"BGP network command\n"
"Check BGP network route exists in IGP\n")
{
DEFUN (bgp_default_local_preference,
bgp_default_local_preference_cmd,
"bgp default local-preference (0-4294967295)",
- "BGP specific commands\n"
+ BGP_STR
"Configure BGP defaults\n"
"local preference (higher=more preferred)\n"
"Configure default local preference value\n")
no_bgp_default_local_preference_cmd,
"no bgp default local-preference [(0-4294967295)]",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Configure BGP defaults\n"
"local preference (higher=more preferred)\n"
"Configure default local preference value\n")
DEFUN (bgp_default_subgroup_pkt_queue_max,
bgp_default_subgroup_pkt_queue_max_cmd,
"bgp default subgroup-pkt-queue-max (20-100)",
- "BGP specific commands\n"
+ BGP_STR
"Configure BGP defaults\n"
"subgroup-pkt-queue-max\n"
"Configure subgroup packet queue max\n")
no_bgp_default_subgroup_pkt_queue_max_cmd,
"no bgp default subgroup-pkt-queue-max [(20-100)]",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Configure BGP defaults\n"
"subgroup-pkt-queue-max\n"
"Configure subgroup packet queue max\n")
DEFUN (bgp_rr_allow_outbound_policy,
bgp_rr_allow_outbound_policy_cmd,
"bgp route-reflector allow-outbound-policy",
- "BGP specific commands\n"
+ BGP_STR
"Allow modifications made by out route-map\n"
"on ibgp neighbors\n")
{
no_bgp_rr_allow_outbound_policy_cmd,
"no bgp route-reflector allow-outbound-policy",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Allow modifications made by out route-map\n"
"on ibgp neighbors\n")
{
DEFUN (bgp_listen_limit,
bgp_listen_limit_cmd,
"bgp listen limit (1-65535)",
- "BGP specific commands\n"
+ BGP_STR
"BGP Dynamic Neighbors listen commands\n"
"Maximum number of BGP Dynamic Neighbors that can be created\n"
"Configure Dynamic Neighbors listen limit value\n")
no_bgp_listen_limit_cmd,
"no bgp listen limit [(1-65535)]",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"BGP Dynamic Neighbors listen commands\n"
"Maximum number of BGP Dynamic Neighbors that can be created\n"
"Configure Dynamic Neighbors listen limit value\n")
DEFUN (bgp_listen_range,
bgp_listen_range_cmd,
"bgp listen range <A.B.C.D/M|X:X::X:X/M> peer-group PGNAME",
- "BGP specific commands\n"
+ BGP_STR
"Configure BGP dynamic neighbors listen range\n"
"Configure BGP dynamic neighbors listen range\n"
NEIGHBOR_ADDR_STR
no_bgp_listen_range_cmd,
"no bgp listen range <A.B.C.D/M|X:X::X:X/M> peer-group PGNAME",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Unconfigure BGP dynamic neighbors listen range\n"
"Unconfigure BGP dynamic neighbors listen range\n"
NEIGHBOR_ADDR_STR
DEFUN (bgp_disable_connected_route_check,
bgp_disable_connected_route_check_cmd,
"bgp disable-ebgp-connected-route-check",
- "BGP specific commands\n"
+ BGP_STR
"Disable checking if nexthop is connected on ebgp sessions\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
no_bgp_disable_connected_route_check_cmd,
"no bgp disable-ebgp-connected-route-check",
NO_STR
- "BGP specific commands\n"
+ BGP_STR
"Disable checking if nexthop is connected on ebgp sessions\n")
{
VTY_DECLVAR_CONTEXT(bgp, bgp);
return peer_remote_as_vty(vty, argv[idx_peer]->arg,
argv[idx_remote_as]->arg);
}
+/* Enable fast convergence of bgp sessions. If this is enabled, bgp
+ * sessions do not wait for hold timer expiry to bring down the sessions
+ * when nexthop becomes unreachable
+ */
+DEFUN(bgp_fast_convergence, bgp_fast_convergence_cmd, "bgp fast-convergence",
+ BGP_STR "Fast convergence for bgp sessions\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ bgp->fast_convergence = true;
+
+ return CMD_SUCCESS;
+}
+
+DEFUN(no_bgp_fast_convergence, no_bgp_fast_convergence_cmd,
+ "no bgp fast-convergence",
+ NO_STR BGP_STR "Fast convergence for bgp sessions\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ bgp->fast_convergence = false;
+
+ return CMD_SUCCESS;
+}
static int peer_conf_interface_get(struct vty *vty, const char *conf_if,
int v6only,
PEER_FLAG_DISABLE_CONNECTED_CHECK);
}
+/* disable-link-bw-encoding-ieee */
+DEFUN(neighbor_disable_link_bw_encoding_ieee,
+ neighbor_disable_link_bw_encoding_ieee_cmd,
+ "neighbor <A.B.C.D|X:X::X:X|WORD> disable-link-bw-encoding-ieee",
+ NEIGHBOR_STR NEIGHBOR_ADDR_STR2
+ "Disable IEEE floating-point encoding for extended community bandwidth\n")
+{
+ int idx_peer = 1;
+
+ return peer_flag_set_vty(vty, argv[idx_peer]->arg,
+ PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE);
+}
+
+DEFUN(no_neighbor_disable_link_bw_encoding_ieee,
+ no_neighbor_disable_link_bw_encoding_ieee_cmd,
+ "no neighbor <A.B.C.D|X:X::X:X|WORD> disable-link-bw-encoding-ieee",
+ NO_STR NEIGHBOR_STR NEIGHBOR_ADDR_STR2
+ "Disable IEEE floating-point encoding for extended community bandwidth\n")
+{
+ int idx_peer = 2;
+
+ return peer_flag_unset_vty(vty, argv[idx_peer]->arg,
+ PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE);
+}
+
/* enforce-first-as */
DEFUN (neighbor_enforce_first_as,
{
int idx_ip = 1;
int idx_word = 3;
+
return peer_interface_vty(vty, argv[idx_ip]->arg, argv[idx_word]->arg);
}
DEFUN (no_neighbor_interface,
no_neighbor_interface_cmd,
- "no neighbor <A.B.C.D|X:X::X:X|WORD> interface WORD",
+ "no neighbor <A.B.C.D|X:X::X:X> interface WORD",
NO_STR
NEIGHBOR_STR
- NEIGHBOR_ADDR_STR2
+ NEIGHBOR_ADDR_STR
"Interface\n"
"Interface name\n")
{
int idx_peer = 2;
+
return peer_interface_vty(vty, argv[idx_peer]->arg, NULL);
}
return bgp_vty_return(vty, peer_ttl_security_hops_unset(peer));
}
+/* disable-addpath-rx */
+DEFUN(neighbor_disable_addpath_rx,
+ neighbor_disable_addpath_rx_cmd,
+ "neighbor <A.B.C.D|X:X::X:X|WORD> disable-addpath-rx",
+ NEIGHBOR_STR
+ NEIGHBOR_ADDR_STR2
+ "Do not accept additional paths\n")
+{
+ char *peer_str = argv[1]->arg;
+ struct peer *peer;
+ afi_t afi = bgp_node_afi(vty);
+ safi_t safi = bgp_node_safi(vty);
+
+ peer = peer_and_group_lookup_vty(vty, peer_str);
+ if (!peer)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ return peer_af_flag_set_vty(vty, peer_str, afi, safi,
+ PEER_FLAG_DISABLE_ADDPATH_RX);
+}
+
+DEFUN(no_neighbor_disable_addpath_rx,
+ no_neighbor_disable_addpath_rx_cmd,
+ "no neighbor <A.B.C.D|X:X::X:X|WORD> disable-addpath-rx",
+ NO_STR
+ NEIGHBOR_STR
+ NEIGHBOR_ADDR_STR2
+ "Do not accept additional paths\n")
+{
+ char *peer_str = argv[2]->arg;
+ struct peer *peer;
+ afi_t afi = bgp_node_afi(vty);
+ safi_t safi = bgp_node_safi(vty);
+
+ peer = peer_and_group_lookup_vty(vty, peer_str);
+ if (!peer)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ return peer_af_flag_unset_vty(vty, peer_str, afi, safi,
+ PEER_FLAG_DISABLE_ADDPATH_RX);
+}
+
DEFUN (neighbor_addpath_tx_all_paths,
neighbor_addpath_tx_all_paths_cmd,
"neighbor <A.B.C.D|X:X::X:X|WORD> addpath-tx-all-paths",
}
}
-/* If the peer's description includes whitespaces
- * then return the first occurrence. Also strip description
- * to the given size if needed.
- */
+/* Strip peer's description to the given size. */
static char *bgp_peer_description_stripped(char *desc, uint32_t size)
{
static char stripped[BUFSIZ];
- char *pnt;
uint32_t len = size > strlen(desc) ? strlen(desc) : size;
- pnt = strchr(desc, ' ');
- if (pnt)
- len = size > (uint32_t)(pnt - desc) ? (uint32_t)(pnt - desc)
- : size;
-
strlcpy(stripped, desc, len + 1);
return stripped;
return false;
}
-/* Show BGP peer's summary information. */
+/* Show BGP peer's summary information.
+ *
+ * Peer's description is stripped according to if `wide` option is given
+ * or not.
+ *
+ * When adding new columns to `show bgp summary` output, please make
+ * sure `Desc` is the lastest column to show because it can contain
+ * whitespaces and the whole output will be tricky.
+ */
static int bgp_show_summary(struct vty *vty, struct bgp *bgp, int afi, int safi,
struct peer *fpeer, int as_type, as_t as,
uint16_t show_flags)
vty_out(vty, " %8u", 0);
}
+ /* Make sure `Desc` column is the lastest in
+ * the output.
+ */
if (peer->desc)
vty_out(vty, " %s",
bgp_peer_description_stripped(
[safi],
PEER_CAP_ADDPATH_AF_TX_ADV))
vty_out(vty,
- "advertised %s",
- get_afi_safi_str(
- afi,
- safi,
- false));
+ "advertised");
if (CHECK_FLAG(
p->af_cap
[safi],
PEER_CAP_ADDPATH_AF_RX_ADV))
vty_out(vty,
- "advertised %s",
- get_afi_safi_str(
- afi,
- safi,
- false));
+ "advertised");
if (CHECK_FLAG(
p->af_cap
if (peergroup_flag_check(peer, PEER_FLAG_DISABLE_CONNECTED_CHECK))
vty_out(vty, " neighbor %s disable-connected-check\n", addr);
+ /* link-bw-encoding-ieee */
+ if (peergroup_flag_check(peer, PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE))
+ vty_out(vty, " neighbor %s disable-link-bw-encoding-ieee\n",
+ addr);
+
/* enforce-first-as */
if (peergroup_flag_check(peer, PEER_FLAG_ENFORCE_FIRST_AS))
vty_out(vty, " neighbor %s enforce-first-as\n", addr);
}
}
+ if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_DISABLE_ADDPATH_RX))
+ vty_out(vty, " neighbor %s disable-addpath-rx\n", addr);
+
/* ORF capability. */
if (peergroup_af_flag_check(peer, afi, safi, PEER_FLAG_ORF_PREFIX_SM)
|| peergroup_af_flag_check(peer, afi, safi,
vty_out(vty, " timers bgp %u %u\n",
bgp->default_keepalive, bgp->default_holdtime);
+ /* BGP minimum holdtime configuration. */
+ if (bgp->default_min_holdtime != SAVE_BGP_HOLDTIME
+ && bgp->default_min_holdtime != 0)
+ vty_out(vty, " bgp minimum-holdtime %u\n",
+ bgp->default_min_holdtime);
+
/* Conditional advertisement timer configuration */
if (bgp->condition_check_period
!= DEFAULT_CONDITIONAL_ROUTES_POLL_TIME)
if (CHECK_FLAG(bgp->flags, BGP_FLAG_SHUTDOWN))
vty_out(vty, " bgp shutdown\n");
+ if (bgp->fast_convergence)
+ vty_out(vty, " bgp fast-convergence\n");
+
if (bgp->srv6_enabled) {
vty_frame(vty, " !\n segment-routing srv6\n");
if (strlen(bgp->srv6_locator_name))
bgp_rfapi_cfg_write(vty, bgp);
#endif
+ vty_out(vty, "exit\n");
vty_out(vty, "!\n");
}
return 0;
install_element(CONFIG_NODE, &bgp_set_route_map_delay_timer_cmd);
install_element(CONFIG_NODE, &no_bgp_set_route_map_delay_timer_cmd);
+ /* bgp fast-convergence command */
+ install_element(BGP_NODE, &bgp_fast_convergence_cmd);
+ install_element(BGP_NODE, &no_bgp_fast_convergence_cmd);
+
/* global bgp update-delay command */
install_element(CONFIG_NODE, &bgp_global_update_delay_cmd);
install_element(CONFIG_NODE, &no_bgp_global_update_delay_cmd);
install_element(BGP_NODE, &bgp_timers_cmd);
install_element(BGP_NODE, &no_bgp_timers_cmd);
+ /* "minimum-holdtime" commands. */
+ install_element(BGP_NODE, &bgp_minimum_holdtime_cmd);
+ install_element(BGP_NODE, &no_bgp_minimum_holdtime_cmd);
+
/* route-map delay-timer commands - per instance for backwards compat.
*/
install_element(BGP_NODE, &bgp_set_route_map_delay_timer_cmd);
install_element(BGP_FLOWSPECV6_NODE,
&no_neighbor_route_server_client_cmd);
+ /* "neighbor disable-addpath-rx" commands. */
+ install_element(BGP_IPV4_NODE, &neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_IPV4_NODE, &no_neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_IPV4M_NODE, &neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_IPV4M_NODE, &no_neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_IPV4L_NODE, &neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_IPV4L_NODE, &no_neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_IPV6_NODE, &neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_IPV6_NODE, &no_neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_IPV6M_NODE, &neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_IPV6M_NODE, &no_neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_IPV6L_NODE, &neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_IPV6L_NODE, &no_neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_VPNV4_NODE, &neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_VPNV4_NODE, &no_neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_VPNV6_NODE, &neighbor_disable_addpath_rx_cmd);
+ install_element(BGP_VPNV6_NODE, &no_neighbor_disable_addpath_rx_cmd);
+
/* "neighbor addpath-tx-all-paths" commands.*/
install_element(BGP_NODE, &neighbor_addpath_tx_all_paths_hidden_cmd);
install_element(BGP_NODE, &no_neighbor_addpath_tx_all_paths_hidden_cmd);
install_element(BGP_NODE, &neighbor_disable_connected_check_cmd);
install_element(BGP_NODE, &no_neighbor_disable_connected_check_cmd);
+ /* "neighbor disable-link-bw-encoding-ieee" commands. */
+ install_element(BGP_NODE, &neighbor_disable_link_bw_encoding_ieee_cmd);
+ install_element(BGP_NODE,
+ &no_neighbor_disable_link_bw_encoding_ieee_cmd);
+
/* "neighbor enforce-first-as" commands. */
install_element(BGP_NODE, &neighbor_enforce_first_as_cmd);
install_element(BGP_NODE, &no_neighbor_enforce_first_as_cmd);
static int zebra_read_route(ZAPI_CALLBACK_ARGS)
{
enum nexthop_types_t nhtype;
+ enum blackhole_type bhtype = BLACKHOLE_UNSPEC;
struct zapi_route api;
- union g_addr nexthop;
+ union g_addr nexthop = {};
ifindex_t ifindex;
int add, i;
struct bgp *bgp;
&& IN6_IS_ADDR_LINKLOCAL(&api.prefix.u.prefix6))
return 0;
- nexthop = api.nexthops[0].gate;
ifindex = api.nexthops[0].ifindex;
nhtype = api.nexthops[0].type;
+ /* api_nh structure has union of gate and bh_type */
+ if (nhtype == NEXTHOP_TYPE_BLACKHOLE) {
+ /* bh_type is only applicable if NEXTHOP_TYPE_BLACKHOLE*/
+ bhtype = api.nexthops[0].bh_type;
+ } else
+ nexthop = api.nexthops[0].gate;
+
add = (cmd == ZEBRA_REDISTRIBUTE_ROUTE_ADD);
if (add) {
/*
/* Now perform the add/update. */
bgp_redistribute_add(bgp, &api.prefix, &nexthop, ifindex,
- nhtype, api.distance, api.metric, api.type,
- api.instance, api.tag);
+ nhtype, bhtype, api.distance, api.metric,
+ api.type, api.instance, api.tag);
} else {
bgp_redistribute_delete(bgp, &api.prefix, api.type,
api.instance);
if (direct)
v6_ll_avail = if_get_ipv6_local(
ifp, &nexthop->v6_local);
+ /*
+ * It's fine to not have a v6 LL when using
+ * update-source loopback/vrf
+ */
+ if (!v6_ll_avail && if_is_loopback_or_vrf(ifp))
+ v6_ll_avail = true;
} else
/* Link-local address. */
{
* a VRF (which are programmed as onlink on l3-vni SVI) as well as
* connected routes leaked into a VRF.
*/
- if (is_evpn) {
-
+ if (attr->nh_type == NEXTHOP_TYPE_BLACKHOLE) {
+ api_nh->type = attr->nh_type;
+ api_nh->bh_type = attr->bh_type;
+ } else if (is_evpn) {
/*
* If the nexthop is EVPN overlay index gateway IP,
* treat the nexthop as NEXTHOP_TYPE_IPV4
SET_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_ONLINK);
api_nh->ifindex = nh_bgp->l3vni_svi_ifindex;
}
- } else if (nh_othervrf &&
- api_nh->gate.ipv4.s_addr == INADDR_ANY) {
+ } else if (nh_othervrf && api_nh->gate.ipv4.s_addr == INADDR_ANY) {
api_nh->type = NEXTHOP_TYPE_IFINDEX;
api_nh->ifindex = attr->nh_ifindex;
} else
attr = pi->attr;
api_nh->vrf_id = nh_bgp->vrf_id;
- if (is_evpn) {
-
+ if (attr->nh_type == NEXTHOP_TYPE_BLACKHOLE) {
+ api_nh->type = attr->nh_type;
+ api_nh->bh_type = attr->bh_type;
+ } else if (is_evpn) {
/*
* If the nexthop is EVPN overlay index gateway IP,
* treat the nexthop as NEXTHOP_TYPE_IPV4
api_nh->ifindex = 0;
}
}
- if (nexthop)
+ /* api_nh structure has union of gate and bh_type */
+ if (nexthop && api_nh->type != NEXTHOP_TYPE_BLACKHOLE)
api_nh->gate.ipv6 = *nexthop;
return true;
label_buf[0] = '\0';
eth_buf[0] = '\0';
+ segs_buf[0] = '\0';
if (has_valid_label
&& !CHECK_FLAG(api.flags, ZEBRA_FLAG_EVPN_ROUTE))
snprintf(label_buf, sizeof(label_buf),
bgp->default_subgroup_pkt_queue_max =
BGP_DEFAULT_SUBGROUP_PKT_QUEUE_MAX;
bgp_timers_unset(bgp);
+ bgp->default_min_holdtime = 0;
bgp->restart_time = BGP_DEFAULT_RESTART_TIME;
bgp->stalepath_time = BGP_DEFAULT_STALEPATH_TIME;
bgp->select_defer_time = BGP_DEFAULT_SELECT_DEFERRAL_TIME;
bgp->reject_as_sets = false;
bgp->condition_check_period = DEFAULT_CONDITIONAL_ROUTES_POLL_TIME;
bgp_addpath_init_bgp_data(&bgp->tx_addpath);
-
+ bgp->fast_convergence = false;
bgp->as = *as;
#ifdef ENABLE_BGP_VNC
return ret;
bgp = bgp_create(as, name, inst_type);
- if (bgp_option_check(BGP_OPT_NO_ZEBRA) && name)
- bgp->vrf_id = vrf_generate_id();
+
+ /*
+ * view instances will never work inside of a vrf
+ * as such they must always be in the VRF_DEFAULT
+ * Also we must set this to something useful because
+ * of the vrf socket code needing an actual useful
+ * default value to send to the underlying OS.
+ *
+ * This code is currently ignoring vrf based
+ * code using the -Z option( and that is probably
+ * best addressed elsewhere in the code )
+ */
+ if (inst_type == BGP_INSTANCE_TYPE_VIEW)
+ bgp->vrf_id = VRF_DEFAULT;
+
bgp_router_id_set(bgp, &bgp->router_id_zebra, true);
bgp_address_init(bgp);
bgp_tip_hash_init(bgp);
{PEER_FLAG_LOCAL_AS_NO_PREPEND, 0, peer_change_none},
{PEER_FLAG_LOCAL_AS_REPLACE_AS, 0, peer_change_none},
{PEER_FLAG_UPDATE_SOURCE, 0, peer_change_none},
+ {PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE, 0, peer_change_none},
{0, 0, 0}};
static const struct peer_flag_action peer_af_flag_action_list[] = {
{PEER_FLAG_AS_OVERRIDE, 1, peer_change_reset_out},
{PEER_FLAG_REMOVE_PRIVATE_AS_ALL_REPLACE, 1, peer_change_reset_out},
{PEER_FLAG_WEIGHT, 0, peer_change_reset_in},
+ {PEER_FLAG_DISABLE_ADDPATH_RX, 0, peer_change_reset},
{0, 0, 0}};
/* Proper action set. */
uint32_t default_connect_retry;
uint32_t default_delayopen;
+ /* BGP minimum holdtime. */
+ uint16_t default_min_holdtime;
+
/* BGP graceful restart */
uint32_t restart_time;
uint32_t stalepath_time;
/* Process Queue for handling routes */
struct work_queue *process_queue;
+ bool fast_convergence;
+
/* BGP Conditional advertisement */
uint32_t condition_check_period;
uint32_t condition_filter_count;
#define PEER_FLAG_RTT_SHUTDOWN (1U << 26) /* shutdown rtt */
#define PEER_FLAG_TIMER_DELAYOPEN (1U << 27) /* delayopen timer */
#define PEER_FLAG_TCP_MSS (1U << 28) /* tcp-mss */
+/* Disable IEEE floating-point link bandwidth encoding in
+ * extended communities.
+ */
+#define PEER_FLAG_DISABLE_LINK_BW_ENCODING_IEEE (1U << 29)
/*
*GR-Disabled mode means unset PEER_FLAG_GRACEFUL_RESTART
#define PEER_FLAG_SEND_LARGE_COMMUNITY (1U << 26) /* Send large Communities */
#define PEER_FLAG_MAX_PREFIX_OUT (1U << 27) /* outgoing maximum prefix */
#define PEER_FLAG_MAX_PREFIX_FORCE (1U << 28) /* maximum-prefix <num> force */
+#define PEER_FLAG_DISABLE_ADDPATH_RX (1U << 29) /* disable-addpath-rx */
enum bgp_addpath_strat addpath_type[AFI_MAX][SAFI_MAX];
#define PEER_DOWN_AS_SETS_REJECT 31U /* Reject routes with AS_SET */
#define PEER_DOWN_WAITING_OPEN 32U /* Waiting for open to succeed */
#define PEER_DOWN_PFX_COUNT 33U /* Reached received prefix count */
+#define PEER_DOWN_SOCKET_ERROR 34U /* Some socket error happened */
/*
* Remember to update peer_down_str in bgp_fsm.c when you add
* a new value to the last_reset reason
rfg->routemap_redist_name
[ZEBRA_ROUTE_BGP_DIRECT_EXT]);
}
- vty_out(vty, " exit-vrf-policy\n");
+ vty_out(vty, " exit-vrf-policy\n");
vty_out(vty, "!\n");
}
if (hc->flags & BGP_VNC_CONFIG_ADV_UN_METHOD_ENCAP) {
vty, bgp->rfapi->rfp,
RFAPI_RFP_CFG_GROUP_L2,
rfgc->name, rfgc->rfp_cfg);
- vty_out(vty, " exit-vnc\n");
+ vty_out(vty, " exit-vnc\n");
vty_out(vty, "!\n");
}
}
vty, bgp->rfapi->rfp,
RFAPI_RFP_CFG_GROUP_DEFAULT, NULL,
bgp->rfapi_cfg->default_rfp_cfg);
- vty_out(vty, " exit-vnc\n");
+ vty_out(vty, " exit-vnc\n");
vty_out(vty, "!\n");
}
vty, bgp->rfapi->rfp,
RFAPI_RFP_CFG_GROUP_NVE,
rfg->name, rfg->rfp_cfg);
- vty_out(vty, " exit-vnc\n");
+ vty_out(vty, " exit-vnc\n");
vty_out(vty, "!\n");
}
} /* have listen ports */
AC_C_FLAG([-funwind-tables])
AC_C_FLAG([-Wall])
AC_C_FLAG([-Wextra])
+AC_C_FLAG([-Wstrict-prototypes])
AC_C_FLAG([-Wmissing-prototypes])
AC_C_FLAG([-Wmissing-declarations])
AC_C_FLAG([-Wpointer-arith])
AC_C_FLAG([-Wundef])
if test "$enable_gcc_ultra_verbose" = "yes" ; then
AC_C_FLAG([-Wcast-qual])
- AC_C_FLAG([-Wstrict-prototypes])
AC_C_FLAG([-Wmissing-noreturn])
AC_C_FLAG([-Wmissing-format-attribute])
AC_C_FLAG([-Wunreachable-code])
_LT_CONFIG_LIBTOOL([
patch -N -i "${srcdir}/m4/libtool-whole-archive.patch" libtool >&AS_MESSAGE_LOG_FD || \
AC_MSG_WARN([Could not patch libtool for static linking support. Loading modules into a statically linked daemon will fail.])
- sed -e 's%func_warning "relinking%true #\0%' -i libtool || true
- sed -e 's%func_warning "remember to run%true #\0%' -i libtool || true
- sed -e 's%func_warning ".*has not been installed in%true #\0%' -i libtool || true
+dnl the -i option is not POSIX sed and the BSDs implement it differently
+dnl cat'ing the output back instead of mv/cp keeps permissions on libtool intact
+ sed -e 's%func_warning "relinking%true #\0%' libtool > libtool.sed && cat libtool.sed > libtool
+ sed -e 's%func_warning "remember to run%true #\0%' libtool > libtool.sed && cat libtool.sed > libtool
+ sed -e 's%func_warning ".*has not been installed in%true #\0%' libtool > libtool.sed && cat libtool.sed > libtool
+ test -f libtool.sed && rm libtool.sed
])
if test "$enable_static_bin" = "yes"; then
AC_LDFLAGS_EXEC="-static"
true
else
# See https://old-en.opensuse.org/openSUSE:Packaging_Guidelines#Removing_Rpath
- sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool
- sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool
+ sed -e 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool > libtool.sed && cat libtool.sed > libtool
+ sed -e 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool > libtool.sed && cat libtool.sed > libtool
+ test -f libtool.sed && rm libtool.sed
fi
echo "
Description: FRRouting suite of internet protocols (BGP, OSPF, IS-IS, ...)
FRRouting implements the routing protocols commonly used in the
internet and private networks to exchange information between routers.
- Both IP and IPv6 are supported, as are BGP, OSPF, IS-IS, BABEL, EIGRP,
- RIP, LDP, BFD, PIM and NHRP protocols.
+ Both IP and IPv6 are supported, as are BGP, OSPFv2, OSPFv3, IS-IS, BABEL,
+ EIGRP, RIP, RIPng, LDP, BFD, PIM, VRRP, PBR, and NHRP.
.
These protocols are used to turn your system into a dynamic router,
exchanging information about available connections with other routers
sudo apt update
sudo apt-get install \
git autoconf automake libtool make libreadline-dev texinfo \
- pkg-config libpam0g-dev libjson-c-dev bison flex python3-pytest \
- libc-ares-dev python3-dev python-ipaddress python3-sphinx \
+ pkg-config libpam0g-dev libjson-c-dev bison flex \
+ libc-ares-dev python3-dev python3-sphinx \
install-info build-essential libsnmp-dev perl libcap-dev \
libelf-dev
sudo apt update
sudo apt-get install \
git autoconf automake libtool make libreadline-dev texinfo \
- pkg-config libpam0g-dev libjson-c-dev bison flex python3-pytest \
- libc-ares-dev python3-dev python-ipaddress python3-sphinx \
+ pkg-config libpam0g-dev libjson-c-dev bison flex \
+ libc-ares-dev python3-dev python3-sphinx \
install-info build-essential libsnmp-dev perl \
libcap-dev python2 libelf-dev
selector: "<" `selector_seq_seq` ">" `varname_token`
: "{" `selector_seq_seq` "}" `varname_token`
: "[" `selector_seq_seq` "]" `varname_token`
+ : "![" `selector_seq_seq` "]" `varname_token`
selector_seq_seq: `selector_seq_seq` "|" `selector_token_seq`
: `selector_token_seq`
selector_token_seq: `selector_token_seq` `selector_token`
provide mutual exclusion. User input matches at most one option.
- ``[square brackets]`` -- Contains sequences of tokens that can be omitted.
``[<a|b>]`` can be shortened to ``[a|b]``.
+- ``![exclamation square brackets]`` -- same as ``[square brackets]``, but
+ only allow skipping the contents if the command input starts with ``no``.
+ (For cases where the positive command needs a parameter, but the parameter
+ is optional for the negative case.)
- ``{curly|braces}`` -- similar to angle brackets, but instead of mutual
exclusion, curly braces indicate that one or more of the pipe-separated
sequences may be provided in any order.
``ip`` partially matches ``ipv6`` but exactly matches ``ip``, so ``ip`` will
win.
+Adding a CLI Node
+-----------------
+
+To add a new CLI node, you should:
+
+- define a new numerical node constant
+- define a node structure in the relevant daemon
+- call ``install_node()`` in the relevant daemon
+- define and install the new node in vtysh
+- define corresponding node entry commands in daemon and vtysh
+- add a new entry to the ``ctx_keywords`` dictionary in ``tools/frr-reload.py``
+
+Defining the numerical node constant
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Add your new node value to the enum before ``NODE_TYPE_MAX`` in
+``lib/command.h``:
+
+.. code-block:: c
+
+ enum node_type {
+ AUTH_NODE, // Authentication mode of vty interface.
+ VIEW_NODE, // View node. Default mode of vty interface.
+ [...]
+ MY_NEW_NODE,
+ NODE_TYPE_MAX, // maximum
+ };
+
+Defining a node structure
+^^^^^^^^^^^^^^^^^^^^^^^^^
+In your daemon-specific code where you define your new commands that
+attach to the new node, add a node definition:
+
+.. code-block:: c
+
+ static struct cmd_node my_new_node = {
+ .name = "my new node name",
+ .node = MY_NEW_NODE, // enum node_type lib/command.h
+ .parent_node = CONFIG_NODE,
+ .prompt = "%s(my-new-node-prompt)# ",
+ .config_write = my_new_node_config_write,
+ };
+
+You will need to define ``my_new_node_config_write(struct vty \*vty)``
+(or omit this field if you have no relevant configuration to save).
+
+Calling ``install_node()``
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+In the daemon's initialization function, before installing your new commands
+with ``install_element()``, add a call ``install_node(&my_new_node)``.
+
+Defining and installing the new node in vtysh
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The build tools automatically collect command definitions for vtysh.
+However, new nodes must be coded in vtysh specifically.
+
+In ``vtysh/vtysh.c``, define a stripped-down node structure and
+call ``install_node()``:
+
+.. code-block:: c
+
+ static struct cmd_node my_new_node = {
+ .name = "my new node name",
+ .node = MY_NEW_NODE, /* enum node_type lib/command.h */
+ .parent_node = CONFIG_NODE,
+ .prompt = "%s(my-new-node-prompt)# ",
+ };
+ [...]
+ void vtysh_init_vty(void)
+ {
+ [...]
+ install_node(&my_new_node)
+ [...]
+ }
+
+Defining corresponding node entry commands in daemon and vtysh
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The command that descends into the new node is typically programmed
+with ``VTY_PUSH_CONTEXT`` or equivalent in the daemon's CLI handler function.
+(If the CLI has been updated to use the new northbound architecture,
+``VTY_PUSH_XPATH`` is used instead.)
+
+In vtysh, you must implement a corresponding node change so that vtysh
+tracks the daemon's movement through the node tree.
+
+Although the build tools typically scan daemon code for CLI definitions
+to replicate their parsing in vtysh, the node-descent function in the
+daemon must be blocked from this replication so that a hand-coded
+skeleton can be written in ``vtysh.c``.
+
+Accordingly, use one of the ``*_NOSH`` macros such as ``DEFUN_NOSH``,
+``DEFPY_NOSH``, or ``DEFUN_YANG_NOSH`` for the daemon's node-descent
+CLI definition, and use ``DEFUNSH`` in ``vtysh.c`` for the vtysh equivalent.
+
+.. seealso:: :ref:`vtysh-special-defuns`
+
+Examples:
+
+``zebra_whatever.c``
+
+.. code-block:: c
+
+ DEFPY_NOSH(my_new_node,
+ my_new_node_cmd,
+ "my-new-node foo",
+ "New Thing\n"
+ "A foo\n")
+ {
+ [...]
+ VTY_PUSH_CONTEXT(MY_NEW_NODE, bar);
+ [...]
+ }
+
+
+``ripd_whatever.c``
+
+.. code-block:: c
+
+ DEFPY_YANG_NOSH(my_new_node,
+ my_new_node_cmd,
+ "my-new-node foo",
+ "New Thing\n"
+ "A foo\n")
+ {
+ [...]
+ VTY_PUSH_XPATH(MY_NEW_NODE, xbar);
+ [...]
+ }
+
+
+``vtysh.c``
+
+.. code-block:: c
+
+ DEFUNSH(VTYSH_ZEBRA, my_new_node,
+ my_new_node_cmd,
+ "my-new-node foo",
+ "New Thing\n"
+ "A foo\n")
+ {
+ vty->node = MY_NEW_NODE;
+ return CMD_SUCCESS;
+ }
+ [...]
+ install_element(CONFIG_NODE, &my_new_node_cmd);
+
+
+Adding a new entry to the ``ctx_keywords`` dictionary
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+In file ``tools/frr-reload.py``, the ``ctx_keywords`` dictionary
+describes the various node relationships.
+Add a new node entry at the appropriate level in this dictionary.
+
+.. code-block:: python
+
+ ctx_keywords = {
+ [...]
+ "key chain ": {
+ "key ": {}
+ },
+ [...]
+ "my-new-node": {},
+ [...]
+ }
+
+
+
Inspection & Debugging
----------------------
Logging of test case executions
-------------------------------
-* The user can enable logging of testcases execution messages into log file by
- adding ``frrtest_log_dir = /tmp/topotests/`` in :file:`pytest.ini`.
-* Router's current configuration can be displyed on console or sent to logs by
- adding ``show_router_config = True`` in :file:`pytest.ini`.
+* The execution log for each test is saved in the test specific directory create
+ under `/tmp/topotests` (e.g.,
+ `/tmp/topotests/<testdirname.testfilename>/exec.log`)
-Log file name will be displayed when we start execution:
+* Additionally all test logs are captured in the `topotest.xml` results file.
+ This file will be saved in `/tmp/topotests/topotests.xml`. In order to extract
+ the logs for a particular test one can use the `analyze.py` utility found in
+ the topotests base directory.
-.. code-block:: console
-
- root@test:# python ./test_topo_json_single_link.py
-
- Logs will be sent to logfile:
- /tmp/topotests/test_topo_json_single_link_11:57:01.353797
+* Router's current configuration, as it is changed during the test, can be
+ displayed on console or sent to logs by adding ``show_router_config = True`` in
+ :file:`pytest.ini`.
Note: directory "/tmp/topotests/" is created by topotests by default, making
use of same directory to save execution logs.
This is the recommended test writing routine:
-* Create a json file , which will have routers and protocol configurations
-* Create topology from json
-* Create configuration from json
-* Write the tests
+* Create a json file which will have routers and protocol configurations
+* Write and debug the tests
* Format the new code using `black <https://github.com/psf/black>`_
* Create a Pull Request
.. Note::
- BGP tests MUST use generous convergence timeouts - you must ensure
- that any test involving BGP uses a convergence timeout of at least
- 130 seconds.
+ BGP tests MUST use generous convergence timeouts - you must ensure that any
+ test involving BGP uses a convergence timeout that is proportional to the
+ configured BGP timers. If the timers are not reduced from their defaults this
+ means 130 seconds; however, it is highly recommended that timers be reduced
+ from the default values unless the test requires they not be.
File Hierarchy
^^^^^^^^^^^^^^
.. code-block:: console
- $ cd path/to/topotests
+ $ cd frr/tests/topotests
$ find ./*
...
- ./example-topojson-test # the basic example test topology-1
- ./example-topojson-test/test_example_topojson.json # input json file, having
- topology, interfaces, bgp and other configuration
- ./example-topojson-test/test_example_topojson.py # test script to write and
- execute testcases
+ ./example_test/
+ ./example_test/test_template_json.json # input json file, having topology, interfaces, bgp and other configuration
+ ./example_test/test_template_json.py # test script to write and execute testcases
...
./lib # shared test/topology functions
- ./lib/topojson.py # library to create topology and configurations dynamically
- from json file
- ./lib/common_config.py # library to create protocol's common configurations ex-
- static_routes, prefix_lists, route_maps etc.
- ./lib/bgp.py # library to create only bgp configurations
+ ./lib/topojson.py # library to create topology and configurations dynamically from json file
+ ./lib/common_config.py # library to create protocol's common configurations ex- static_routes, prefix_lists, route_maps etc.
+ ./lib/bgp.py # library to create and test bgp configurations
Defining the Topology and initial configuration in JSON file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Building topology and configurations
""""""""""""""""""""""""""""""""""""
-Topology and initial configuration will be created in setup_module(). Following
-is the sample code::
+Topology and initial configuration as well as teardown are invoked through the
+use of a pytest fixture::
- class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
- # Building topology from json file
- build_topo_from_json(tgen, topo)
+ from lib import fixtures
- def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = pytest.fixture(fixtures.tgen_json, scope="module")
- # Starting topology, create tmp files which are loaded to routers
- # to start deamons and then start routers
- start_topology(tgen)
- # Creating configuration from JSON
- build_config_from_json(tgen, topo)
+ # tgen is defined above
+ # topo is a fixture defined in ../conftest.py and automatically available
+ def test_bgp_convergence(tgen, topo):
+ bgp_convergence = bgp.verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence
- def teardown_module(mod):
- tgen = get_topogen()
+The `fixtures.topo_json` function calls `topojson.setup_module_from_json()` to
+create and return a new `topogen.Topogen()` object using the JSON config file
+with the same base filename as the test (i.e., `test_file.py` ->
+`test_file.json`). Additionally, the fixture calls `tgen.stop_topology()` after
+all the tests have run to cleanup. The function is only invoked once per
+file/module (scope="module"), but the resulting object is passed to each
+function that has `tgen` as an argument.
- # Stop toplogy and Remove tmp files
- stop_topology(tgen)
+For more info on the powerful pytest fixtures feature please see `FIXTURES`_.
-
-* Note: Topology will be created in setup module but routers will not be
- started until we load zebra.conf and bgpd.conf to routers. For all routers
- dirs will be created in /tmp/topotests/<test_folder_name>/<router_name>
- zebra.conf and bgpd.conf empty files will be created and laoded to routers.
- All folder and files are deleted in teardown module..
+.. _FIXTURES: https://docs.pytest.org/en/6.2.x/fixture.html
Creating configuration files
""""""""""""""""""""""""""""
these configs can be used by any other protocols as it is.
BGP config will be specific to BGP protocol testing.
-* JSON file is passed to API build_config_from_json(), which looks for
- configuration tags in JSON file.
-* If tag is found in JSON, configuration is created as per input and written
- to file frr_json.conf
+* json file is passed to API Topogen() which saves the JSON object in
+ `self.json_topo`
+* The Topogen object is then passed to API build_config_from_json(), which looks
+ for configuration tags in new JSON object.
+* If tag is found in the JSON object, configuration is created as per input and
+ written to file frr_json.conf
* Once JSON parsing is over, frr_json.conf is loaded onto respective router.
Config loading is done using 'vtysh -f <file>'. Initial config at this point
is also saved frr_json_initial.conf. This file can be used to reset
"""""""""""""
Test topologies should always be bootstrapped from the
-example-test/test_example.py, because it contains important boilerplate code
-that can't be avoided, like:
-
-imports: os, sys, pytest, topotest/topogen and mininet topology class
-
-The global variable CWD (Current Working directory): which is most likely going
-to be used to reference the routers configuration file location
+`example_test/test_template_json.py` when possible in order to take advantage of
+the most recent infrastructure support code.
Example:
-* The topology class that inherits from Mininet Topo class;
-
- .. code-block:: python
-
- class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
- # topology build code
-
-
-* pytest setup_module() and teardown_module() to start the topology:
+* Define a module scoped fixture to setup/teardown and supply the tests with the
+ `Topogen` object.
- .. code-block:: python
+.. code-block:: python
- def setup_module(_m):
- tgen = Topogen(TemplateTopo)
+ import pytest
+ from lib import fixtures
- # Starting topology, create tmp files which are loaded to routers
- # to start deamons and then start routers
- start_topology(tgen, CWD)
+ tgen = pytest.fixture(fixtures.tgen_json, scope="module")
- def teardown_module(_m):
- tgen = get_topogen()
- # Stop toplogy and Remove tmp files
- stop_topology(tgen, CWD)
+* Define test functions using pytest fixtures
+.. code-block:: python
-* ``__main__`` initialization code (to support running the script directly)
+ from lib import bgp
- .. code-block:: python
+ # tgen is defined above
+ # topo is a global available fixture defined in ../conftest.py
+ def test_bgp_convergence(tgen, topo):
+ "Test for BGP convergence."
- if **name** == '\ **main**\ ':
- sys.exit(pytest.main(["-s"]))
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ bgp_convergence = bgp.verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence
Topotests
=========
-Topotests is a suite of topology tests for FRR built on top of Mininet.
+Topotests is a suite of topology tests for FRR built on top of micronet.
Installation and Setup
----------------------
-Only tested with Ubuntu 16.04 and Ubuntu 18.04 (which uses Mininet 2.2.x).
+Topotests run under python3. Additionally, for ExaBGP (which is used in some of
+the BGP tests) an older python2 version must be installed.
+
+Tested with Ubuntu 20.04 and Ubuntu 18.04 and Debian 11.
Instructions are the same for all setups (i.e. ExaBGP is only used for BGP
tests).
-Installing Mininet Infrastructure
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Installing Topotest Requirements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: shell
- apt-get install mininet
- apt-get install python-pip
- apt-get install iproute
- apt-get install iperf
- pip install ipaddr
- pip install "pytest<5"
- pip install "scapy>=2.4.2"
- pip install exabgp==3.4.17 (Newer 4.0 version of exabgp is not yet
- supported)
+ apt-get install iproute2
+ apt-get install net-tools
+ apt-get install python3-pip
+ python3 -m pip install wheel
+ python3 -m pip install 'pytest>=6.2.4'
+ python3 -m pip install 'pytest-xdist>=2.3.0'
+ python3 -m pip install 'scapy>=2.4.5'
+ python3 -m pip install xmltodict
+ # Use python2 pip to install older ExaBGP
+ python2 -m pip install 'exabgp<4.0.0'
useradd -d /var/run/exabgp/ -s /bin/false exabgp
+
Enable Coredumps
""""""""""""""""
Executing Tests
---------------
-Execute all tests with output to console
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Execute all tests in distributed test mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: shell
- py.test -s -v --tb=no
+ py.test -s -v -nauto --dist=loadfile
The above command must be executed from inside the topotests directory.
All test\_\* scripts in subdirectories are detected and executed (unless
-disabled in ``pytest.ini`` file).
+disabled in ``pytest.ini`` file). Pytest will execute up to N tests in parallel
+where N is based on the number of cores on the host.
+
+Analyze Test Results (``analyze.py``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default router and execution logs are saved in ``/tmp/topotests`` and an XML
+results file is saved in ``/tmp/topotests.xml``. An analysis tool ``analyze.py``
+is provided to archive and analyze these results after the run completes.
+
+After the test run completes one should pick an archive directory to store the
+results in and pass this value to ``analyze.py``. On first execution the results
+are copied to that directory from ``/tmp``, and subsequent runs use that
+directory for analyzing the results. Below is an example of this which also
+shows the default behavior which is to display all failed and errored tests in
+the run.
+
+.. code:: shell
+
+ ~/frr/tests/topotests# ./analyze.py -Ar run-save
+ bgp_multiview_topo1/test_bgp_multiview_topo1.py::test_bgp_converge
+ ospf_basic_functionality/test_ospf_lan.py::test_ospf_lan_tc1_p0
+ bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py::test_BGP_GR_10_p2
+ bgp_multiview_topo1/test_bgp_multiview_topo1.py::test_bgp_routingTable
+
+Here we see that 4 tests have failed. We an dig deeper by displaying the
+captured logs and errors. First let's redisplay the results enumerated by adding
+the ``-E`` flag
+
+.. code:: shell
+
+ ~/frr/tests/topotests# ./analyze.py -Ar run-save -E
+ 0 bgp_multiview_topo1/test_bgp_multiview_topo1.py::test_bgp_converge
+ 1 ospf_basic_functionality/test_ospf_lan.py::test_ospf_lan_tc1_p0
+ 2 bgp_gr_functionality_topo2/test_bgp_gr_functionality_topo2.py::test_BGP_GR_10_p2
+ 3 bgp_multiview_topo1/test_bgp_multiview_topo1.py::test_bgp_routingTable
+
+Now to look at the error message for a failed test we use ``-T N`` where N is
+the number of the test we are interested in along with ``--errmsg`` option.
+
+.. code:: shell
+
+ ~/frr/tests/topotests# ./analyze.py -Ar run-save -T0 --errmsg
+ bgp_multiview_topo1/test_bgp_multiview_topo1.py::test_bgp_converge: AssertionError: BGP did not converge:
+
+ IPv4 Unicast Summary (VIEW 1):
+ BGP router identifier 172.30.1.1, local AS number 100 vrf-id -1
+ BGP table version 1
+ RIB entries 1, using 184 bytes of memory
+ Peers 3, using 2169 KiB of memory
+
+ Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc
+ 172.16.1.1 4 65001 0 0 0 0 0 never Connect 0 N/A
+ 172.16.1.2 4 65002 0 0 0 0 0 never Connect 0 N/A
+ 172.16.1.5 4 65005 0 0 0 0 0 never Connect 0 N/A
+
+ Total number of neighbors 3
+
+ assert False
+
+Now to look at the full text of the error for a failed test we use ``-T N``
+where N is the number of the test we are interested in along with ``--errtext``
+option.
+
+.. code:: shell
+
+ ~/frr/tests/topotests# ./analyze.py -Ar run-save -T0 --errtext
+ bgp_multiview_topo1/test_bgp_multiview_topo1.py::test_bgp_converge: def test_bgp_converge():
+ "Check for BGP converged on all peers and BGP views"
+
+ global fatal_error
+ global net
+ [...]
+ else:
+ # Bail out with error if a router fails to converge
+ bgpStatus = net["r%s" % i].cmd('vtysh -c "show ip bgp view %s summary"' % view)
+ > assert False, "BGP did not converge:\n%s" % bgpStatus
+ E AssertionError: BGP did not converge:
+ E
+ E IPv4 Unicast Summary (VIEW 1):
+ E BGP router identifier 172.30.1.1, local AS number 100 vrf-id -1
+ [...]
+ E Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc
+ E 172.16.1.1 4 65001 0 0 0 0 0 never Connect 0 N/A
+ E 172.16.1.2 4 65002 0 0 0 0 0 never Connect 0 N/A
+ [...]
+
+To look at the full capture for a test including the stdout and stderr which
+includes full debug logs, just use the ``-T N`` option without the ``--errmsg``
+or ``--errtext`` options.
+
+.. code:: shell
+
+ ~/frr/tests/topotests# ./analyze.py -Ar run-save -T0
+ @classname: bgp_multiview_topo1.test_bgp_multiview_topo1
+ @name: test_bgp_converge
+ @time: 141.401
+ @message: AssertionError: BGP did not converge:
+ [...]
+ system-out: --------------------------------- Captured Log ---------------------------------
+ 2021-08-09 02:55:06,581 DEBUG: lib.micronet_compat.topo: Topo(unnamed): Creating
+ 2021-08-09 02:55:06,581 DEBUG: lib.micronet_compat.topo: Topo(unnamed): addHost r1
+ [...]
+ 2021-08-09 02:57:16,932 DEBUG: topolog.r1: LinuxNamespace(r1): cmd_status("['/bin/bash', '-c', 'vtysh -c "show ip bgp view 1 summary" 2> /dev/null | grep ^[0-9] | grep -vP " 11\\s+(\\d+)"']", kwargs: {'encoding': 'utf-8', 'stdout': -1, 'stderr': -2, 'shell': False})
+ 2021-08-09 02:57:22,290 DEBUG: topolog.r1: LinuxNamespace(r1): cmd_status("['/bin/bash', '-c', 'vtysh -c "show ip bgp view 1 summary" 2> /dev/null | grep ^[0-9] | grep -vP " 11\\s+(\\d+)"']", kwargs: {'encoding': 'utf-8', 'stdout': -1, 'stderr': -2, 'shell': False})
+ 2021-08-09 02:57:27,636 DEBUG: topolog.r1: LinuxNamespace(r1): cmd_status("['/bin/bash', '-c', 'vtysh -c "show ip bgp view 1 summary"']", kwargs: {'encoding': 'utf-8', 'stdout': -1, 'stderr': -2, 'shell': False})
+ --------------------------------- Captured Out ---------------------------------
+ system-err: --------------------------------- Captured Err ---------------------------------
-``--tb=no`` disables the python traceback which might be irrelevant unless the
-test script itself is debugged.
Execute single test
^^^^^^^^^^^^^^^^^^^
For the simulated topology, see the description in the python file.
-If you need to clear the mininet setup between tests (if it isn't cleanly
-shutdown), then use the ``mn -c`` command to clean up the environment.
-
StdErr log from daemos after exit
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Debugging Topotest Failures
^^^^^^^^^^^^^^^^^^^^^^^^^^^
-For the below debugging options which launch programs, if the topotest is run
-within screen_ or tmux_, ``gdb``, the shell or ``vtysh`` will be launched using
-that windowing program, otherwise mininet's ``xterm`` functionality will be used
-to launch the given program.
+Install and run tests inside ``tmux`` or ``byobu`` for best results.
-If you wish to force the use of ``xterm`` rather than ``tmux`` or ``screen``, or
-wish to use ``gnome-terminal`` instead of ``xterm``, set the environment
-variable ``FRR_TOPO_TERMINAL`` to either ``xterm`` or ``gnome-terminal``.
+``XTerm`` is also fully supported. GNU ``screen`` can be used in most
+situations; however, it does not work as well with launching ``vtysh`` or shell
+on error.
+
+For the below debugging options which launch programs or CLIs, topotest should
+be run within ``tmux`` (or ``screen``)_, as ``gdb``, the shell or ``vtysh`` will
+be launched using that windowing program, otherwise ``xterm`` will be attempted
+to launch the given programs.
.. _screen: https://www.gnu.org/software/screen/
.. _tmux: https://github.com/tmux/tmux/wiki
-Spawning ``vtysh`` or Shells on Routers
-"""""""""""""""""""""""""""""""""""""""
-
-Topotest can automatically launch a shell or ``vtysh`` for any or all routers in
-a test. This is enabled by specifying 1 of 2 CLI arguments ``--shell`` or
-``--vtysh``. Both of these options can be set to a single router value, multiple
-comma-seperated values, or ``all``.
-
-When either of these options are specified topotest will pause after each test
-to allow for inspection of the router state.
+Spawning Debugging CLI, ``vtysh`` or Shells on Routers on Test Failure
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-Here's an example of launching ``vtysh`` on routers ``rt1`` and ``rt2``.
+One can have a debugging CLI invoked on test failures by specifying the
+``--cli-on-error`` CLI option as shown in the example below.
.. code:: shell
- pytest --vtysh=rt1,rt2 all-protocol-startup
-
-Spawning Mininet CLI, ``vtysh`` or Shells on Routers on Test Failure
-""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+ pytest --cli-on-error all-protocol-startup
-Similar to the previous section one can have ``vtysh`` or a shell launched on
-routers, but in this case only when a test fails. To launch the given process on
-each router after a test failure specify one of ``--shell-on-error`` or
-``--vtysh-on-error``.
+The debugging CLI can run shell or vtysh commands on any combination of routers
+It can also open shells or vtysh in their own windows for any combination of
+routers. This is usually the most useful option when debugging failures. Here is
+the help command from within a CLI launched on error:
+.. code:: shell
-Here's an example of having ``vtysh`` launched on test failure.
+ test_bgp_multiview_topo1/test_bgp_routingTable> help
+
+ Commands:
+ help :: this help
+ sh [hosts] <shell-command> :: execute <shell-command> on <host>
+ term [hosts] :: open shell terminals for hosts
+ vtysh [hosts] :: open vtysh terminals for hosts
+ [hosts] <vtysh-command> :: execute vtysh-command on hosts
+
+ test_bgp_multiview_topo1/test_bgp_routingTable> r1 show int br
+ ------ Host: r1 ------
+ Interface Status VRF Addresses
+ --------- ------ --- ---------
+ erspan0 down default
+ gre0 down default
+ gretap0 down default
+ lo up default
+ r1-eth0 up default 172.16.1.254/24
+ r1-stub up default 172.20.0.1/28
+
+ ----------------------
+ test_bgp_multiview_topo1/test_bgp_routingTable>
+
+Additionally, one can have ``vtysh`` or a shell launched on all routers when a
+test fails. To launch the given process on each router after a test failure
+specify one of ``--shell-on-error`` or ``--vtysh-on-error``.
-.. code:: shell
+Spawning ``vtysh`` or Shells on Routers
+"""""""""""""""""""""""""""""""""""""""
- pytest --vtysh-on-error all-protocol-startup
+Topotest can automatically launch a shell or ``vtysh`` for any or all routers in
+a test. This is enabled by specifying 1 of 2 CLI arguments ``--shell`` or
+``--vtysh``. Both of these options can be set to a single router value, multiple
+comma-seperated values, or ``all``.
+When either of these options are specified topotest will pause after setup and
+each test to allow for inspection of the router state.
-Additionally, one can have the mininet CLI invoked on test failures by
-specifying the ``--mininet-on-error`` CLI option as shown in the example below.
+Here's an example of launching ``vtysh`` on routers ``rt1`` and ``rt2``.
.. code:: shell
- pytest --mininet-on-error all-protocol-startup
+ pytest --vtysh=rt1,rt2 all-protocol-startup
Debugging with GDB
""""""""""""""""""
$ # Change to the top level directory of topotests.
$ cd path/to/topotests
- $ # Tests must be run as root, since Mininet requires it.
+ $ # Tests must be run as root, since micronet requires it.
$ sudo pytest
In order to run a specific test, you can use the following command:
- Avoid including unstable data in your test: don't rely on link-local
addresses or ifindex values, for example, because these can change
from run to run.
-- Using sleep is almost never appropriate to wait for some convergence
- event as the sole item done. As an example: if the test resets the peers
- in BGP, the test should look for the peers reconverging instead of just
- sleeping an arbitrary amount of time and continuing on. It is ok to
- use sleep in a tight loop with appropriate show commands to ensure that
- the protocol reaches the desired state. This should be bounded by
- appropriate timeouts for the protocol in question though. See
- verify_bgp_convergence as a good example of this. If you are having
- troubles figuring out what to look for, please do not be afraid to ask.
+- Using sleep is almost never appropriate. As an example: if the test resets the
+ peers in BGP, the test should look for the peers re-converging instead of just
+ sleeping an arbitrary amount of time and continuing on. See
+ `verify_bgp_convergence` as a good example of this. In particular look at it's
+ use of the `@retry` decorator. If you are having troubles figuring out what to
+ look for, please do not be afraid to ask.
+- Don't duplicate effort. There exists many protocol utility functions that can
+ be found in their eponymous module under `tests/topotests/lib/` (e.g.,
+ `ospf.py`)
+
Topotest File Hierarchy
.. code:: py
- class TemplateTopo(Topo):
- "Test topology builder"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ topodef = {
+ "s1": "r1"
+ "s2": ("r1", "r2")
+ }
+
+If more specialized topology definitions, or router initialization arguments are
+required a build function can be used instead of a dictionary:
+
+.. code:: py
+
+ def build_topo(tgen):
+ "Build function"
- # Create 2 routers
- for routern in range(1, 3):
- tgen.add_router('r{}'.format(routern))
+ # Create 2 routers
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- # Create a switch with just one router connected to it to simulate a
- # empty network.
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
+ # Create a switch with just one router connected to it to simulate a
+ # empty network.
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
- # Create a connection between r1 and r2
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ # Create a connection between r1 and r2
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- Run the topology
.. option:: -s
- Actives input/output capture. This is required by mininet in order to show
- the interactive shell.
+ Actives input/output capture. If this is not specified a new window will be
+ opened for the interactive CLI, otherwise it will be activated inline.
.. option:: --topology-only
.. code:: shell
- === test session starts ===
- platform linux2 -- Python 2.7.12, pytest-3.1.2, py-1.4.34, pluggy-0.4.0
- rootdir: /media/sf_src/topotests, inifile: pytest.ini
- collected 3 items
-
- ospf-topo1/test_ospf_topo1.py *** Starting controller
-
- *** Starting 6 switches
- switch1 switch2 switch3 switch4 switch5 switch6 ...
- r2: frr zebra started
- r2: frr ospfd started
- r3: frr zebra started
- r3: frr ospfd started
- r1: frr zebra started
- r1: frr ospfd started
- r4: frr zebra started
- r4: frr ospfd started
- *** Starting CLI:
- mininet>
-
-The last line shows us that we are now using the Mininet CLI (Command Line
-Interface), from here you can call your router ``vtysh`` or even bash.
-
-Here are some commands example:
-
-.. code:: shell
-
- mininet> r1 ping 10.0.3.1
- PING 10.0.3.1 (10.0.3.1) 56(84) bytes of data.
- 64 bytes from 10.0.3.1: icmp_seq=1 ttl=64 time=0.576 ms
- 64 bytes from 10.0.3.1: icmp_seq=2 ttl=64 time=0.083 ms
- 64 bytes from 10.0.3.1: icmp_seq=3 ttl=64 time=0.088 ms
- ^C
- --- 10.0.3.1 ping statistics ---
- 3 packets transmitted, 3 received, 0% packet loss, time 1998ms
- rtt min/avg/max/mdev = 0.083/0.249/0.576/0.231 ms
+ frr/tests/topotests# sudo pytest -s --topology-only ospf_topo1/test_ospf_topo1.py
+ ============================= test session starts ==============================
+ platform linux -- Python 3.9.2, pytest-6.2.4, py-1.10.0, pluggy-0.13.1
+ rootdir: /home/chopps/w/frr/tests/topotests, configfile: pytest.ini
+ plugins: forked-1.3.0, xdist-2.3.0
+ collected 11 items
+ [...]
+ unet>
+The last line shows us that we are now using the CLI (Command Line
+Interface), from here you can call your router ``vtysh`` or even bash.
- mininet> r1 ping 10.0.3.3
- PING 10.0.3.3 (10.0.3.3) 56(84) bytes of data.
- 64 bytes from 10.0.3.3: icmp_seq=1 ttl=64 time=2.87 ms
- 64 bytes from 10.0.3.3: icmp_seq=2 ttl=64 time=0.080 ms
- 64 bytes from 10.0.3.3: icmp_seq=3 ttl=64 time=0.091 ms
- ^C
- --- 10.0.3.3 ping statistics ---
- 3 packets transmitted, 3 received, 0% packet loss, time 2003ms
- rtt min/avg/max/mdev = 0.080/1.014/2.872/1.313 ms
+Here's the help text:
+.. code:: shell
+ unet> help
+ Commands:
+ help :: this help
+ sh [hosts] <shell-command> :: execute <shell-command> on <host>
+ term [hosts] :: open shell terminals for hosts
+ vtysh [hosts] :: open vtysh terminals for hosts
+ [hosts] <vtysh-command> :: execute vtysh-command on hosts
- mininet> r3 vtysh
+.. code:: shell
- Hello, this is FRRouting (version 3.1-devrzalamena-build).
- Copyright 1996-2005 Kunihiro Ishiguro, et al.
+Here are some commands example:
- frr-1# show running-config
- Building configuration...
+.. code:: shell
- Current configuration:
- !
- frr version 3.1-devrzalamena-build
- frr defaults traditional
- hostname r3
- no service integrated-vtysh-config
- !
- log file zebra.log
- !
- log file ospfd.log
- !
- interface r3-eth0
- ip address 10.0.3.1/24
- !
- interface r3-eth1
- ip address 10.0.10.1/24
- !
- interface r3-eth2
- ip address 172.16.0.2/24
- !
- router ospf
- ospf router-id 10.0.255.3
- redistribute kernel
- redistribute connected
- redistribute static
- network 10.0.3.0/24 area 0
- network 10.0.10.0/24 area 0
- network 172.16.0.0/24 area 1
- !
- line vty
- !
- end
- frr-1#
+ unet> sh r1 ping 10.0.3.1
+ PING 10.0.3.1 (10.0.3.1) 56(84) bytes of data.
+ 64 bytes from 10.0.3.1: icmp_seq=1 ttl=64 time=0.576 ms
+ 64 bytes from 10.0.3.1: icmp_seq=2 ttl=64 time=0.083 ms
+ 64 bytes from 10.0.3.1: icmp_seq=3 ttl=64 time=0.088 ms
+ ^C
+ --- 10.0.3.1 ping statistics ---
+ 3 packets transmitted, 3 received, 0% packet loss, time 1998ms
+ rtt min/avg/max/mdev = 0.083/0.249/0.576/0.231 ms
+
+ unet> r1 show run
+ Building configuration...
+
+ Current configuration:
+ !
+ frr version 8.1-dev-my-manual-build
+ frr defaults traditional
+ hostname r1
+ log file /tmp/topotests/ospf_topo1.test_ospf_topo1/r1/zebra.log
+ [...]
+ end
+
+ unet> show daemons
+ ------ Host: r1 ------
+ zebra ospfd ospf6d staticd
+ ------- End: r1 ------
+ ------ Host: r2 ------
+ zebra ospfd ospf6d staticd
+ ------- End: r2 ------
+ ------ Host: r3 ------
+ zebra ospfd ospf6d staticd
+ ------- End: r3 ------
+ ------ Host: r4 ------
+ zebra ospfd ospf6d staticd
+ ------- End: r4 ------
After you successfully configured your topology, you can obtain the
configuration files (per-daemon) using the following commands:
.. code:: shell
- mininet> r3 vtysh -d ospfd
+ unet> sh r3 vtysh -d ospfd
Hello, this is FRRouting (version 3.1-devrzalamena-build).
Copyright 1996-2005 Kunihiro Ishiguro, et al.
- frr-1# show running-config
+ r1# show running-config
Building configuration...
Current configuration:
line vty
!
end
- frr-1#
+ r1#
+
+You can also login to the node specified by nsenter using bash, etc.
+A pid file for each node will be created in the relevant test dir.
+You can run scripts inside the node, or use vtysh's <tab> or <?> feature.
+
+.. code:: shell
+
+ [unet shell]
+ # cd tests/topotests/srv6_locator
+ # ./test_srv6_locator.py --topology-only
+ unet> r1 show segment-routing srv6 locator
+ Locator:
+ Name ID Prefix Status
+ -------------------- ------- ------------------------ -------
+ loc1 1 2001:db8:1:1::/64 Up
+ loc2 2 2001:db8:2:2::/64 Up
+
+ [Another shell]
+ # nsenter -a -t $(cat /tmp/topotests/srv6_locator.test_srv6_locator/r1.pid) bash --norc
+ # vtysh
+ r1# r1 show segment-routing srv6 locator
+ Locator:
+ Name ID Prefix Status
+ -------------------- ------- ------------------------ -------
+ loc1 1 2001:db8:1:1::/64 Up
+ loc2 2 2001:db8:2:2::/64 Up
Writing Tests
"""""""""""""
Test topologies should always be bootstrapped from
-:file:`tests/topotests/example-test/test_template.py` because it contains
+:file:`tests/topotests/example_test/test_template.py` because it contains
important boilerplate code that can't be avoided, like:
-- imports: os, sys, pytest, topotest/topogen and mininet topology class
-- The global variable CWD (Current Working directory): which is most likely
- going to be used to reference the routers configuration file location
-
Example:
.. code:: py
- # For all registered routers, load the zebra configuration file
- for rname, router in router_list.items():
- router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
- )
- # os.path.join() joins the CWD string with arguments adding the necessary
- # slashes ('/'). Arguments must not begin with '/'.
+ # For all routers arrange for:
+ # - starting zebra using config file from <rtrname>/zebra.conf
+ # - starting ospfd using an empty config file.
+ for rname, router in router_list.items():
+ router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ router.load_config(TopoRouter.RD_OSPF)
-- The topology class that inherits from Mininet Topo class:
+
+- The topology definition or build function
.. code:: py
- class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+ topodef = {
+ "s1": ("r1", "r2"),
+ "s2": ("r2", "r3")
+ }
+
+ def build_topo(tgen):
# topology build code
+ ...
-- pytest ``setup_module()`` and ``teardown_module()`` to start the topology
+- pytest setup/teardown fixture to start the topology and supply `tgen` argument
+ to tests.
.. code:: py
- def setup_module(_m):
- tgen = Topogen(TemplateTopo)
- tgen.start_topology('debug')
- def teardown_module(_m):
- tgen = get_topogen()
- tgen.stop_topology()
+ @pytest.fixture(scope="module")
+ def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
-- ``__main__`` initialization code (to support running the script directly)
+ tgen = Topogen(topodef, module.__name__)
+ # or
+ tgen = Topogen(build_topo, module.__name__)
-.. code:: py
+ ...
+
+ # Start and configure the router daemons
+ tgen.start_router()
+
+ # Provide tgen as argument to each test function
+ yield tgen
+
+ # Teardown after last test runs
+ tgen.stop_topology()
- if __name__ == '__main__':
- sys.exit(pytest.main(["-s"]))
Requirements:
(Pdb) router1 = tgen.gears[router]
(Pdb) router1.vtysh_cmd('show ip ospf route')
'============ OSPF network routing table ============\r\nN 10.0.1.0/24 [10] area: 0.0.0.0\r\n directly attached to r1-eth0\r\nN 10.0.2.0/24 [20] area: 0.0.0.0\r\n via 10.0.3.3, r1-eth1\r\nN 10.0.3.0/24 [10] area: 0.0.0.0\r\n directly attached to r1-eth1\r\nN 10.0.10.0/24 [20] area: 0.0.0.0\r\n via 10.0.3.1, r1-eth1\r\nN IA 172.16.0.0/24 [20] area: 0.0.0.0\r\n via 10.0.3.1, r1-eth1\r\nN IA 172.16.1.0/24 [30] area: 0.0.0.0\r\n via 10.0.3.1, r1-eth1\r\n\r\n============ OSPF router routing table =============\r\nR 10.0.255.2 [10] area: 0.0.0.0, ASBR\r\n via 10.0.3.3, r1-eth1\r\nR 10.0.255.3 [10] area: 0.0.0.0, ABR, ASBR\r\n via 10.0.3.1, r1-eth1\r\nR 10.0.255.4 IA [20] area: 0.0.0.0, ASBR\r\n via 10.0.3.1, r1-eth1\r\n\r\n============ OSPF external routing table ===========\r\n\r\n\r\n'
- (Pdb) tgen.mininet_cli()
- *** Starting CLI:
- mininet>
+ (Pdb) tgen.cli()
+ unet>
-To enable more debug messages in other Topogen subsystems (like Mininet), more
+To enable more debug messages in other Topogen subsystems, more
logging messages can be displayed by modifying the test configuration file
``pytest.ini``:
objects. If you encounter existing apis that *could* be ``const``,
consider including changes in your own pull-request.
+Help with specific warnings
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+FRR's configure script enables a whole batch of extra warnings, some of which
+may not be obvious in how to fix. Here are some notes on specific warnings:
+
+* ``-Wstrict-prototypes``: you probably just forgot the ``void`` in a function
+ declaration with no parameters, i.e. ``static void foo() {...}`` rather than
+ ``static void foo(void) {...}``.
+
+ Without the ``void``, in C, it's a function with *unspecified* parameters
+ (and varargs calling convention.) This is a notable difference to C++, where
+ the ``void`` is optional and an empty parameter list means no parameters.
+
.. _documentation:
.. clicmd:: distance bgp (1-255) (1-255) (1-255)
- This command change distance value of BGP. The arguments are the distance
- values for for external routes, internal routes and local routes
+ This command changes distance value of BGP. The arguments are the distance
+ values for external routes, internal routes and local routes
respectively.
.. clicmd:: distance (1-255) A.B.C.D/M
RIB entries 7, using 1344 bytes of memory
Peers 2, using 43 KiB of memory
- Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt
- 192.168.0.2 4 65002 8 10 0 0 0 00:03:09 5 (Policy)
- fe80:1::2222 4 65002 9 11 0 0 0 00:03:09 (Policy) (Policy)
+ Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd PfxSnt Desc
+ 192.168.0.2 4 65002 8 10 0 0 0 00:03:09 5 (Policy) N/A
+ fe80:1::2222 4 65002 9 11 0 0 0 00:03:09 (Policy) (Policy) N/A
Additionally a `show bgp neighbor` command would indicate in the `For address family:`
block that:
.. clicmd:: bgp graceful-restart
- This command will enable BGP graceful restart ifunctionality at the global
+ This command will enable BGP graceful restart functionality at the global
level.
.. clicmd:: bgp graceful-restart disable
.. clicmd:: neighbor A.B.C.D graceful-restart
- This command will enable BGP graceful restart ifunctionality at the peer
+ This command will enable BGP graceful restart functionality at the peer
level.
.. clicmd:: neighbor A.B.C.D graceful-restart-helper
This configuration demonstrates how the 'no bgp default ipv4-unicast' might
be used in a setup with two upstreams where each of the upstreams should only
- receive either IPv4 or IPv6 annocuments.
+ receive either IPv4 or IPv6 announcements.
Using the ``bgp default ipv6-unicast`` configuration, IPv6 unicast
address family is enabled by default for all new neighbors.
This feature is used to enable read-only mode on BGP process restart or when
a BGP process is cleared using 'clear ip bgp \*'. Note that this command is
- configured under the specific bgp instance/vrf that the feaure is enabled for.
+ configured under the specific bgp instance/vrf that the feature is enabled for.
It cannot be used at the same time as the global "bgp update-delay" described
above, which is entered at the global level and applies to all bgp instances.
The global and per-vrf approaches to defining update-delay are mutually
limit is set to 100 by default. Increasing this value will really be
possible if more file descriptors are available in the BGP process. This
value is defined by the underlying system (ulimit value), and can be
- overriden by `--limit-fds`. More information is available in chapter
+ overridden by `--limit-fds`. More information is available in chapter
(:ref:`common-invocation-options`).
.. clicmd:: coalesce-time (0-4294967295)
Allow peerings between directly connected eBGP peers using loopback
addresses.
+.. clicmd:: neighbor PEER disable-link-bw-encoding-ieee
+
+ By default bandwidth in extended communities is carried encoded as IEEE
+ floating-point format, which is according to the draft.
+
+ Older versions have the implementation where extended community bandwidth
+ value is carried encoded as uint32. To enable backward compatibility we
+ need to disable IEEE floating-point encoding option per-peer.
+
.. clicmd:: neighbor PEER ebgp-multihop
Specifying ``ebgp-multihop`` allows sessions with eBGP neighbors to
can't connect them directly. This is an alternative to
`neighbor WORD as-override`.
- The parameter `(1-10)` configures the amount of accepted occurences of the
+ The parameter `(1-10)` configures the amount of accepted occurrences of the
system AS number in AS path.
The parameter `origin` configures BGP to only accept routes originated with
Configure BGP to send best known paths to neighbor in order to preserve multi
path capabilities inside a network.
+.. clicmd:: neighbor <A.B.C.D|X:X::X:X|WORD> disable-addpath-rx
+
+ Do not accept additional paths from this neighbor.
+
.. clicmd:: neighbor PEER ttl-security hops NUMBER
This command enforces Generalized TTL Security Mechanism (GTSM), as
Set keepalive and hold timers for a neighbor. The first value is keepalive
and the second is hold time.
-.. clicmd:: neighbor PEER connect (1-65535)
+.. clicmd:: neighbor PEER timers connect (1-65535)
Set connect timer for a neighbor. The connect timer controls how long BGP
waits between connection attempts to a neighbor.
default, the DelayOpenTimer is disabled. The timer interval may be set to a
duration of 1 to 240 seconds.
+.. clicmd:: bgp minimum-holdtime (1-65535)
+
+ This command allows user to prevent session establishment with BGP peers
+ with lower holdtime less than configured minimum holdtime.
+ When this command is not set, minimum holdtime does not work.
+
Displaying Information about Peers
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. clicmd:: match extcommunity WORD
+.. clicmd:: set extcommunity none
+
+ This command resets the extended community value in BGP updates. If the attribute is
+ already configured or received from the peer, the attribute is discarded and set to
+ none. This is useful if you need to strip incoming extended communities.
+
.. clicmd:: set extcommunity rt EXTCOMMUNITY
This command set Route Target value.
SAFI RIB of the default VRF for use in MPLS-based L3VPNs. Unicast routes may
also be leaked between any VRFs (including the unicast RIB of the default BGP
instanced). A shortcut syntax is also available for specifying leaking from one
-VRF to another VRF using the default instance's VPN RIB as the intemediary. A
+VRF to another VRF using the default instance's VPN RIB as the intermediary. A
common application of the VRF-VRF feature is to connect a customer's private
routing domain to a provider's VPN service. Leaking is configured from the
point of view of an individual VRF: ``import`` refers to routes leaked from VPN
.. clicmd:: advertise-pip [ip <addr> [mac <addr>]]
-Enables or disables advertise-pip feature, specifiy system-IP and/or system-MAC
+Enables or disables advertise-pip feature, specify system-IP and/or system-MAC
parameters.
EVPN advertise-svi-ip
1. This feature is supported for asymmetric routing model only. While
sending packets to SN1, ingress PE (PE2) performs routing and
egress PE (PE1) performs only bridging.
-2. This feature supports only tratitional(non vlan-aware) bridge model. Bridge
+2. This feature supports only traditional(non vlan-aware) bridge model. Bridge
interface associated with L2VNI is an L3 interface. i.e., this interface is
configured with an address in the L2VNI subnet. Note that the gateway IP
should also have an address in the same subnet.
Ethernet Segments
"""""""""""""""""
An Ethernet Segment can be configured by specifying a system-MAC and a
-local discriminatior against the bond interface on the PE (via zebra) -
+local discriminator against the bond interface on the PE (via zebra) -
.. clicmd:: evpn mh es-id (1-16777215)
BUM traffic is rxed via the overlay by all PEs attached to a server but
only the DF can forward the de-capsulated traffic to the access port. To
-accomodate that non-DF filters are installed in the dataplane to drop
+accommodate that non-DF filters are installed in the dataplane to drop
the traffic.
Similarly traffic received from ES peers via the overlay cannot be forwarded
.. clicmd:: show bgp [afi] [safi] [all] summary failed [json]
- Show a bgp peer summary for peers that are not succesfully exchanging routes
+ Show a bgp peer summary for peers that are not successfully exchanging routes
for the specified address family, and subsequent address-family.
.. clicmd:: show bgp [afi] [safi] [all] summary established [json]
- Show a bgp peer summary for peers that are succesfully exchanging routes
+ Show a bgp peer summary for peers that are successfully exchanging routes
for the specified address family, and subsequent address-family.
.. clicmd:: show bgp [afi] [safi] [all] summary neighbor [PEER] [json]
If ``summary`` option is specified, output is a summary of the counts for
the chunks, inuse, ledger and requests list along with the count of
- outstanding chunk requests to Zebra and the nummber of zebra reconnects
+ outstanding chunk requests to Zebra and the number of zebra reconnects
that have happened
If ``json`` option is specified, output is displayed in JSON format.
.. [bgp-route-osci-cond] McPherson, D. and Gill, V. and Walton, D., "Border Gateway Protocol (BGP) Persistent Route Oscillation Condition", IETF RFC3345
.. [stable-flexible-ibgp] Flavel, A. and M. Roughan, "Stable and flexible iBGP", ACM SIGCOMM 2009
.. [ibgp-correctness] Griffin, T. and G. Wilfong, "On the correctness of IBGP configuration", ACM SIGCOMM 2002
+
+.. _bgp-fast-convergence:
+
+BGP fast-convergence support
+============================
+Whenever BGP peer address becomes unreachable we must bring down the BGP
+session immediately. Currently only single-hop EBGP sessions are brought
+down immediately.IBGP and multi-hop EBGP sessions wait for hold-timer
+expiry to bring down the sessions.
+
+This new configuration option helps user to teardown BGP sessions immediately
+whenever peer becomes unreachable.
+
+.. clicmd:: bgp fast-convergence
+
+This configuration is available at the bgp level. When enabled, configuration
+is applied to all the neighbors configured in that bgp instance.
+
+.. code-block:: frr
+
+ router bgp 64496
+ neighbor 10.0.0.2 remote-as 64496
+ neighbor fd00::2 remote-as 64496
+ bgp fast-convergence
+ !
+ address-family ipv4 unicast
+ redistribute static
+ exit-address-family
+ !
+ address-family ipv6 unicast
+ neighbor fd00::2 activate
+ exit-address-family
The `not-advertise` option, when present, prevents the summary route from
being advertised, effectively filtering the summarized routes.
-.. clicmd:: area A.B.C.D nssa
+.. clicmd:: area A.B.C.D nssa [no-summary]
-.. clicmd:: area (0-4294967295) nssa
+.. clicmd:: area (0-4294967295) nssa [no-summary]
Configure the area to be a NSSA (Not-So-Stubby Area).
4. Support for NSSA Translator functionality when there are multiple NSSA
ABR in an area.
+ An NSSA ABR can be configured with the `no-summary` option to prevent the
+ advertisement of summaries into the area. In that case, a single Type-3 LSA
+ containing a default route is originated into the NSSA.
+
.. _ospf6-interface:
OSPF6 interface
Redistribute routes to OSPF6
============================
-.. clicmd:: redistribute <babel|bgp|connected|isis|kernel|openfabric|ripng|sharp|static|table> [route-map WORD]
+.. clicmd:: redistribute <babel|bgp|connected|isis|kernel|openfabric|ripng|sharp|static|table> [metric-type (1-2)] [metric (0-16777214)] [route-map WORD]
- Redistribute routes from other protocols into OSPFv3.
+ Redistribute routes of the specified protocol or kind into OSPFv3, with the
+ metric type and metric set if specified, filtering the routes using the
+ given route-map if specified.
.. clicmd:: default-information originate [{always|metric (0-16777214)|metric-type (1-2)|route-map WORD}]
The following commands configure one or multiple cache servers.
-.. clicmd:: rpki cache (A.B.C.D|WORD) PORT [SSH_USERNAME] [SSH_PRIVKEY_PATH] [SSH_PUBKEY_PATH] [KNOWN_HOSTS_PATH] PREFERENCE
+.. clicmd:: rpki cache (A.B.C.D|WORD) PORT [SSH_USERNAME] [SSH_PRIVKEY_PATH] [SSH_PUBKEY_PATH] [KNOWN_HOSTS_PATH] [source A.B.C.D] PREFERENCE
Add a cache server to the socket. By default, the connection between router
on the configuration of the operating system environment, usually
:file:`~/.ssh/known_hosts`.
+ source A.B.C.D
+ Source address of the RPKI connection to access cache server.
+
.. _validating-bgp-updates:
rpki polling_period 1000
rpki timeout 10
! SSH Example:
- rpki cache example.com 22 rtr-ssh ./ssh_key/id_rsa ./ssh_key/id_rsa.pub preference 1
+ rpki cache example.com source 141.22.28.223 22 rtr-ssh ./ssh_key/id_rsa ./ssh_key/id_rsa.pub preference 1
! TCP Example:
rpki cache rpki-validator.realmv6.org 8282 preference 2
exit
network 192.168.0.0/16
neighbor 123.123.123.0 remote-as 60002
neighbor 123.123.123.0 route-map rpki in
+ neighbor 123.123.123.0 update-source 141.22.28.223
!
address-family ipv6
neighbor 123.123.123.0 activate
- neighbor 123.123.123.0 route-map rpki in
+ neighbor 123.123.123.0 route-map rpki in
exit-address-family
!
route-map rpki permit 10
+# syntax=docker/dockerfile:1
+
+# Create a basic stage set up to build APKs
+FROM alpine:3.13 as alpine-builder
+RUN apk add \
+ --update-cache \
+ abuild \
+ alpine-conf \
+ alpine-sdk \
+ && setup-apkcache /var/cache/apk \
+ && mkdir -p /pkgs/apk \
+ && echo 'builder ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+RUN adduser -D -G abuild builder && su builder -c 'abuild-keygen -a -n'
+
+# This stage builds a libyang APK from source
+FROM alpine-builder as libyang-builder
+RUN mkdir -p /libyang && chown -R builder /pkgs /libyang
+COPY docker/alpine/libyang/ /libyang
+USER builder
+RUN cd /libyang \
+ && abuild checksum \
+ && abuild -r -P /pkgs/apk
+
# This stage builds a dist tarball from the source
FROM alpine:3.13 as source-builder
--update-cache \
$makedepends \
gzip \
+ py-pip \
&& pip install pytest
+RUN mkdir -p /pkgs/apk
+COPY --from=libyang-builder /pkgs/apk/ /pkgs/apk/
+RUN apk add \
+ --no-cache \
+ --allow-untrusted /pkgs/apk/*/*.apk
+
COPY . /src
ARG PKGVER
RUN cd /src \
--with-pkg-extra-version="_git$PKGVER" \
&& make dist
-# This stage builds an apk from the dist tarball
-FROM alpine:3.13 as alpine-builder
-# Don't use nocache here so that abuild can use the cache
-RUN apk add \
- --update-cache \
- abuild \
- alpine-conf \
- alpine-sdk \
- py-pip \
- && pip install pytest \
- && setup-apkcache /var/cache/apk \
- && mkdir -p /pkgs/apk \
- && echo 'builder ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
-
+# This stage builds an APK from the dist tarball
+FROM alpine-builder as frr-apk-builder
+COPY --from=libyang-builder /pkgs/apk/ /pkgs/apk/
COPY --from=source-builder /src/frr-*.tar.gz /src/alpine/* /dist/
-RUN adduser -D -G abuild builder && chown -R builder /dist /pkgs
+RUN find /pkgs/apk -type f -name APKINDEX.tar.gz -delete
+RUN apk add \
+ --no-cache \
+ --allow-untrusted /pkgs/apk/*/*.apk
+RUN chown -R builder /dist /pkgs
USER builder
RUN cd /dist \
- && abuild-keygen -a -n \
&& abuild checksum \
&& git init \
&& abuild -r -P /pkgs/apk
# This stage installs frr from the apk
FROM alpine:3.13
RUN mkdir -p /pkgs/apk
-COPY --from=alpine-builder /pkgs/apk/ /pkgs/apk/
+COPY --from=frr-apk-builder /pkgs/apk/ /pkgs/apk/
RUN apk add \
--no-cache \
--update-cache \
#!/bin/ash
+if [ -r "/lib/lsb/init-functions" ]; then
+ . /lib/lsb/init-functions
+else
+ log_success_msg() {
+ echo "$@"
+ }
+ log_warning_msg() {
+ echo "$@" >&2
+ }
+ log_failure_msg() {
+ echo "$@" >&2
+ }
+fi
+
source /usr/lib/frr/frrcommon.sh
/usr/lib/frr/watchfrr $(daemon_list)
--- /dev/null
+From 8f4907590afbe3eafabcf5b461c0ae51b65c3a37 Mon Sep 17 00:00:00 2001
+From: Michal Vasko <mvasko@cesnet.cz>
+Date: Thu, 10 Jun 2021 15:07:02 +0200
+Subject: [PATCH] libyang BUGFIX do not include non-standard headers
+
+Fixes #1614
+---
+ src/context.c | 1 -
+ src/diff.c | 1 -
+ src/log.c | 1 -
+ src/out.c | 1 -
+ src/plugins_types.c | 1 -
+ src/plugins_types/bits.c | 1 -
+ src/plugins_types/date_and_time.c | 1 -
+ src/plugins_types/identityref.c | 1 -
+ src/plugins_types/integer.c | 1 -
+ src/plugins_types/ipv4_address.c | 1 -
+ src/plugins_types/ipv4_address_no_zone.c | 1 -
+ src/plugins_types/ipv4_prefix.c | 1 -
+ src/plugins_types/ipv6_address.c | 1 -
+ src/plugins_types/ipv6_address_no_zone.c | 1 -
+ src/plugins_types/ipv6_prefix.c | 1 -
+ src/plugins_types/union.c | 1 -
+ src/schema_compile_node.c | 1 -
+ src/tree_data_helpers.c | 1 -
+ src/tree_schema.c | 1 -
+ src/validation.c | 1 -
+ src/xpath.c | 1 -
+ tools/re/main.c | 1 -
+ 22 files changed, 22 deletions(-)
+
+diff --git a/src/context.c b/src/context.c
+index eb671255..ac62cac5 100644
+--- a/src/context.c
++++ b/src/context.c
+@@ -17,7 +17,6 @@
+ #define _XOPEN_SOURCE 1
+ #define _XOPEN_SOURCE_EXTENDED 1
+ #endif
+-#include <sys/cdefs.h>
+
+ #include "context.h"
+
+diff --git a/src/diff.c b/src/diff.c
+index b40dd73a..4971c6fe 100644
+--- a/src/diff.c
++++ b/src/diff.c
+@@ -12,7 +12,6 @@
+ * https://opensource.org/licenses/BSD-3-Clause
+ */
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "diff.h"
+
+diff --git a/src/log.c b/src/log.c
+index 97c7b283..9cd5fd0d 100644
+--- a/src/log.c
++++ b/src/log.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "log.h"
+
+diff --git a/src/out.c b/src/out.c
+index 37beb696..898d663a 100644
+--- a/src/out.c
++++ b/src/out.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "out.h"
+ #include "out_internal.h"
+diff --git a/src/plugins_types.c b/src/plugins_types.c
+index 26bac210..a2cf0f38 100644
+--- a/src/plugins_types.c
++++ b/src/plugins_types.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/plugins_types/bits.c b/src/plugins_types/bits.c
+index 9d086ffb..ef87691b 100644
+--- a/src/plugins_types/bits.c
++++ b/src/plugins_types/bits.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/plugins_types/date_and_time.c b/src/plugins_types/date_and_time.c
+index 0d52dbb1..a23caaa9 100644
+--- a/src/plugins_types/date_and_time.c
++++ b/src/plugins_types/date_and_time.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/plugins_types/identityref.c b/src/plugins_types/identityref.c
+index 90546d69..91ddbde2 100644
+--- a/src/plugins_types/identityref.c
++++ b/src/plugins_types/identityref.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/plugins_types/integer.c b/src/plugins_types/integer.c
+index 44e87f99..bf2b7812 100644
+--- a/src/plugins_types/integer.c
++++ b/src/plugins_types/integer.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/plugins_types/ipv4_address.c b/src/plugins_types/ipv4_address.c
+index a95752ea..a7369d6b 100644
+--- a/src/plugins_types/ipv4_address.c
++++ b/src/plugins_types/ipv4_address.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/plugins_types/ipv4_address_no_zone.c b/src/plugins_types/ipv4_address_no_zone.c
+index a17a7efe..1fb34b06 100644
+--- a/src/plugins_types/ipv4_address_no_zone.c
++++ b/src/plugins_types/ipv4_address_no_zone.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/plugins_types/ipv4_prefix.c b/src/plugins_types/ipv4_prefix.c
+index 3108b2c5..6fb93390 100644
+--- a/src/plugins_types/ipv4_prefix.c
++++ b/src/plugins_types/ipv4_prefix.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/plugins_types/ipv6_address.c b/src/plugins_types/ipv6_address.c
+index c0d20fa4..d09425b3 100644
+--- a/src/plugins_types/ipv6_address.c
++++ b/src/plugins_types/ipv6_address.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/plugins_types/ipv6_address_no_zone.c b/src/plugins_types/ipv6_address_no_zone.c
+index c612b663..06bd1891 100644
+--- a/src/plugins_types/ipv6_address_no_zone.c
++++ b/src/plugins_types/ipv6_address_no_zone.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/plugins_types/ipv6_prefix.c b/src/plugins_types/ipv6_prefix.c
+index b3ad34b6..91431fef 100644
+--- a/src/plugins_types/ipv6_prefix.c
++++ b/src/plugins_types/ipv6_prefix.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/plugins_types/union.c b/src/plugins_types/union.c
+index a8ec43b3..89e81c7a 100644
+--- a/src/plugins_types/union.c
++++ b/src/plugins_types/union.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* strdup */
+-#include <sys/cdefs.h>
+
+ #include "plugins_types.h"
+
+diff --git a/src/schema_compile_node.c b/src/schema_compile_node.c
+index 424b7f8f..273023de 100644
+--- a/src/schema_compile_node.c
++++ b/src/schema_compile_node.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "schema_compile_node.h"
+
+diff --git a/src/tree_data_helpers.c b/src/tree_data_helpers.c
+index 488efbbb..2d9ba624 100644
+--- a/src/tree_data_helpers.c
++++ b/src/tree_data_helpers.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include <assert.h>
+ #include <ctype.h>
+diff --git a/src/tree_schema.c b/src/tree_schema.c
+index 93f29796..4a57cc47 100644
+--- a/src/tree_schema.c
++++ b/src/tree_schema.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "tree_schema.h"
+
+diff --git a/src/validation.c b/src/validation.c
+index b9eda810..e2062256 100644
+--- a/src/validation.c
++++ b/src/validation.c
+@@ -12,7 +12,6 @@
+ * https://opensource.org/licenses/BSD-3-Clause
+ */
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "validation.h"
+
+diff --git a/src/xpath.c b/src/xpath.c
+index b68a76b8..ea1cdfc9 100644
+--- a/src/xpath.c
++++ b/src/xpath.c
+@@ -12,7 +12,6 @@
+ * https://opensource.org/licenses/BSD-3-Clause
+ */
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include "xpath.h"
+
+diff --git a/tools/re/main.c b/tools/re/main.c
+index b512ad80..4d8aa99c 100644
+--- a/tools/re/main.c
++++ b/tools/re/main.c
+@@ -13,7 +13,6 @@
+ */
+
+ #define _GNU_SOURCE /* asprintf, strdup */
+-#include <sys/cdefs.h>
+
+ #include <errno.h>
+ #include <getopt.h>
+--
+2.31.1
+
--- /dev/null
+From 2054431ea3024b177083f09c66c1bb4c3d08b048 Mon Sep 17 00:00:00 2001
+From: Wesley Coakley <w@wesleycoakley.com>
+Date: Wed, 16 Jun 2021 00:30:50 -0400
+Subject: [PATCH] don't attempt to parse dlerror() in utests
+
+---
+ tests/utests/basic/test_plugins.c | 17 -----------------
+ 1 file changed, 17 deletions(-)
+
+diff --git a/tests/utests/basic/test_plugins.c b/tests/utests/basic/test_plugins.c
+index fd9e6130..662fd9b4 100644
+--- a/tests/utests/basic/test_plugins.c
++++ b/tests/utests/basic/test_plugins.c
+@@ -36,23 +36,6 @@ static void
+ test_add_invalid(void **state)
+ {
+ assert_int_equal(LY_ESYS, lyplg_add(TESTS_BIN "/plugins/plugin_does_not_exist" LYPLG_SUFFIX));
+-
+-#ifdef __APPLE__
+- CHECK_LOG("Loading \""TESTS_BIN "/plugins/plugin_does_not_exist" LYPLG_SUFFIX "\" as a plugin failed "
+- "(dlopen("TESTS_BIN "/plugins/plugin_does_not_exist" LYPLG_SUFFIX ", 2): image not found).", NULL);
+-#else
+- CHECK_LOG("Loading \""TESTS_BIN "/plugins/plugin_does_not_exist" LYPLG_SUFFIX "\" as a plugin failed "
+- "("TESTS_BIN "/plugins/plugin_does_not_exist" LYPLG_SUFFIX ": cannot open shared object file: "
+- "No such file or directory).", NULL);
+-#endif
+-
+- assert_int_equal(LY_EINVAL, lyplg_add(TESTS_BIN "/plugins/plugin_invalid" LYPLG_SUFFIX));
+-#ifndef __APPLE__
+- /* OS X prints address of the symbol being searched and cmocka doesn't support wildcards in string checking assert */
+- CHECK_LOG("Processing user type plugin \""TESTS_BIN "/plugins/plugin_invalid"LYPLG_SUFFIX "\" failed, "
+- "missing type plugins information ("TESTS_BIN "/plugins/plugin_invalid"LYPLG_SUFFIX ": "
+- "undefined symbol: plugins_types__).", NULL);
+-#endif
+ }
+
+ static void
+--
+2.31.1
+
--- /dev/null
+# Contributor: Sören Tempel <soeren+alpine@soeren-tempel.net>
+# Maintainer: Christian Franke <nobody@nowhere.ws>
+pkgname=libyang
+pkgver=2.0.7
+pkgrel=0
+pkgdesc="YANG data modelling language parser and toolkit"
+url="https://github.com/CESNET/libyang"
+arch="all"
+license="BSD-3-Clause-Clear"
+makedepends="bison cmake cmocka-dev flex pcre2-dev"
+checkdepends="expect grep shunit2"
+subpackages="$pkgname-dev $pkgname-doc"
+source="$pkgname-$pkgver.tar.gz::https://github.com/CESNET/libyang/archive/v$pkgver.tar.gz
+ 10-remove-non-standard-headers.patch
+ 11-utest-dont-parse-dlerror.patch"
+
+# secfixes:
+# 1.0.215-r1:
+# - CVE-2021-28902
+# - CVE-2021-28903
+# - CVE-2021-28904
+# - CVE-2021-28905
+# - CVE-2021-28906
+
+build() {
+ if [ "$CBUILD" != "$CHOST" ]; then
+ CMAKE_CROSSOPTS="-DCMAKE_SYSTEM_NAME=Linux -DCMAKE_HOST_SYSTEM_NAME=Linux"
+ fi
+ cmake -B build \
+ -DCMAKE_BUILD_TYPE=None \
+ -DCMAKE_INSTALL_PREFIX=/usr \
+ -DCMAKE_INSTALL_LIBDIR=lib \
+ -DBUILD_SHARED_LIBS=True \
+ -DCMAKE_C_FLAGS="$CFLAGS" \
+ -DENABLE_BUILD_TESTS=ON \
+ "$CMAKE_CROSSOPTS"
+ make -C build
+}
+
+package() {
+ make -C build DESTDIR="$pkgdir" install
+}
+
+sha512sums="edb1d8d372b25ed820fa312e0dc96d4af7c8cd5ddeb785964de73f64774062ea7a5586bb27e2039ad24189d4a2ba04268921ca86e82423fc48647d1d10a2a0a7 libyang-2.0.7.tar.gz
+385008c715e6b0dc9e8f33c9cb550b3af7ee16f056f35d09a4ba01b9e00ddb88940915f93fc608fedd30b4f9a6a1503df414ae0be64b1263681b0ee18e6f4db8 10-remove-non-standard-headers.patch
+b16881d301a6aec68fbe6bfb7ba53a8fcdb4b9eead3b03573e0e2a4a8c3c3d6962db623be14d29c023b5a7ad0f685da1f6033dd9985f7a2914ad2f4da07e60cb 11-utest-dont-parse-dlerror.patch"
RUN apt update && \
apt-get install -y \
git autoconf automake libtool make libreadline-dev texinfo \
- pkg-config libpam0g-dev libjson-c-dev bison flex python3-pytest \
- libc-ares-dev python3-dev python-ipaddress python3-sphinx \
+ pkg-config libpam0g-dev libjson-c-dev bison flex python3-pip \
+ libc-ares-dev python3-dev python3-sphinx \
install-info build-essential libsnmp-dev perl libcap-dev \
libelf-dev \
sudo gdb iputils-ping time \
- mininet python-pip iproute2 iperf && \
- pip install ipaddr && \
- pip install "pytest<5" && \
- pip install "scapy>=2.4.2" && \
- pip install exabgp==3.4.17
+ python-pip net-tools iproute2 && \
+ python3 -m pip install wheel && \
+ python3 -m pip install pytest && \
+ python3 -m pip install pytest-xdist && \
+ python3 -m pip install "scapy>=2.4.2" && \
+ python3 -m pip install xmltodict && \
+ python2 -m pip install 'exabgp<4.0.0'
RUN groupadd -r -g 92 frr && \
groupadd -r -g 85 frrvty && \
RUN apt update && \
apt-get install -y \
git autoconf automake libtool make libreadline-dev texinfo \
- pkg-config libpam0g-dev libjson-c-dev bison flex python3-pytest \
- libc-ares-dev python3-dev python-ipaddress python3-sphinx \
+ pkg-config libpam0g-dev libjson-c-dev bison flex python3-pip \
+ libc-ares-dev python3-dev python3-sphinx \
install-info build-essential libsnmp-dev perl \
libcap-dev python2 libelf-dev \
sudo gdb curl iputils-ping time \
libgrpc++-dev libgrpc-dev protobuf-compiler-grpc \
lua5.3 liblua5.3-dev \
- mininet iproute2 iperf && \
+ net-tools iproute2 && \
curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output /tmp/get-pip.py && \
python2 /tmp/get-pip.py && \
rm -f /tmp/get-pip.py && \
- pip2 install ipaddr && \
- pip2 install "pytest<5" && \
- pip2 install "scapy>=2.4.2" && \
- pip2 install exabgp==3.4.17
+ python3 -m pip install wheel && \
+ python3 -m pip install pytest && \
+ python3 -m pip install pytest-xdist && \
+ python3 -m pip install "scapy>=2.4.2" && \
+ python3 -m pip install xmltodict && \
+ python2 -m pip install 'exabgp<4.0.0'
RUN groupadd -r -g 92 frr && \
groupadd -r -g 85 frrvty && \
void eigrp_cli_show_end_header(struct vty *vty, struct lyd_node *dnode)
{
+ vty_out(vty, "exit\n");
vty_out(vty, "!\n");
}
install_element(EIGRP_NODE, &eigrp_neighbor_cmd);
install_element(EIGRP_NODE, &eigrp_redistribute_source_metric_cmd);
- vrf_cmd_init(NULL, &eigrpd_privs);
+ vrf_cmd_init(NULL);
if_cmd_init(eigrp_write_interface);
break;
default:
frr_help_exit(1);
- break;
}
}
{
struct eigrp *eigrp;
const char *vrf;
+ struct vrf *pVrf;
vrf_id_t vrfid;
switch (args->event) {
break;
case NB_EV_PREPARE:
vrf = yang_dnode_get_string(args->dnode, "./vrf");
- vrfid = vrf_name_to_id(vrf);
+
+ pVrf = vrf_lookup_by_name(vrf);
+ if (pVrf)
+ vrfid = pVrf->vrf_id;
+ else
+ vrfid = VRF_DEFAULT;
eigrp = eigrp_get(yang_dnode_get_uint16(args->dnode, "./asn"),
vrfid);
struct eigrp *eigrp;
uint32_t proto;
vrf_id_t vrfid;
+ struct vrf *pVrf;
switch (args->event) {
case NB_EV_VALIDATE:
proto = yang_dnode_get_enum(args->dnode, "./protocol");
vrfname = yang_dnode_get_string(args->dnode, "../vrf");
- vrfid = vrf_name_to_id(vrfname);
+
+ pVrf = vrf_lookup_by_name(vrfname);
+ if (pVrf)
+ vrfid = pVrf->vrf_id;
+ else
+ vrfid = VRF_DEFAULT;
+
if (vrf_bitmap_check(zclient->redist[AFI_IP][proto], vrfid))
return NB_ERR_INCONSISTENCY;
break;
write += hook_call(isis_circuit_config_write,
circuit, vty);
}
- vty_endframe(vty, "!\n");
+ vty_endframe(vty, "exit\n!\n");
}
return write;
vty_out(vty, "\n");
}
+void cli_show_router_isis_end(struct vty *vty, struct lyd_node *dnode)
+{
+ vty_out(vty, "exit\n");
+}
+
/*
* XPath: /frr-interface:lib/interface/frr-isisd:isis/
* XPath: /frr-interface:lib/interface/frr-isisd:isis/ipv4-routing
}
/*
- * Build a list of LSPs with non-zero ht bounded by start and stop ids
+ * Build a list of LSPs with non-zero ht and seqno bounded by start and stop ids
*/
void lsp_build_list_nonzero_ht(struct lspdb_head *head, const uint8_t *start_id,
const uint8_t *stop_id, struct list *list)
ISIS_SYS_ID_LEN + 2) > 0)
break;
- if (lsp->hdr.rem_lifetime)
+ if (lsp->hdr.rem_lifetime && lsp->hdr.seqno)
listnode_add(list, lsp);
}
}
break;
default:
frr_help_exit(1);
- break;
}
}
.xpath = "/frr-isisd:isis/instance",
.cbs = {
.cli_show = cli_show_router_isis,
+ .cli_show_end = cli_show_router_isis_end,
.create = isis_instance_create,
.destroy = isis_instance_destroy,
},
/* Optional 'cli_show' callbacks. */
void cli_show_router_isis(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
+void cli_show_router_isis_end(struct vty *vty, struct lyd_node *dnode);
void cli_show_ip_isis_ipv4(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
void cli_show_ip_isis_ipv6(struct vty *vty, struct lyd_node *dnode,
*
* 2. I could be replaced in unit test environment
*/
-#ifndef ISIS_SNMP_HAVE_TIME_FUNC
-static uint32_t isis_snmp_time(void)
-{
- return (uint32_t)time(NULL);
-}
-
-#endif
/* ISIS-MIB instances. */
static oid isis_oid[] = {ISIS_MIB};
struct isis_circuit *circuit;
uint32_t up_ticks;
uint32_t delta_ticks;
- uint32_t now_time;
+ time_t now_time;
int res;
*write_method = NULL;
return SNMP_INTEGER(0);
up_ticks = (uint32_t)netsnmp_get_agent_uptime();
- now_time = isis_snmp_time();
+ now_time = time(NULL);
if (circuit->last_uptime >= now_time)
return SNMP_INTEGER(up_ticks);
oid *oid_idx;
size_t oid_idx_len;
int res;
- uint32_t val;
+ time_t val;
struct isis_adjacency *adj;
uint32_t up_ticks;
uint32_t delta_ticks;
- uint32_t now_time;
+ time_t now_time;
*write_method = NULL;
* It seems that we want remaining timer
*/
if (adj->last_upd != 0) {
- val = isis_snmp_time();
+ val = time(NULL);
if (val < (adj->last_upd + adj->hold_time))
return SNMP_INTEGER(adj->last_upd
+ adj->hold_time - val);
up_ticks = (uint32_t)netsnmp_get_agent_uptime();
- now_time = isis_snmp_time();
+ now_time = time(NULL);
if (adj->last_flap >= now_time)
return SNMP_INTEGER(up_ticks);
if (isis == NULL || !isis->snmp_notifications || !smux_enabled())
return 0;
- time_now = isis_snmp_time();
+ time_now = time(NULL);
if ((isis_snmp_trap_timestamp[trap_id] + 5) > time_now)
/* Throttle trap rate at 1 in 5 secs */
vrf_init(isis_vrf_new, isis_vrf_enable, isis_vrf_disable,
isis_vrf_delete, isis_vrf_enable);
- vrf_cmd_init(NULL, &isisd_privs);
+ vrf_cmd_init(NULL);
}
void isis_terminate()
write += area_write_mt_settings(area, vty);
write += fabricd_write_settings(area, vty);
+
+ vty_out(vty, "exit\n");
}
}
ldpTrapSession(nbr, LDPSESSIONDOWN);
}
-static int ldp_snmp_agentx_enabled()
+static int ldp_snmp_agentx_enabled(void)
{
main_imsg_compose_both(IMSG_AGENTX_ENABLED, NULL, 0);
ia->hello_interval != 0)
vty_out (vty, " discovery hello interval %u\n",
ia->hello_interval);
+
+ vty_out (vty, " exit\n");
}
}
ldp_af_config_write(vty, AF_INET, ldpd_conf, &ldpd_conf->ipv4);
ldp_af_config_write(vty, AF_INET6, ldpd_conf, &ldpd_conf->ipv6);
vty_out (vty, " !\n");
+ vty_out (vty, "exit\n");
vty_out (vty, "!\n");
return (1);
" ! Incomplete config, specify a neighbor lsr-id\n");
if (missing_pwid)
vty_out (vty," ! Incomplete config, specify a pw-id\n");
+
+ vty_out (vty, " exit\n");
}
static int
ldp_l2vpn_pw_config_write(vty, pw);
vty_out (vty, " !\n");
+ vty_out (vty, "exit\n");
vty_out (vty, "!\n");
}
break;
default:
frr_help_exit(1);
- break;
}
}
if (args->ifnamelen)
stream_put(s, args->ifname, args->ifnamelen);
}
+
+ /* Send the C bit indicator. */
+ stream_putc(s, args->cbit);
#endif /* HAVE_BFDD */
/* Finish the message by writing the size. */
item(JOIN_TKN),
item(START_TKN),
item(END_TKN),
+ item(NEG_ONLY_TKN),
{0},
};
/* clang-format on */
/* MUST eventually converge on CONFIG_NODE */
enum node_type node_parent(enum node_type node)
{
- enum node_type ret;
+ struct cmd_node *cnode;
assert(node > CONFIG_NODE);
- switch (node) {
- case BGP_VPNV4_NODE:
- case BGP_VPNV6_NODE:
- case BGP_FLOWSPECV4_NODE:
- case BGP_FLOWSPECV6_NODE:
- case BGP_VRF_POLICY_NODE:
- case BGP_VNC_DEFAULTS_NODE:
- case BGP_VNC_NVE_GROUP_NODE:
- case BGP_VNC_L2_GROUP_NODE:
- case BGP_IPV4_NODE:
- case BGP_IPV4M_NODE:
- case BGP_IPV4L_NODE:
- case BGP_IPV6_NODE:
- case BGP_IPV6M_NODE:
- case BGP_EVPN_NODE:
- case BGP_IPV6L_NODE:
- case BMP_NODE:
- ret = BGP_NODE;
- break;
- case BGP_EVPN_VNI_NODE:
- ret = BGP_EVPN_NODE;
- break;
- case KEYCHAIN_KEY_NODE:
- ret = KEYCHAIN_NODE;
- break;
- case LINK_PARAMS_NODE:
- ret = INTERFACE_NODE;
- break;
- case LDP_IPV4_NODE:
- case LDP_IPV6_NODE:
- ret = LDP_NODE;
- break;
- case LDP_IPV4_IFACE_NODE:
- ret = LDP_IPV4_NODE;
- break;
- case LDP_IPV6_IFACE_NODE:
- ret = LDP_IPV6_NODE;
- break;
- case LDP_PSEUDOWIRE_NODE:
- ret = LDP_L2VPN_NODE;
- break;
- case BFD_PEER_NODE:
- ret = BFD_NODE;
- break;
- case BFD_PROFILE_NODE:
- ret = BFD_NODE;
- break;
- case SR_TRAFFIC_ENG_NODE:
- ret = SEGMENT_ROUTING_NODE;
- break;
- case SR_SEGMENT_LIST_NODE:
- ret = SR_TRAFFIC_ENG_NODE;
- break;
- case SR_POLICY_NODE:
- ret = SR_TRAFFIC_ENG_NODE;
- break;
- case SR_CANDIDATE_DYN_NODE:
- ret = SR_POLICY_NODE;
- break;
- case PCEP_NODE:
- ret = SR_TRAFFIC_ENG_NODE;
- break;
- case PCEP_PCE_CONFIG_NODE:
- ret = PCEP_NODE;
- break;
- case PCEP_PCE_NODE:
- ret = PCEP_NODE;
- break;
- case PCEP_PCC_NODE:
- ret = PCEP_NODE;
- break;
- case SRV6_NODE:
- ret = SEGMENT_ROUTING_NODE;
- break;
- case SRV6_LOCS_NODE:
- ret = SRV6_NODE;
- break;
- case SRV6_LOC_NODE:
- ret = SRV6_LOCS_NODE;
- break;
- default:
- ret = CONFIG_NODE;
- break;
- }
+ cnode = vector_lookup(cmdvec, node);
- return ret;
+ return cnode->parent_node;
}
/* Execute command by argument vline vector. */
case START_TKN:
case JOIN_TKN:
+ case NEG_ONLY_TKN:
/* "<foo|bar> WORD" -> word is not "bar" or "foo" */
prevname = NULL;
break;
case JOIN_TKN:
color = "#ddaaff";
break;
+ case NEG_ONLY_TKN:
+ color = "#ffddaa";
+ break;
case WORD_TKN:
color = "#ffffff";
break;
JOIN_TKN, // marks subgraph end
START_TKN, // first token in line
END_TKN, // last token in line
+ NEG_ONLY_TKN, // filter token, match if "no ..." command
SPECIAL_TKN = FORK_TKN,
};
{VARIABLE} {yylval->string = XSTRDUP(MTYPE_LEX, yytext); return VARIABLE;}
{WORD} {yylval->string = XSTRDUP(MTYPE_LEX, yytext); return WORD;}
{RANGE} {yylval->string = XSTRDUP(MTYPE_LEX, yytext); return RANGE;}
+!\[ {yylval->string = NULL; return EXCL_BRACKET;}
. {return yytext[0];}
%%
/* matcher helper prototypes */
static int add_nexthops(struct list *, struct graph_node *,
- struct graph_node **, size_t);
+ struct graph_node **, size_t, bool);
static enum matcher_rv command_match_r(struct graph_node *, vector,
unsigned int, struct graph_node **,
static enum match_type match_mac(const char *, bool);
+static bool is_neg(vector vline, size_t idx)
+{
+ if (idx >= vector_active(vline))
+ return false;
+ return !strcmp(vector_slot(vline, idx), "no");
+}
+
enum matcher_rv command_match(struct graph *cmdgraph, vector vline,
struct list **argv, const struct cmd_element **el)
{
// get all possible nexthops
struct list *next = list_new();
- add_nexthops(next, start, NULL, 0);
+ add_nexthops(next, start, NULL, 0, is_neg(vline, 1));
// determine the best match
for (ALL_LIST_ELEMENTS_RO(next, ln, gn)) {
{
// pointer to next input token to match
char *input_token;
+ bool neg = is_neg(vline, 0);
struct list *
current =
// add all children of start node to list
struct graph_node *start = vector_slot(graph->nodes, 0);
- add_nexthops(next, start, &start, 0);
+ add_nexthops(next, start, &start, 0, neg);
unsigned int idx;
for (idx = 0; idx < vector_active(vline) && next->count > 0; idx++) {
listnode_add(next, newstack);
} else if (matchtype >= minmatch)
add_nexthops(next, gstack[0], gstack,
- idx + 1);
+ idx + 1, neg);
break;
default:
trace_matcher("no_match\n");
* output, instead of direct node pointers!
*/
static int add_nexthops(struct list *list, struct graph_node *node,
- struct graph_node **stack, size_t stackpos)
+ struct graph_node **stack, size_t stackpos, bool neg)
{
int added = 0;
struct graph_node *child;
if (j != stackpos)
continue;
}
+
+ if (token->type == NEG_ONLY_TKN && !neg)
+ continue;
+
if (token->type >= SPECIAL_TKN && token->type != END_TKN) {
- added += add_nexthops(list, child, stack, stackpos);
+ added +=
+ add_nexthops(list, child, stack, stackpos, neg);
} else {
if (stack) {
nextstack = XMALLOC(
%token <string> MAC
%token <string> MAC_PREFIX
+/* special syntax, value is irrelevant */
+%token <string> EXCL_BRACKET
+
/* union types for parsed rules */
%type <node> start
%type <node> literal_token
}
;
+/* ![option] productions */
+selector: EXCL_BRACKET selector_seq_seq ']' varname_token
+{
+ struct graph_node *neg_only = new_token_node (ctx, NEG_ONLY_TKN, NULL, NULL);
+
+ $$ = $2;
+ graph_add_edge ($$.start, neg_only);
+ graph_add_edge (neg_only, $$.end);
+ cmd_token_varname_set ($2.end->data, $4);
+ XFREE (MTYPE_LEX, $4);
+}
+;
+
%%
#undef scanner
if (gn->data) {
struct cmd_token *tok = gn->data;
switch (tok->type) {
-#define item(x) case x: wrap->type = #x; break;
- item(WORD_TKN) // words
- item(VARIABLE_TKN) // almost anything
- item(RANGE_TKN) // integer range
- item(IPV4_TKN) // IPV4 addresses
- item(IPV4_PREFIX_TKN) // IPV4 network prefixes
- item(IPV6_TKN) // IPV6 prefixes
- item(IPV6_PREFIX_TKN) // IPV6 network prefixes
- item(MAC_TKN) // MAC address
- item(MAC_PREFIX_TKN) // MAC address with mask
-
- /* plumbing types */
- item(FORK_TKN) item(JOIN_TKN) item(START_TKN)
- item(END_TKN) default
- : wrap->type = "???";
+#define item(x) \
+ case x: \
+ wrap->type = #x; \
+ break /* no semicolon */
+
+ item(WORD_TKN); // words
+ item(VARIABLE_TKN); // almost anything
+ item(RANGE_TKN); // integer range
+ item(IPV4_TKN); // IPV4 addresses
+ item(IPV4_PREFIX_TKN); // IPV4 network prefixes
+ item(IPV6_TKN); // IPV6 prefixes
+ item(IPV6_PREFIX_TKN); // IPV6 network prefixes
+ item(MAC_TKN); // MAC address
+ item(MAC_PREFIX_TKN); // MAC address with mask
+
+ /* plumbing types */
+ item(FORK_TKN);
+ item(JOIN_TKN);
+ item(START_TKN);
+ item(END_TKN);
+ item(NEG_ONLY_TKN);
+#undef item
+ default:
+ wrap->type = "???";
}
wrap->deprecated = (tok->attr == CMD_ATTR_DEPRECATED);
Py_RETURN_NONE;
}
+#ifdef HAVE_ELF_GETDATA_RAWCHUNK
static bool elffile_virt2file(struct elffile *w, GElf_Addr virt,
GElf_Addr *offs)
{
return false;
}
+#endif /* HAVE_ELF_GETDATA_RAWCHUNK */
static PyObject *elffile_subscript(PyObject *self, PyObject *key)
{
.tp_methods = methods_elffile,
};
+#ifdef HAVE_ELF_GETDATA_RAWCHUNK
static char *elfdata_strptr(Elf_Data *data, size_t offset)
{
char *p;
}
}
+#endif /* HAVE_ELF_GETDATA_RAWCHUNK */
/* primary (only, really) entry point to anything in this module */
static PyObject *elffile_load(PyTypeObject *type, PyObject *args,
whose names are inconsistent. */
#ifndef getenv
-extern char *getenv();
+extern char *getenv(const char *);
#endif
-static char *my_index(str, chr) const char *str;
-int chr;
+static char *my_index(const char *str, int chr)
{
while (*str) {
if (*str == chr)
#define HOOK_ADDDEF(...) (void *hookarg , ## __VA_ARGS__)
#define HOOK_ADDARG(...) (hookarg , ## __VA_ARGS__)
+/* and another helper to convert () into (void) to get a proper prototype */
+#define _SKIP_10(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, ret, ...) ret
+#define _MAKE_VOID(...) _SKIP_10(, ##__VA_ARGS__, , , , , , , , , , void)
+
+#define HOOK_VOIDIFY(...) (_MAKE_VOID(__VA_ARGS__) __VA_ARGS__)
+
/* use in header file - declares the hook and its arguments
* usage: DECLARE_HOOK(my_hook, (int arg1, struct foo *arg2), (arg1, arg2));
* as above, "passlist" must use the same order and same names as "arglist"
*/
#define DECLARE_HOOK(hookname, arglist, passlist) \
extern struct hook _hook_##hookname; \
- __attribute__((unused)) static void *_hook_typecheck_##hookname( \
- int(*funcptr) arglist) \
+ __attribute__((unused)) static inline void * \
+ _hook_typecheck_##hookname(int(*funcptr) HOOK_VOIDIFY arglist) \
{ \
return (void *)funcptr; \
} \
- __attribute__((unused)) static void *_hook_typecheck_arg_##hookname( \
- int(*funcptr) HOOK_ADDDEF arglist) \
+ __attribute__((unused)) static inline void \
+ *_hook_typecheck_arg_##hookname(int(*funcptr) \
+ HOOK_ADDDEF arglist) \
{ \
return (void *)funcptr; \
} \
struct hook _hook_##hookname = { \
.name = #hookname, .entries = NULL, .reverse = rev, \
}; \
- static int hook_call_##hookname arglist \
+ static int hook_call_##hookname HOOK_VOIDIFY arglist \
{ \
int hooksum = 0; \
struct hookent *he = _hook_##hookname.entries; \
void *hookarg; \
union { \
void *voidptr; \
- int(*fptr) arglist; \
+ int(*fptr) HOOK_VOIDIFY arglist; \
int(*farg) HOOK_ADDDEF arglist; \
} hookp; \
for (; he; he = he->next) { \
vty_out(vty, "\n");
}
+static void cli_show_interface_end(struct vty *vty, struct lyd_node *dnode)
+{
+ vty_out(vty, "exit\n");
+}
+
/*
* XPath: /frr-interface:lib/interface/description
*/
.create = lib_interface_create,
.destroy = lib_interface_destroy,
.cli_show = cli_show_interface,
+ .cli_show_end = cli_show_interface_end,
.get_next = lib_interface_get_next,
.get_keys = lib_interface_get_keys,
.lookup_entry = lib_interface_lookup_entry,
vty_out(vty, " exit\n");
}
+ vty_out(vty, "exit\n");
vty_out(vty, "!\n");
}
switch (opt) {
case 'h':
frr_help_exit(0);
- break;
case 'v':
print_version(di->progname);
exit(0);
return nexthop;
}
-struct nexthop *nexthop_from_blackhole(enum blackhole_type bh_type)
+struct nexthop *nexthop_from_blackhole(enum blackhole_type bh_type,
+ vrf_id_t nh_vrf_id)
{
struct nexthop *nexthop;
nexthop = nexthop_new();
- nexthop->vrf_id = VRF_DEFAULT;
+ nexthop->vrf_id = nh_vrf_id;
nexthop->type = NEXTHOP_TYPE_BLACKHOLE;
nexthop->bh_type = bh_type;
vrf_id_t vrf_id);
struct nexthop *nexthop_from_ipv6_ifindex(const struct in6_addr *ipv6,
ifindex_t ifindex, vrf_id_t vrf_id);
-struct nexthop *nexthop_from_blackhole(enum blackhole_type bh_type);
+struct nexthop *nexthop_from_blackhole(enum blackhole_type bh_type,
+ vrf_id_t nh_vrf_id);
/*
* Hash a nexthop. Suitable for use with hash tables.
nexthop_group_write_nexthop_internal(vty, nh);
}
+ vty_out(vty, "exit\n");
vty_out(vty, "!\n");
}
We need to make sure our route-map processing matches the above
*/
-route_map_result_t route_map_apply(struct route_map *map,
- const struct prefix *prefix, void *object)
+route_map_result_t route_map_apply_ext(struct route_map *map,
+ const struct prefix *prefix,
+ void *match_object, void *set_object)
{
static int recursion = 0;
enum route_map_cmd_result_t match_ret = RMAP_NOMATCH;
if ((!map->optimization_disabled)
&& (map->ipv4_prefix_table || map->ipv6_prefix_table)) {
- index = route_map_get_index(map, prefix, object,
+ index = route_map_get_index(map, prefix, match_object,
(uint8_t *)&match_ret);
if (index) {
index->applied++;
index->applied++;
/* Apply this index. */
match_ret = route_map_apply_match(&index->match_list,
- prefix, object);
+ prefix, match_object);
if (rmap_debug) {
zlog_debug(
"Route-map: %s, sequence: %d, prefix: %pFX, result: %s",
* return code.
*/
(void)(*set->cmd->func_apply)(
- set->value, prefix, object);
+ set->value, prefix, set_object);
/* Call another route-map if available */
if (index->nextrm) {
jump to it */
{
recursion++;
- ret = route_map_apply(
- nextrm, prefix, object);
+ ret = route_map_apply_ext(
+ nextrm, prefix,
+ match_object,
+ set_object);
recursion--;
}
(strmatch(A, "frr-bgp-route-map:set-large-community"))
#define IS_SET_COMMUNITY(A) \
(strmatch(A, "frr-bgp-route-map:set-community"))
+#define IS_SET_EXTCOMMUNITY_NONE(A) \
+ (strmatch(A, "frr-bgp-route-map:set-extcommunity-none"))
#define IS_SET_EXTCOMMUNITY_RT(A) \
(strmatch(A, "frr-bgp-route-map:set-extcommunity-rt"))
#define IS_SET_EXTCOMMUNITY_SOO(A) \
#define IS_SET_BGP_EVPN_GATEWAY_IP_IPV6(A) \
(strmatch(A, "frr-bgp-route-map:set-evpn-gateway-ip-ipv6"))
+enum ecommunity_lb_type {
+ EXPLICIT_BANDWIDTH,
+ CUMULATIVE_BANDWIDTH,
+ COMPUTED_BANDWIDTH
+};
+
/* Prototypes. */
extern void route_map_init(void);
struct route_map *route_map_lookup_warn_noexist(struct vty *vty, const char *name);
/* Apply route map to the object. */
-extern route_map_result_t route_map_apply(struct route_map *map,
- const struct prefix *prefix,
- void *object);
+extern route_map_result_t route_map_apply_ext(struct route_map *map,
+ const struct prefix *prefix,
+ void *match_object,
+ void *set_object);
+#define route_map_apply(map, prefix, object) \
+ route_map_apply_ext(map, prefix, object, object)
extern void route_map_add_hook(void (*func)(const char *));
extern void route_map_delete_hook(void (*func)(const char *));
#include "lib/command.h"
#include "lib/northbound_cli.h"
#include "lib/routemap.h"
-#include "bgpd/bgp_ecommunity.h"
#ifndef VTYSH_EXTRACT_PL
#include "lib/routemap_cli_clippy.c"
void route_map_instance_show_end(struct vty *vty, struct lyd_node *dnode)
{
+ vty_out(vty, "exit\n");
vty_out(vty, "!\n");
}
strlcat(str, " non-transitive", sizeof(str));
vty_out(vty, " set extcommunity bandwidth %s\n", str);
+ } else if (IS_SET_EXTCOMMUNITY_NONE(action)) {
+ if (yang_dnode_get_bool(
+ dnode,
+ "./rmap-set-action/frr-bgp-route-map:extcommunity-none"))
+ vty_out(vty, " set extcommunity none\n");
} else if (IS_SET_AGGREGATOR(action)) {
vty_out(vty, " set aggregator as %s %s\n",
yang_dnode_get_string(
static int vrf_backend;
static int vrf_backend_configured;
-static struct zebra_privs_t *vrf_daemon_privs;
static char vrf_default_name[VRF_NAMSIZ] = VRF_DEFAULT_NAME_INTERNAL;
/*
return VRF_LOGNAME(vrf);
}
-vrf_id_t vrf_name_to_id(const char *name)
-{
- struct vrf *vrf;
- vrf_id_t vrf_id = VRF_DEFAULT; // Pending: need a way to return invalid
- // id/ routine not used.
-
- if (!name)
- return vrf_id;
- vrf = vrf_lookup_by_name(name);
- if (vrf)
- vrf_id = vrf->vrf_id;
-
- return vrf_id;
-}
-
/* Get the data pointer of the specified VRF. If not found, create one. */
void *vrf_info_get(vrf_id_t vrf_id)
{
.prompt = "%s(config-vrf)# ",
};
-DEFUN_NOSH (vrf_netns,
- vrf_netns_cmd,
- "netns NAME",
- "Attach VRF to a Namespace\n"
- "The file name in " NS_RUN_DIR ", or a full pathname\n")
-{
- int idx_name = 1, ret;
- char *pathname = ns_netns_pathname(vty, argv[idx_name]->arg);
-
- VTY_DECLVAR_CONTEXT(vrf, vrf);
-
- if (!pathname)
- return CMD_WARNING_CONFIG_FAILED;
-
- frr_with_privs(vrf_daemon_privs) {
- ret = vrf_netns_handler_create(vty, vrf, pathname,
- NS_UNKNOWN,
- NS_UNKNOWN,
- NS_UNKNOWN);
- }
- return ret;
-}
-
-DEFUN_NOSH (no_vrf_netns,
- no_vrf_netns_cmd,
- "no netns [NAME]",
- NO_STR
- "Detach VRF from a Namespace\n"
- "The file name in " NS_RUN_DIR ", or a full pathname\n")
-{
- struct ns *ns = NULL;
-
- VTY_DECLVAR_CONTEXT(vrf, vrf);
-
- if (!vrf_is_backend_netns()) {
- vty_out(vty, "VRF backend is not Netns. Aborting\n");
- return CMD_WARNING_CONFIG_FAILED;
- }
- if (!vrf->ns_ctxt) {
- vty_out(vty, "VRF %s(%u) is not configured with NetNS\n",
- vrf->name, vrf->vrf_id);
- return CMD_WARNING_CONFIG_FAILED;
- }
-
- ns = (struct ns *)vrf->ns_ctxt;
-
- ns->vrf_ctxt = NULL;
- vrf_disable(vrf);
- /* vrf ID from VRF is necessary for Zebra
- * so that propagate to other clients is done
- */
- ns_delete(ns);
- vrf->ns_ctxt = NULL;
- return CMD_SUCCESS;
-}
-
/*
* Debug CLI for vrf's
*/
install_element(ENABLE_NODE, &no_vrf_debug_cmd);
}
-void vrf_cmd_init(int (*writefunc)(struct vty *vty),
- struct zebra_privs_t *daemon_privs)
+void vrf_cmd_init(int (*writefunc)(struct vty *vty))
{
install_element(CONFIG_NODE, &vrf_cmd);
install_element(CONFIG_NODE, &no_vrf_cmd);
install_node(&vrf_node);
install_default(VRF_NODE);
install_element(VRF_NODE, &vrf_exit_cmd);
- if (vrf_is_backend_netns() && ns_have_netns()) {
- /* Install NS commands. */
- vrf_daemon_privs = daemon_privs;
- install_element(VRF_NODE, &vrf_netns_cmd);
- install_element(VRF_NODE, &no_vrf_netns_cmd);
- }
}
void vrf_set_default_name(const char *default_name, bool force)
return ret;
}
-vrf_id_t vrf_generate_id(void)
-{
- static int vrf_id_local;
-
- return ++vrf_id_local;
-}
-
/* ------- Northbound callbacks ------- */
/*
extern struct vrf *vrf_get(vrf_id_t, const char *);
extern struct vrf *vrf_update(vrf_id_t new_vrf_id, const char *name);
extern const char *vrf_id_to_name(vrf_id_t vrf_id);
-extern vrf_id_t vrf_name_to_id(const char *);
#define VRF_LOGNAME(V) V ? V->name : "Unknown"
/* VRF vty command initialisation
*/
-extern void vrf_cmd_init(int (*writefunc)(struct vty *vty),
- struct zebra_privs_t *daemon_priv);
+extern void vrf_cmd_init(int (*writefunc)(struct vty *vty));
/* VRF vty debugging
*/
extern void vrf_disable(struct vrf *vrf);
extern int vrf_enable(struct vrf *vrf);
extern void vrf_delete(struct vrf *vrf);
-extern vrf_id_t vrf_generate_id(void);
extern const struct frr_yang_module_info frr_vrf_info;
/* Display current configuration. */
static int vty_config_write(struct vty *vty)
{
- vty_out(vty, "line vty\n");
+ vty_frame(vty, "line vty\n");
if (vty_accesslist_name)
vty_out(vty, " access-class %s\n", vty_accesslist_name);
if (no_password_check)
vty_out(vty, " no login\n");
+ vty_endframe(vty, "exit\n");
+
if (do_log_commands)
vty_out(vty, "log commands\n");
return -1;
}
-int zclient_neigh_ip_encode(struct stream *s,
- uint16_t cmd,
- union sockunion *in,
- union sockunion *out,
- struct interface *ifp)
+int zclient_neigh_ip_encode(struct stream *s, uint16_t cmd, union sockunion *in,
+ union sockunion *out, struct interface *ifp,
+ int ndm_state)
{
int ret = 0;
stream_putc(s, AF_UNSPEC);
stream_putl(s, ifp->ifindex);
if (out)
- stream_putl(s, ZEBRA_NEIGH_STATE_REACHABLE);
+ stream_putl(s, ndm_state);
else
stream_putl(s, ZEBRA_NEIGH_STATE_FAILED);
return ret;
* ip_in is the underlay IP, ip_out is the tunnel dest
* index stands for the index of the interface
* ndm state stands for the NDM value in netlink
+ * (see linux/neighbour.h)
*/
+#define ZEBRA_NEIGH_STATE_INCOMPLETE (0x01)
#define ZEBRA_NEIGH_STATE_REACHABLE (0x02)
-#define ZEBRA_NEIGH_STATE_FAILED (0x20)
+#define ZEBRA_NEIGH_STATE_STALE (0x04)
+#define ZEBRA_NEIGH_STATE_DELAY (0x08)
+#define ZEBRA_NEIGH_STATE_PROBE (0x10)
+#define ZEBRA_NEIGH_STATE_FAILED (0x20)
+#define ZEBRA_NEIGH_STATE_NOARP (0x40)
+#define ZEBRA_NEIGH_STATE_PERMANENT (0x80)
+#define ZEBRA_NEIGH_STATE_NONE (0x00)
+
struct zapi_neigh_ip {
int cmd;
struct ipaddr ip_in;
uint32_t ndm_state;
};
int zclient_neigh_ip_decode(struct stream *s, struct zapi_neigh_ip *api);
-int zclient_neigh_ip_encode(struct stream *s,
- uint16_t cmd,
- union sockunion *in,
- union sockunion *out,
- struct interface *ifp);
+int zclient_neigh_ip_encode(struct stream *s, uint16_t cmd, union sockunion *in,
+ union sockunion *out, struct interface *ifp,
+ int ndm_state);
/*
* We reserve the top 4 bits for l2-NHG, everything else
break;
default:
frr_help_exit(1);
- break;
}
}
}
stream_reset(s);
zclient_neigh_ip_encode(s, out ? ZEBRA_NEIGH_IP_ADD :
ZEBRA_NEIGH_IP_DEL, in, out,
- ifp);
+ ifp, out ? ZEBRA_NEIGH_STATE_REACHABLE
+ : ZEBRA_NEIGH_STATE_FAILED);
stream_putw_at(s, 0, stream_get_endp(s));
zclient_send_message(zclient);
}
}
}
- vty_endframe(vty, "!\n");
+ vty_endframe(vty, "exit\n!\n");
}
return 0;
install_element(CONFIG_NODE, &nhrp_multicast_nflog_group_cmd);
install_element(CONFIG_NODE, &no_nhrp_multicast_nflog_group_cmd);
- vrf_cmd_init(NULL, &nhrpd_privs);
+ vrf_cmd_init(NULL);
/* interface specific commands */
if_cmd_init(interface_config_write);
uint16_t type;
int is_debug = 0;
- if (IS_OSPF6_DEBUG_ABR)
- zlog_debug("%s : start area %s, route %pFX", __func__,
- area->name, &route->prefix);
+ if (IS_OSPF6_DEBUG_ABR) {
+ char buf[BUFSIZ];
+
+ if (route->type == OSPF6_DEST_TYPE_ROUTER)
+ inet_ntop(AF_INET,
+ &ADV_ROUTER_IN_PREFIX(&route->prefix), buf,
+ sizeof(buf));
+ else
+ prefix2str(&route->prefix, buf, sizeof(buf));
+
+ zlog_debug("%s : start area %s, route %s", __func__, area->name,
+ buf);
+ }
if (route->type == OSPF6_DEST_TYPE_ROUTER)
summary_table = area->summary_router;
struct ospf6_area *oa;
struct ospf6_route *range = NULL;
- if (IS_OSPF6_DEBUG_ABR)
- zlog_debug("%s: route %pFX", __func__, &route->prefix);
+ if (IS_OSPF6_DEBUG_ABR) {
+ char buf[BUFSIZ];
+
+ if (route->type == OSPF6_DEST_TYPE_ROUTER)
+ inet_ntop(AF_INET,
+ &ADV_ROUTER_IN_PREFIX(&route->prefix), buf,
+ sizeof(buf));
+ else
+ prefix2str(&route->prefix, buf, sizeof(buf));
+
+ zlog_debug("%s: route %s", __func__, buf);
+ }
if (route->type == OSPF6_DEST_TYPE_NETWORK) {
oa = ospf6_area_lookup(route->path.area_id, ospf6);
def->path.cost = metric_value(o, type, 0);
for (ALL_LIST_ELEMENTS(o->area_list, node, nnode, oa)) {
- if (!IS_AREA_STUB(oa)) {
+ if (IS_AREA_STUB(oa) || (IS_AREA_NSSA(oa) && oa->no_summary)) {
+ /* announce defaults to stubby areas */
+ if (IS_OSPF6_DEBUG_ABR)
+ zlog_debug(
+ "Announcing default route into stubby area %s",
+ oa->name);
+ UNSET_FLAG(def->flag, OSPF6_ROUTE_REMOVE);
+ ospf6_abr_originate_summary_to_area(def, oa);
+ } else {
/* withdraw defaults when an area switches from stub to
* non-stub */
route = ospf6_route_lookup(&def->prefix,
SET_FLAG(def->flag, OSPF6_ROUTE_REMOVE);
ospf6_abr_originate_summary_to_area(def, oa);
}
- } else {
- /* announce defaults to stubby areas */
- if (IS_OSPF6_DEBUG_ABR)
- zlog_debug(
- "Announcing default route into stubby area %s",
- oa->name);
- UNSET_FLAG(def->flag, OSPF6_ROUTE_REMOVE);
- ospf6_abr_originate_summary_to_area(def, oa);
}
}
ospf6_route_delete(def);
__func__, &prefix, listcount(old->paths));
}
for (old_route = old; old_route; old_route = old_route->next) {
- if (!ospf6_route_is_same(old_route, route) ||
- (old_route->type != route->type) ||
- (old_route->path.type != route->path.type))
+
+ /* The route linked-list is grouped in batches of prefix.
+ * If the new prefix is not the same as the one of interest
+ * then we have walked over the end of the batch and so we
+ * should break rather than continuing unnecessarily.
+ */
+ if (!ospf6_route_is_same(old_route, route))
+ break;
+ if ((old_route->type != route->type)
+ || (old_route->path.type != route->path.type))
continue;
if ((ospf6_route_cmp(route, old_route) != 0)) {
#include "ospf6d.h"
#include "lib/json.h"
#include "ospf6_nssa.h"
+#ifndef VTYSH_EXTRACT_PL
+#include "ospf6d/ospf6_area_clippy.c"
+#endif
DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_AREA, "OSPF6 area");
DEFINE_MTYPE_STATIC(OSPF6D, OSPF6_PLISTNAME, "Prefix list name");
static int ospf6_area_stub_set(struct ospf6 *ospf6, struct ospf6_area *area)
{
if (!IS_AREA_STUB(area)) {
+ /* Disable NSSA first. */
+ ospf6_area_nssa_unset(ospf6, area);
+
SET_FLAG(area->flag, OSPF6_AREA_STUB);
ospf6_area_stub_update(area);
}
return 1;
}
-static void ospf6_area_stub_unset(struct ospf6 *ospf6, struct ospf6_area *area)
+void ospf6_area_stub_unset(struct ospf6 *ospf6, struct ospf6_area *area)
{
if (IS_AREA_STUB(area)) {
UNSET_FLAG(area->flag, OSPF6_AREA_STUB);
else
vty_out(vty, " area %s stub\n", oa->name);
}
- if (IS_AREA_NSSA(oa))
- vty_out(vty, " area %s nssa\n", oa->name);
+ if (IS_AREA_NSSA(oa)) {
+ vty_out(vty, " area %s nssa", oa->name);
+ if (oa->no_summary)
+ vty_out(vty, " no-summary");
+ vty_out(vty, "\n");
+ }
if (PREFIX_NAME_IN(oa))
vty_out(vty, " area %s filter-list prefix %s in\n",
oa->name, PREFIX_NAME_IN(oa));
return CMD_SUCCESS;
}
-DEFUN(ospf6_area_nssa, ospf6_area_nssa_cmd,
- "area <A.B.C.D|(0-4294967295)> nssa",
+DEFPY(ospf6_area_nssa, ospf6_area_nssa_cmd,
+ "area <A.B.C.D|(0-4294967295)>$area_str nssa [no-summary$no_summary]",
"OSPF6 area parameters\n"
"OSPF6 area ID in IP address format\n"
"OSPF6 area ID as a decimal value\n"
- "Configure OSPF6 area as nssa\n")
+ "Configure OSPF6 area as nssa\n"
+ "Do not inject inter-area routes into area\n")
{
- int idx_ipv4_number = 1;
struct ospf6_area *area;
VTY_DECLVAR_CONTEXT(ospf6, ospf6);
- OSPF6_CMD_AREA_GET(argv[idx_ipv4_number]->arg, area, ospf6);
+ OSPF6_CMD_AREA_GET(area_str, area, ospf6);
if (!ospf6_area_nssa_set(ospf6, area)) {
vty_out(vty,
return CMD_WARNING_CONFIG_FAILED;
}
+ if (no_summary)
+ ospf6_area_no_summary_set(ospf6, area);
+ else
+ ospf6_area_no_summary_unset(ospf6, area);
+ if (ospf6_check_and_set_router_abr(ospf6))
+ ospf6_abr_defaults_to_stub(ospf6);
+
return CMD_SUCCESS;
}
-DEFUN(no_ospf6_area_nssa, no_ospf6_area_nssa_cmd,
- "no area <A.B.C.D|(0-4294967295)> nssa",
+DEFPY(no_ospf6_area_nssa, no_ospf6_area_nssa_cmd,
+ "no area <A.B.C.D|(0-4294967295)>$area_str nssa [no-summary$no_summary]",
NO_STR
"OSPF6 area parameters\n"
"OSPF6 area ID in IP address format\n"
"OSPF6 area ID as a decimal value\n"
- "Configure OSPF6 area as nssa\n")
+ "Configure OSPF6 area as nssa\n"
+ "Do not inject inter-area routes into area\n")
{
- int idx_ipv4_number = 2;
struct ospf6_area *area;
VTY_DECLVAR_CONTEXT(ospf6, ospf6);
- OSPF6_CMD_AREA_GET(argv[idx_ipv4_number]->arg, area, ospf6);
+ OSPF6_CMD_AREA_GET(area_str, area, ospf6);
ospf6_area_nssa_unset(ospf6, area);
+ ospf6_area_no_summary_unset(ospf6, area);
return CMD_SUCCESS;
}
extern int ospf6_area_cmp(void *va, void *vb);
-extern struct ospf6_area *ospf6_area_create(uint32_t, struct ospf6 *, int);
-extern void ospf6_area_delete(struct ospf6_area *);
-extern struct ospf6_area *ospf6_area_lookup(uint32_t, struct ospf6 *);
+extern struct ospf6_area *ospf6_area_create(uint32_t area_id,
+ struct ospf6 *ospf6, int df);
+extern void ospf6_area_delete(struct ospf6_area *oa);
+extern struct ospf6_area *ospf6_area_lookup(uint32_t area_id,
+ struct ospf6 *ospf6);
extern struct ospf6_area *ospf6_area_lookup_by_area_id(uint32_t area_id);
-extern void ospf6_area_enable(struct ospf6_area *);
-extern void ospf6_area_disable(struct ospf6_area *);
+extern void ospf6_area_stub_unset(struct ospf6 *ospf6, struct ospf6_area *area);
+extern void ospf6_area_enable(struct ospf6_area *oa);
+extern void ospf6_area_disable(struct ospf6_area *oa);
-extern void ospf6_area_show(struct vty *, struct ospf6_area *,
+extern void ospf6_area_show(struct vty *vty, struct ospf6_area *oa,
json_object *json_areas, bool use_json);
extern void ospf6_area_plist_update(struct prefix_list *plist, int add);
next_route = old_route->next;
- if (!ospf6_route_is_same(old_route, route)
- || (old_route->path.type != route->path.type))
+ /* The route linked-list is grouped in batches of prefix.
+ * If the new prefix is not the same as the one of interest
+ * then we have walked over the end of the batch and so we
+ * should break rather than continuing unnecessarily.
+ */
+ if (!ospf6_route_is_same(old_route, route))
+ break;
+ if (old_route->path.type != route->path.type)
continue;
/* Current and New route has same origin,
/* Add new route */
for (old_route = old; old_route; old_route = old_route->next) {
- /* Current and New Route prefix or route type
- * is not same skip this current node.
+ /* The route linked-list is grouped in batches of prefix.
+ * If the new prefix is not the same as the one of interest
+ * then we have walked over the end of the batch and so we
+ * should break rather than continuing unnecessarily.
*/
- if (!ospf6_route_is_same(old_route, route)
- || (old_route->path.type != route->path.type))
+ if (!ospf6_route_is_same(old_route, route))
+ break;
+ if (old_route->path.type != route->path.type)
continue;
/* Old Route and New Route have Equal Cost, Merge NHs */
&asbr_id);
return;
}
+
+ /*
+ * RFC 3101 - Section 2.5:
+ * "For a Type-7 LSA the matching routing table entry must
+ * specify an intra-area path through the LSA's originating
+ * NSSA".
+ */
+ if (ntohs(lsa->header->type) == OSPF6_LSTYPE_TYPE_7
+ && (asbr_entry->path.area_id != oa->area_id
+ || asbr_entry->path.type != OSPF6_PATH_TYPE_INTRA)) {
+ if (IS_OSPF6_DEBUG_EXAMIN(AS_EXTERNAL))
+ zlog_debug(
+ "Intra-area route to NSSA ASBR not found: %pFX",
+ &asbr_id);
+ return;
+ }
+ }
+
+ /*
+ * RFC 3101 - Section 2.5:
+ * "If the destination is a Type-7 default route (destination ID =
+ * DefaultDestination) and one of the following is true, then do
+ * nothing with this LSA and consider the next in the list:
+ *
+ * o The calculating router is a border router and the LSA has
+ * its P-bit clear. Appendix E describes a technique
+ * whereby an NSSA border router installs a Type-7 default
+ * LSA without propagating it.
+ *
+ * o The calculating router is a border router and is
+ * suppressing the import of summary routes as Type-3
+ * summary-LSAs".
+ */
+ if (ntohs(lsa->header->type) == OSPF6_LSTYPE_TYPE_7
+ && external->prefix.prefix_length == 0
+ && CHECK_FLAG(ospf6->flag, OSPF6_FLAG_ABR)
+ && (CHECK_FLAG(external->prefix.prefix_options,
+ OSPF6_PREFIX_OPTION_P)
+ || oa->no_summary)) {
+ if (IS_OSPF6_DEBUG_EXAMIN(AS_EXTERNAL))
+ zlog_debug("Skipping Type-7 default route");
+ return;
}
/* Check the forwarding address */
struct ospf6 *ospf6 = oa->ospf6;
const struct route_node *iterend;
- /* skip if router is in other non-stub areas */
+ /* skip if router is in other non-stub/non-NSSA areas */
for (ALL_LIST_ELEMENTS(ospf6->area_list, node, nnode, area))
- if (!IS_AREA_STUB(area))
+ if (!IS_AREA_STUB(area) && !IS_AREA_NSSA(area))
return;
/* if router is only in a stub area then purge AS-External LSAs */
ospf6_asbr_status_update(ospf6, ospf6->redistribute);
}
-DEFUN (ospf6_redistribute,
+DEFPY (ospf6_redistribute,
ospf6_redistribute_cmd,
- "redistribute " FRR_REDIST_STR_OSPF6D,
- "Redistribute\n"
- FRR_REDIST_HELP_STR_OSPF6D)
-{
- int type;
- struct ospf6_redist *red;
-
- VTY_DECLVAR_CONTEXT(ospf6, ospf6);
-
- char *proto = argv[argc - 1]->text;
-
- type = proto_redistnum(AFI_IP6, proto);
- if (type < 0)
- return CMD_WARNING_CONFIG_FAILED;
-
- red = ospf6_redist_lookup(ospf6, type, 0);
- if (!red) {
- ospf6_redist_add(ospf6, type, 0);
- } else {
- /* To check, if user is providing same config */
- if (ROUTEMAP_NAME(red) == NULL)
- return CMD_SUCCESS;
-
- ospf6_asbr_redistribute_unset(ospf6, red, type);
- }
-
- ospf6_asbr_redistribute_set(ospf6, type);
-
- return CMD_SUCCESS;
-}
-
-DEFUN (ospf6_redistribute_routemap,
- ospf6_redistribute_routemap_cmd,
- "redistribute " FRR_REDIST_STR_OSPF6D " route-map WORD",
+ "redistribute " FRR_REDIST_STR_OSPF6D "[{metric (0-16777214)|metric-type (1-2)$metric_type|route-map WORD$rmap_str}]",
"Redistribute\n"
FRR_REDIST_HELP_STR_OSPF6D
+ "Metric for redistributed routes\n"
+ "OSPF default metric\n"
+ "OSPF exterior metric type for redistributed routes\n"
+ "Set OSPF External Type 1/2 metrics\n"
"Route map reference\n"
"Route map name\n")
{
- int idx_protocol = 1;
- int idx_word = 3;
int type;
struct ospf6_redist *red;
+ int idx_protocol = 1;
+ char *proto = argv[idx_protocol]->text;
VTY_DECLVAR_CONTEXT(ospf6, ospf6);
- char *proto = argv[idx_protocol]->text;
type = proto_redistnum(AFI_IP6, proto);
if (type < 0)
return CMD_WARNING_CONFIG_FAILED;
+ if (!metric_str)
+ metric = -1;
+ if (!metric_type_str)
+ metric_type = -1;
+
red = ospf6_redist_lookup(ospf6, type, 0);
if (!red) {
red = ospf6_redist_add(ospf6, type, 0);
} else {
- /* To check, if user is providing same route map */
- if ((ROUTEMAP_NAME(red) != NULL)
- && (strcmp(argv[idx_word]->arg, ROUTEMAP_NAME(red)) == 0))
+ /* Check if nothing has changed. */
+ if (red->dmetric.value == metric
+ && red->dmetric.type == metric_type
+ && ((!ROUTEMAP_NAME(red) && !rmap_str)
+ || (ROUTEMAP_NAME(red) && rmap_str
+ && strmatch(ROUTEMAP_NAME(red), rmap_str))))
return CMD_SUCCESS;
ospf6_asbr_redistribute_unset(ospf6, red, type);
}
- ospf6_asbr_routemap_set(red, argv[idx_word]->arg);
+ red->dmetric.value = metric;
+ red->dmetric.type = metric_type;
+ if (rmap_str)
+ ospf6_asbr_routemap_set(red, rmap_str);
+ else
+ ospf6_asbr_routemap_unset(red);
ospf6_asbr_redistribute_set(ospf6, type);
return CMD_SUCCESS;
DEFUN (no_ospf6_redistribute,
no_ospf6_redistribute_cmd,
- "no redistribute " FRR_REDIST_STR_OSPF6D " [route-map WORD]",
+ "no redistribute " FRR_REDIST_STR_OSPF6D "[{metric (0-16777214)|metric-type (1-2)|route-map WORD}]",
NO_STR
"Redistribute\n"
FRR_REDIST_HELP_STR_OSPF6D
+ "Metric for redistributed routes\n"
+ "OSPF default metric\n"
+ "OSPF exterior metric type for redistributed routes\n"
+ "Set OSPF External Type 1/2 metrics\n"
"Route map reference\n"
"Route map name\n")
{
- int idx_protocol = 2;
int type;
struct ospf6_redist *red;
+ int idx_protocol = 2;
+ char *proto = argv[idx_protocol]->text;
VTY_DECLVAR_CONTEXT(ospf6, ospf6);
- char *proto = argv[idx_protocol]->text;
type = proto_redistnum(AFI_IP6, proto);
if (type < 0)
return CMD_WARNING_CONFIG_FAILED;
if (type == ZEBRA_ROUTE_OSPF6)
continue;
+ vty_out(vty, " redistribute %s", ZROUTE_NAME(type));
+ if (red->dmetric.value >= 0)
+ vty_out(vty, " metric %d", red->dmetric.value);
+ if (red->dmetric.type != DEFAULT_METRIC_TYPE)
+ vty_out(vty, " metric-type 1");
if (ROUTEMAP_NAME(red))
- vty_out(vty, " redistribute %s route-map %s\n",
- ZROUTE_NAME(type), ROUTEMAP_NAME(red));
- else
- vty_out(vty, " redistribute %s\n", ZROUTE_NAME(type));
+ vty_out(vty, " route-map %s", ROUTEMAP_NAME(red));
+ vty_out(vty, "\n");
}
return 0;
install_element(OSPF6_NODE,
&no_ospf6_default_information_originate_cmd);
install_element(OSPF6_NODE, &ospf6_redistribute_cmd);
- install_element(OSPF6_NODE, &ospf6_redistribute_routemap_cmd);
install_element(OSPF6_NODE, &no_ospf6_redistribute_cmd);
}
ospf6_lsdb_lookup(lsa->header->type, lsa->header->id,
lsa->header->adv_router, lsa->lsdb);
- if (lsa_in_db && lsa_in_db->tobe_acknowledged)
+ if (lsa_in_db && lsa_in_db->tobe_acknowledged) {
+ ospf6_lsa_unlock(lsa);
+ if (lsanext)
+ ospf6_lsa_unlock(lsanext);
+
return OSPF6_TRUE;
+ }
}
return OSPF6_FALSE;
}
}
-static int show_ospf6_gr_helper_details(struct vty *vty, struct ospf6 *ospf6,
+static void show_ospf6_gr_helper_details(struct vty *vty, struct ospf6 *ospf6,
json_object *json, bool uj, bool detail)
{
struct ospf6_interface *oi;
}
}
}
-
- return CMD_SUCCESS;
}
/* Graceful Restart HELPER config Commands */
int config_write_ospf6_debug_gr_helper(struct vty *vty)
{
if (IS_DEBUG_OSPF6_GR)
- vty_out(vty, "debug ospf6 gr helper\n");
+ vty_out(vty, "debug ospf6 graceful-restart\n");
return 0;
}
ospf6_bfd_write_config(vty, oi);
- vty_endframe(vty, "!\n");
+ vty_endframe(vty, "exit\n!\n");
}
return 0;
}
extern struct ospf6_interface *
ospf6_interface_lookup_by_ifindex(ifindex_t, vrf_id_t vrf_id);
-extern struct ospf6_interface *ospf6_interface_create(struct interface *);
-extern void ospf6_interface_delete(struct ospf6_interface *);
+extern struct ospf6_interface *ospf6_interface_create(struct interface *ifp);
+extern void ospf6_interface_delete(struct ospf6_interface *oi);
-extern void ospf6_interface_enable(struct ospf6_interface *);
-extern void ospf6_interface_disable(struct ospf6_interface *);
+extern void ospf6_interface_enable(struct ospf6_interface *oi);
+extern void ospf6_interface_disable(struct ospf6_interface *oi);
-extern void ospf6_interface_state_update(struct interface *);
-extern void ospf6_interface_connected_route_update(struct interface *);
+extern void ospf6_interface_state_update(struct interface *ifp);
+extern void ospf6_interface_connected_route_update(struct interface *ifp);
extern struct in6_addr *
ospf6_interface_get_global_address(struct interface *ifp);
/* interface event */
-extern int interface_up(struct thread *);
-extern int interface_down(struct thread *);
-extern int wait_timer(struct thread *);
-extern int backup_seen(struct thread *);
-extern int neighbor_change(struct thread *);
+extern int interface_up(struct thread *thread);
+extern int interface_down(struct thread *thread);
+extern int wait_timer(struct thread *thread);
+extern int backup_seen(struct thread *thread);
+extern int neighbor_change(struct thread *thread);
extern void ospf6_interface_init(void);
extern void ospf6_interface_clear(struct interface *ifp);
for (old_route = old; old_route; old_route = old_route->next) {
bool route_updated = false;
- if (!ospf6_route_is_same(old_route, route) ||
- (old_route->path.type != route->path.type))
+ /* The route linked-list is grouped in batches of prefix.
+ * If the new prefix is not the same as the one of interest
+ * then we have walked over the end of the batch and so we
+ * should break rather than continuing unnecessarily.
+ */
+ if (!ospf6_route_is_same(old_route, route))
+ break;
+ if (old_route->path.type != route->path.type)
continue;
/* Current and New route has same origin,
for (old_route = old; old_route; old_route = old_route->next) {
- if (!ospf6_route_is_same(old_route, route) ||
- (old_route->path.type != route->path.type))
+ /* The route linked-list is grouped in batches of prefix.
+ * If the new prefix is not the same as the one of interest
+ * then we have walked over the end of the batch and so we
+ * should break rather than continuing unnecessarily.
+ */
+ if (!ospf6_route_is_same(old_route, route))
+ break;
+ if (old_route->path.type != route->path.type)
continue;
/* Old Route and New Route have Equal Cost, Merge NHs */
struct ospf6_lsa *lsa);
extern int ospf6_router_is_stub_router(struct ospf6_lsa *lsa);
-extern int ospf6_router_lsa_originate(struct thread *);
-extern int ospf6_network_lsa_originate(struct thread *);
-extern int ospf6_link_lsa_originate(struct thread *);
-extern int ospf6_intra_prefix_lsa_originate_transit(struct thread *);
-extern int ospf6_intra_prefix_lsa_originate_stub(struct thread *);
+extern int ospf6_router_lsa_originate(struct thread *thread);
+extern int ospf6_network_lsa_originate(struct thread *thread);
+extern int ospf6_link_lsa_originate(struct thread *thread);
+extern int ospf6_intra_prefix_lsa_originate_transit(struct thread *thread);
+extern int ospf6_intra_prefix_lsa_originate_stub(struct thread *thread);
extern void ospf6_intra_prefix_lsa_add(struct ospf6_lsa *lsa);
extern void ospf6_intra_prefix_lsa_remove(struct ospf6_lsa *lsa);
extern int ospf6_orig_as_external_lsa(struct thread *thread);
extern int metric_value(struct ospf6 *ospf6, int type, uint8_t instance);
extern int ospf6_lsa_is_differ(struct ospf6_lsa *lsa1, struct ospf6_lsa *lsa2);
extern int ospf6_lsa_is_changed(struct ospf6_lsa *lsa1, struct ospf6_lsa *lsa2);
-extern uint16_t ospf6_lsa_age_current(struct ospf6_lsa *);
-extern void ospf6_lsa_age_update_to_send(struct ospf6_lsa *, uint32_t);
-extern void ospf6_lsa_premature_aging(struct ospf6_lsa *);
-extern int ospf6_lsa_compare(struct ospf6_lsa *, struct ospf6_lsa *);
+extern uint16_t ospf6_lsa_age_current(struct ospf6_lsa *lsa);
+extern void ospf6_lsa_age_update_to_send(struct ospf6_lsa *lsa,
+ uint32_t transdelay);
+extern void ospf6_lsa_premature_aging(struct ospf6_lsa *lsa);
+extern int ospf6_lsa_compare(struct ospf6_lsa *lsa1, struct ospf6_lsa *lsa2);
extern char *ospf6_lsa_printbuf(struct ospf6_lsa *lsa, char *buf, int size);
extern void ospf6_lsa_header_print_raw(struct ospf6_lsa_header *header);
extern struct ospf6_lsa *
ospf6_lsa_create_headeronly(struct ospf6_lsa_header *header);
extern void ospf6_lsa_delete(struct ospf6_lsa *lsa);
-extern struct ospf6_lsa *ospf6_lsa_copy(struct ospf6_lsa *);
+extern struct ospf6_lsa *ospf6_lsa_copy(struct ospf6_lsa *lsa);
extern struct ospf6_lsa *ospf6_lsa_lock(struct ospf6_lsa *lsa);
extern struct ospf6_lsa *ospf6_lsa_unlock(struct ospf6_lsa *lsa);
-extern int ospf6_lsa_expire(struct thread *);
-extern int ospf6_lsa_refresh(struct thread *);
+extern int ospf6_lsa_expire(struct thread *thread);
+extern int ospf6_lsa_refresh(struct thread *thread);
-extern unsigned short ospf6_lsa_checksum(struct ospf6_lsa_header *);
-extern int ospf6_lsa_checksum_valid(struct ospf6_lsa_header *);
+extern unsigned short ospf6_lsa_checksum(struct ospf6_lsa_header *lsah);
+extern int ospf6_lsa_checksum_valid(struct ospf6_lsa_header *lsah);
extern int ospf6_lsa_prohibited_duration(uint16_t type, uint32_t id,
uint32_t adv_router, void *scope);
/*
* Since we are locking the lsa in ospf6_lsdb_head
- * and then unlocking it in lspf6_lsa_lock, when
+ * and then unlocking it in ospf6_lsa_unlock, when
* we cache the next pointer we need to increment
* the lock for the lsa so we don't accidently free
* it really early.
#define ALL_LSDB(lsdb, lsa, lsanext) \
const struct route_node *iterend = \
ospf6_lsdb_head(lsdb, 0, 0, 0, &lsa); \
- (lsa) != NULL &&ospf6_lsa_lock(lsa) \
+ (lsa) != NULL && ospf6_lsa_lock(lsa) \
&& ((lsanext) = ospf6_lsdb_next(iterend, (lsa)), 1); \
ospf6_lsa_unlock(lsa), (lsa) = (lsanext)
break;
default:
frr_help_exit(1);
- break;
}
}
if (twoway)
thread_execute(master, twoway_received, on, 0);
else {
- if (IS_DEBUG_OSPF6_GR)
- zlog_debug(
- "%s, Received oneway hello from RESTARTER so ignore here.",
- __PRETTY_FUNCTION__);
-
- if (!OSPF6_GR_IS_ACTIVE_HELPER(on)) {
+ if (OSPF6_GR_IS_ACTIVE_HELPER(on)) {
+ if (IS_DEBUG_OSPF6_GR)
+ zlog_debug(
+ "%s, Received oneway hello from RESTARTER so ignore here.",
+ __PRETTY_FUNCTION__);
+ } else {
/* If the router is DR_OTHER, RESTARTER will not wait
* until it receives the hello from it if it receives
* from DR and BDR.
int ospf6_neighbor_cmp(void *va, void *vb);
void ospf6_neighbor_dbex_init(struct ospf6_neighbor *on);
-struct ospf6_neighbor *ospf6_neighbor_lookup(uint32_t,
- struct ospf6_interface *);
-struct ospf6_neighbor *ospf6_neighbor_create(uint32_t,
- struct ospf6_interface *);
-void ospf6_neighbor_delete(struct ospf6_neighbor *);
+struct ospf6_neighbor *ospf6_neighbor_lookup(uint32_t router_id,
+ struct ospf6_interface *oi);
+struct ospf6_neighbor *ospf6_neighbor_create(uint32_t router_id,
+ struct ospf6_interface *oi);
+void ospf6_neighbor_delete(struct ospf6_neighbor *on);
/* Neighbor event */
-extern int hello_received(struct thread *);
-extern int twoway_received(struct thread *);
-extern int negotiation_done(struct thread *);
-extern int exchange_done(struct thread *);
-extern int loading_done(struct thread *);
-extern int adj_ok(struct thread *);
-extern int seqnumber_mismatch(struct thread *);
-extern int bad_lsreq(struct thread *);
-extern int oneway_received(struct thread *);
-extern int inactivity_timer(struct thread *);
-extern void ospf6_check_nbr_loading(struct ospf6_neighbor *);
+extern int hello_received(struct thread *thread);
+extern int twoway_received(struct thread *thread);
+extern int negotiation_done(struct thread *thread);
+extern int exchange_done(struct thread *thread);
+extern int loading_done(struct thread *thread);
+extern int adj_ok(struct thread *thread);
+extern int seqnumber_mismatch(struct thread *thread);
+extern int bad_lsreq(struct thread *thread);
+extern int oneway_received(struct thread *thread);
+extern int inactivity_timer(struct thread *thread);
+extern void ospf6_check_nbr_loading(struct ospf6_neighbor *on);
extern void ospf6_neighbor_init(void);
extern int config_write_ospf6_debug_neighbor(struct vty *vty);
uint16_t type;
struct ospf6_lsa *lsa = NULL, *type5 = NULL;
struct ospf6 *ospf6 = area->ospf6;
- const struct route_node *rt = NULL;
if (IS_OSPF6_DEBUG_NSSA)
zlog_debug("%s: area %s", __func__, area->name);
/* Flush the NSSA LSA */
type = htons(OSPF6_LSTYPE_TYPE_7);
- rt = ospf6_lsdb_head(area->lsdb_self, 0, type, ospf6->router_id, &lsa);
- while (lsa) {
+ for (ALL_LSDB_TYPED_ADVRTR(area->lsdb, type, ospf6->router_id, lsa)) {
lsa->header->age = htons(OSPF_LSA_MAXAGE);
SET_FLAG(lsa->flag, OSPF6_LSA_FLUSH);
ospf6_flood(NULL, lsa);
+
/* Flush the translated LSA */
if (ospf6_check_and_set_router_abr(ospf6)) {
type = htons(OSPF6_LSTYPE_AS_EXTERNAL);
ospf6_flood(NULL, type5);
}
}
- lsa = ospf6_lsdb_next(rt, lsa);
}
}
}
-static void ospf6_area_nssa_update(struct ospf6_area *area)
+void ospf6_area_nssa_update(struct ospf6_area *area)
{
if (IS_AREA_NSSA(area)) {
- if (!ospf6_check_and_set_router_abr(area->ospf6))
- OSPF6_OPT_CLEAR(area->options, OSPF6_OPT_E);
+ OSPF6_OPT_CLEAR(area->options, OSPF6_OPT_E);
area->ospf6->anyNSSA++;
OSPF6_OPT_SET(area->options, OSPF6_OPT_N);
area->NSSATranslatorRole = OSPF6_NSSA_ROLE_CANDIDATE;
if (IS_OSPF6_DEBUG_ORIGINATE(ROUTER))
zlog_debug("Normal area for if %s", area->name);
OSPF6_OPT_CLEAR(area->options, OSPF6_OPT_N);
- if (ospf6_check_and_set_router_abr(area->ospf6))
- OSPF6_OPT_SET(area->options, OSPF6_OPT_E);
+ OSPF6_OPT_SET(area->options, OSPF6_OPT_E);
area->ospf6->anyNSSA--;
area->NSSATranslatorState = OSPF6_NSSA_TRANSLATE_DISABLED;
}
if (IS_AREA_NSSA(area)) {
OSPF6_ROUTER_LSA_SCHEDULE(area);
+ /* Flush external LSAs. */
+ ospf6_asbr_remove_externals_from_area(area);
+
/* Check if router is ABR */
if (ospf6_check_and_set_router_abr(area->ospf6)) {
if (IS_OSPF6_DEBUG_NSSA)
if (IS_OSPF6_DEBUG_NSSA)
zlog_debug("Normal area %s", area->name);
ospf6_nssa_flush_area(area);
- ospf6_area_disable(area);
- ospf6_area_delete(area);
}
}
{
if (!IS_AREA_NSSA(area)) {
+ /* Disable stub first. */
+ ospf6_area_stub_unset(ospf6, area);
+
SET_FLAG(area->flag, OSPF6_AREA_NSSA);
if (IS_OSPF6_DEBUG_NSSA)
zlog_debug("area %s nssa set", area->name);
int ospf6_area_nssa_set(struct ospf6 *ospf6, struct ospf6_area *area);
extern void ospf6_nssa_lsa_flush(struct ospf6 *ospf6, struct prefix_ipv6 *p);
-extern struct ospf6_lsa *ospf6_translated_nssa_refresh(struct ospf6_area *,
- struct ospf6_lsa *,
- struct ospf6_lsa *);
-extern struct ospf6_lsa *ospf6_translated_nssa_originate(struct ospf6_area *,
- struct ospf6_lsa *);
+extern struct ospf6_lsa *ospf6_translated_nssa_refresh(struct ospf6_area *oa,
+ struct ospf6_lsa *type7,
+ struct ospf6_lsa *type5);
+extern struct ospf6_lsa *
+ospf6_translated_nssa_originate(struct ospf6_area *oa, struct ospf6_lsa *type7);
extern void ospf6_asbr_nssa_redist_task(struct ospf6 *ospf6);
extern void ospf6_schedule_abr_task(struct ospf6 *ospf6);
+extern void ospf6_area_nssa_update(struct ospf6_area *area);
void ospf6_asbr_prefix_readvertise(struct ospf6 *ospf6);
extern void ospf6_nssa_lsa_originate(struct ospf6_route *route,
struct ospf6_area *area);
if (route->type == OSPF6_DEST_TYPE_LINKSTATE)
ospf6_linkstate_prefix2str(&route->prefix, buf, sizeof(buf));
+ else if (route->type == OSPF6_DEST_TYPE_ROUTER)
+ inet_ntop(AF_INET, &ADV_ROUTER_IN_PREFIX(&route->prefix), buf,
+ sizeof(buf));
else
prefix2str(&route->prefix, buf, sizeof(buf));
if (route->type == OSPF6_DEST_TYPE_LINKSTATE)
ospf6_linkstate_prefix2str(&route->prefix, buf, sizeof(buf));
+ else if (route->type == OSPF6_DEST_TYPE_ROUTER)
+ inet_ntop(AF_INET, &ADV_ROUTER_IN_PREFIX(&route->prefix), buf,
+ sizeof(buf));
else
prefix2str(&route->prefix, buf, sizeof(buf));
ospf6_add_nexthop(route->nh_list, ifindex, addr)
extern struct ospf6_route *ospf6_route_create(struct ospf6 *ospf6);
-extern void ospf6_route_delete(struct ospf6_route *);
+extern void ospf6_route_delete(struct ospf6_route *route);
extern struct ospf6_route *ospf6_route_copy(struct ospf6_route *route);
extern int ospf6_route_cmp(struct ospf6_route *ra, struct ospf6_route *rb);
json_object *json, bool use_json);
-extern int ospf6_route_table_show(struct vty *, int, int, struct cmd_token **,
- struct ospf6_route_table *, bool use_json);
+extern int ospf6_route_table_show(struct vty *vty, int argc_start, int argc,
+ struct cmd_token **argv,
+ struct ospf6_route_table *table,
+ bool use_json);
extern int ospf6_linkstate_table_show(struct vty *vty, int idx_ipv4, int argc,
struct cmd_token **argv,
struct ospf6_route_table *table);
zlog_debug("%s : looking at area %s", __func__,
area->name);
- if (IS_OSPF6_DEBUG_SPF(PROCESS)) {
- type = htons(OSPF6_LSTYPE_TYPE_7);
- for (ALL_LSDB_TYPED(area->lsdb, type, lsa))
- ospf6_ase_calculate_route(ospf6, lsa,
- area);
- }
+ type = htons(OSPF6_LSTYPE_TYPE_7);
+ for (ALL_LSDB_TYPED(area->lsdb, type, lsa))
+ ospf6_ase_calculate_route(ospf6, lsa, area);
}
}
return 0;
vrf_init(ospf6_vrf_new, ospf6_vrf_enable, ospf6_vrf_disable,
ospf6_vrf_delete, ospf6_vrf_enable);
- vrf_cmd_init(NULL, &ospf6d_privs);
+ vrf_cmd_init(NULL);
}
static void ospf6_top_lsdb_hook_add(struct ospf6_lsa *lsa)
ospf6_distribute_config_write(vty, ospf6);
ospf6_asbr_summary_config_write(vty, ospf6);
config_write_ospf6_gr_helper(vty, ospf6);
+
+ vty_out(vty, "exit\n");
vty_out(vty, "!\n");
}
return 0;
extern void ospf6_zebra_no_redistribute(int, vrf_id_t vrf_id);
#define ospf6_zebra_is_redistribute(type, vrf_id) \
vrf_bitmap_check(zclient->redist[AFI_IP6][type], vrf_id)
-extern void ospf6_zebra_init(struct thread_master *);
+extern void ospf6_zebra_init(struct thread_master *tm);
extern void ospf6_zebra_add_discard(struct ospf6_route *request,
struct ospf6 *ospf6);
extern void ospf6_zebra_delete_discard(struct ospf6_route *request,
struct ospf6 *ospf6);
-extern void ospf6_distance_reset(struct ospf6 *);
-extern uint8_t ospf6_distance_apply(struct prefix_ipv6 *, struct ospf6_route *,
- struct ospf6 *);
+extern void ospf6_distance_reset(struct ospf6 *ospf6);
+extern uint8_t ospf6_distance_apply(struct prefix_ipv6 *p,
+ struct ospf6_route * or,
+ struct ospf6 *ospf6);
-extern int ospf6_distance_set(struct vty *, struct ospf6 *, const char *,
- const char *, const char *);
-extern int ospf6_distance_unset(struct vty *, struct ospf6 *, const char *,
- const char *, const char *);
+extern int ospf6_distance_set(struct vty *vty, struct ospf6 *ospf6,
+ const char *distance_str, const char *ip_str,
+ const char *access_list_str);
+extern int ospf6_distance_unset(struct vty *vty, struct ospf6 *ospf6,
+ const char *distance_str, const char *ip_str,
+ const char *access_list_str);
extern int config_write_ospf6_debug_zebra(struct vty *vty);
extern void install_element_ospf6_debug_zebra(void);
bool all_vrf = false;
int idx_vrf = 0;
-
OSPF6_CMD_CHECK_RUNNING();
OSPF6_FIND_VRF_ARGS(argv, argc, idx_vrf, vrf_name, all_vrf);
if (idx_vrf > 0) {
for (ALL_LIST_ELEMENTS_RO(om6->ospf6, node, ospf6)) {
if (all_vrf || strcmp(ospf6->name, vrf_name) == 0) {
- ospf6_lsdb_type_show_wrapper(vty, level, NULL, &id,
- &adv_router, uj, ospf6);
-
+ ospf6_lsdb_show_wrapper(vty, level, NULL, &id,
+ &adv_router, uj, ospf6);
if (!all_vrf)
break;
}
clippy_scan += \
ospf6d/ospf6_top.c \
+ ospf6d/ospf6_area.c \
ospf6d/ospf6_asbr.c \
ospf6d/ospf6_lsa.c \
ospf6d/ospf6_gr_helper.c \
return 0;
}
+/* This function checks whether an LSA with initial sequence number should be
+ * originated after a wrap in sequence number
+ */
+void ospf_check_and_gen_init_seq_lsa(struct ospf_interface *oi,
+ struct ospf_lsa *recv_lsa)
+{
+ struct ospf_lsa *lsa = NULL;
+ struct ospf *ospf = oi->ospf;
+
+ lsa = ospf_lsa_lookup_by_header(oi->area, recv_lsa->data);
+
+ if ((lsa == NULL) || (!CHECK_FLAG(lsa->flags, OSPF_LSA_PREMATURE_AGE))
+ || (lsa->retransmit_counter != 0)) {
+ if (IS_DEBUG_OSPF(lsa, LSA))
+ zlog_debug(
+ "Do not generate LSA with initial seqence number.");
+ return;
+ }
+
+ ospf_lsa_maxage_delete(ospf, lsa);
+
+ lsa->data->ls_seqnum = lsa_seqnum_increment(lsa);
+
+ ospf_lsa_refresh(ospf, lsa);
+}
+
void ospf_lsa_maxage_delete(struct ospf *ospf, struct ospf_lsa *lsa)
{
struct route_node *rn;
#define LS_AGE(x) (OSPF_LSA_MAXAGE < get_age(x) ? OSPF_LSA_MAXAGE : get_age(x))
#define IS_LSA_SELF(L) (CHECK_FLAG ((L)->flags, OSPF_LSA_SELF))
#define IS_LSA_MAXAGE(L) (LS_AGE ((L)) == OSPF_LSA_MAXAGE)
+#define IS_LSA_MAX_SEQ(L) \
+ ((L)->data->ls_seqnum == htonl(OSPF_MAX_SEQUENCE_NUMBER))
#define OSPF_LSA_UPDATE_DELAY 2
extern struct ospf_lsa *ospf_translated_nssa_originate(struct ospf *ospf,
struct ospf_lsa *type7,
struct ospf_lsa *type5);
+extern void ospf_check_and_gen_init_seq_lsa(struct ospf_interface *oi,
+ struct ospf_lsa *lsa);
extern void ospf_flush_lsa_from_area(struct ospf *ospf, struct in_addr area_id,
int type);
#endif /* _ZEBRA_OSPF_LSA_H */
#endif /* SUPPORT_OSPF_API */
default:
frr_help_exit(1);
- break;
}
}
ospf_db_summary_add(nbr, lsa);
LSDB_LOOP (SUMMARY_LSDB(area), rn, lsa)
ospf_db_summary_add(nbr, lsa);
- LSDB_LOOP (ASBR_SUMMARY_LSDB(area), rn, lsa)
- ospf_db_summary_add(nbr, lsa);
/* Process only if the neighbor is opaque capable. */
if (CHECK_FLAG(nbr->options, OSPF_OPTION_O)) {
ospf_db_summary_add(nbr, lsa);
}
+ /* For Stub/NSSA area, we should not send Type-4 and Type-5 LSAs */
if (nbr->oi->type != OSPF_IFTYPE_VIRTUALLINK
- && area->external_routing == OSPF_AREA_DEFAULT)
+ && area->external_routing == OSPF_AREA_DEFAULT) {
+ LSDB_LOOP (ASBR_SUMMARY_LSDB(area), rn, lsa)
+ ospf_db_summary_add(nbr, lsa);
LSDB_LOOP (EXTERNAL_LSDB(nbr->oi->ospf), rn, lsa)
ospf_db_summary_add(nbr, lsa);
+ }
if (CHECK_FLAG(nbr->options, OSPF_OPTION_O)
&& (nbr->oi->type != OSPF_IFTYPE_VIRTUALLINK
return;
}
+ if (OSPF_GR_IS_ACTIVE_HELPER(nbr)) {
+ /* As per the GR Conformance Test Case 7.2. Section 3
+ * "Also, if X was the Designated Router on network segment S
+ * when the helping relationship began, Y maintains X as the
+ * Designated Router until the helping relationship is
+ * terminated."
+ * When I am helper for this neighbor, I should not trigger the
+ * ISM Events. Also Intentionally not setting the priority and
+ * other fields so that when the neighbor exits the Grace
+ * period, it can handle if there is any change before GR and
+ * after GR. */
+ if (IS_DEBUG_OSPF_GR)
+ zlog_debug(
+ "%s, Neighbor is under GR Restart, hence ignoring the ISM Events",
+ __PRETTY_FUNCTION__);
+
+ return;
+ }
+
/* If neighbor itself declares DR and no BDR exists,
cause event BackupSeen */
if (IPV4_ADDR_SAME(&nbr->address.u.prefix4, &hello->d_router))
if (current == NULL
|| (ret = ospf_lsa_more_recent(current, lsa)) < 0) {
/* CVE-2017-3224 */
- if (current && (lsa->data->ls_seqnum ==
- htonl(OSPF_MAX_SEQUENCE_NUMBER)
- && !IS_LSA_MAXAGE(lsa))) {
+ if (current && (IS_LSA_MAX_SEQ(current))
+ && (IS_LSA_MAX_SEQ(lsa)) && !IS_LSA_MAXAGE(lsa)) {
zlog_debug(
- "Link State Update[%s]: has Max Seq but not MaxAge. Dropping it",
+ "Link State Update[%s]: has Max Seq and higher checksum but not MaxAge. Dropping it",
dump_lsa_key(lsa));
DISCARD_LSA(lsa, 4);
lsr = ospf_ls_retransmit_lookup(nbr, lsa);
- if (lsr != NULL && ospf_lsa_more_recent(lsr, lsa) == 0)
+ if (lsr != NULL && ospf_lsa_more_recent(lsr, lsa) == 0) {
ospf_ls_retransmit_delete(nbr, lsr);
+ ospf_check_and_gen_init_seq_lsa(oi, lsa);
+ }
lsa->data = NULL;
ospf_lsa_discard(lsa);
ospf->ti_lfa_protection_type);
ospf_spf_cleanup(area->spf, area->spf_vertex_list);
+
+ area->spf = NULL;
+ area->spf_vertex_list = NULL;
}
void ospf_spf_calculate_areas(struct ospf *ospf, struct route_table *new_table,
* Remove Segment Routing Local Block.
*
*/
-static void sr_local_block_delete()
+static void sr_local_block_delete(void)
{
struct sr_local_block *srlb = &OspfSR.srlb;
json_vrf = json_object_new_object();
else
json_vrf = json;
- json_neighbor_sub = json_object_new_object();
}
ospf_show_vrf_name(ospf, vty, json_vrf, use_vrf);
if (nbr_nbma->nbr == NULL
|| nbr_nbma->nbr->state == NSM_Down) {
if (use_json) {
+ json_neighbor_sub =
+ json_object_new_object();
json_object_int_add(json_neighbor_sub,
"nbrNbmaPriority",
nbr_nbma->priority);
prefix2str(&rn->p, buf1, sizeof(buf1));
- json_route = json_object_new_object();
if (json) {
+ json_route = json_object_new_object();
json_object_object_add(json, buf1, json_route);
- json_object_to_json_string_ext(
- json, JSON_C_TO_STRING_NOSLASHESCAPE);
}
switch (or->path_type) {
}
}
}
- if (!json)
- json_object_free(json_route);
}
if (!json)
vty_out(vty, "\n");
continue;
int flag = 0;
- json_route = json_object_new_object();
if (json) {
+ json_route = json_object_new_object();
json_object_object_add(
json, inet_ntop(AF_INET, &rn->p.u.prefix4,
buf, sizeof(buf)),
}
}
}
- if (!json)
- json_object_free(json_route);
}
if (!json)
vty_out(vty, "\n");
char buf1[19];
snprintfrr(buf1, sizeof(buf1), "%pFX", &rn->p);
- json_route = json_object_new_object();
if (json) {
+ json_route = json_object_new_object();
json_object_object_add(json, buf1, json_route);
- json_object_to_json_string_ext(
- json, JSON_C_TO_STRING_NOSLASHESCAPE);
}
switch (er->path_type) {
}
}
}
- if (!json)
- json_object_free(json_route);
}
if (!json)
vty_out(vty, "\n");
if (uj) {
/* Keep Non-pretty format */
vty_out(vty, "%s\n",
- json_object_to_json_string(json));
+ json_object_to_json_string_ext(
+ json,
+ JSON_C_TO_STRING_NOSLASHESCAPE));
json_object_free(json);
} else if (!ospf_output)
vty_out(vty, "%% OSPF instance not found\n");
if (uj) {
vty_out(vty, "%s\n",
json_object_to_json_string_ext(
- json, JSON_C_TO_STRING_PRETTY));
+ json,
+ JSON_C_TO_STRING_PRETTY
+ | JSON_C_TO_STRING_NOSLASHESCAPE));
json_object_free(json);
} else
vty_out(vty, "%% OSPF instance not found\n");
if (uj) {
vty_out(vty, "%s\n",
json_object_to_json_string_ext(
- json, JSON_C_TO_STRING_PRETTY));
+ json,
+ JSON_C_TO_STRING_PRETTY
+ | JSON_C_TO_STRING_NOSLASHESCAPE));
json_object_free(json);
} else
vty_out(vty, "%% OSPF instance not found\n");
ret = show_ip_ospf_route_common(vty, ospf, json, use_vrf);
/* Keep Non-pretty format */
if (uj)
- vty_out(vty, "%s\n", json_object_to_json_string(json));
+ vty_out(vty, "%s\n",
+ json_object_to_json_string_ext(
+ json, JSON_C_TO_STRING_NOSLASHESCAPE));
}
if (uj)
/* Router Dead Interval print. */
if (OSPF_IF_PARAM_CONFIGURED(params, v_wait)
- && params->is_v_wait_set
- && params->v_wait
- != OSPF_ROUTER_DEAD_INTERVAL_DEFAULT) {
+ && params->is_v_wait_set) {
vty_out(vty, " ip ospf dead-interval ");
/* fast hello ? */
ospf_opaque_config_write_if(vty, ifp);
- vty_endframe(vty, NULL);
+ vty_endframe(vty, "exit\n!\n");
}
return write;
/* LDP-Sync print */
ospf_ldp_sync_write_config(vty, ospf);
+ vty_out(vty, "exit\n");
+
write++;
return write;
}
install_element(OSPF_NODE, &ospf_max_multipath_cmd);
install_element(OSPF_NODE, &no_ospf_max_multipath_cmd);
- vrf_cmd_init(NULL, &ospfd_privs);
+ vrf_cmd_init(NULL);
/* Init interface related vty commands. */
ospf_vty_if_init();
DEFINE_MTYPE_STATIC(OSPFD, OSPF_EXTERNAL, "OSPF External route table");
DEFINE_MTYPE_STATIC(OSPFD, OSPF_REDISTRIBUTE, "OSPF Redistriute");
-DEFINE_MTYPE_STATIC(OSPFD, OSPF_DIST_ARGS, "OSPF Distribute arguments");
/* Zebra structure to hold current status. */
struct external_info *ei;
struct route_table *rt;
struct ospf_lsa *lsa;
- int type, default_refresh = 0, arg_type;
- struct ospf *ospf = NULL;
- void **arg = THREAD_ARG(thread);
-
- ospf = (struct ospf *)arg[0];
- arg_type = (int)(intptr_t)arg[1];
+ int type, default_refresh = 0;
+ struct ospf *ospf = THREAD_ARG(thread);
if (ospf == NULL)
return 0;
zlog_info("Zebra[Redistribute]: distribute-list update timer fired!");
if (IS_DEBUG_OSPF_EVENT) {
- zlog_debug(
- "%s: ospf distribute-list update arg_type %d vrf %s id %d",
- __func__, arg_type, ospf_vrf_id_to_name(ospf->vrf_id),
- ospf->vrf_id);
+ zlog_debug("%s: ospf distribute-list update vrf %s id %d",
+ __func__, ospf_vrf_id_to_name(ospf->vrf_id),
+ ospf->vrf_id);
}
/* foreach all external info. */
if (default_refresh)
ospf_external_lsa_refresh_default(ospf);
- XFREE(MTYPE_OSPF_DIST_ARGS, arg);
return 0;
}
unsigned short instance)
{
struct ospf_external *ext;
- void **args = XCALLOC(MTYPE_OSPF_DIST_ARGS, sizeof(void *) * 2);
-
- args[0] = ospf;
- args[1] = (void *)((ptrdiff_t)type);
/* External info does not exist. */
ext = ospf_external_lookup(ospf, type, instance);
- if (!ext || !EXTERNAL_INFO(ext)) {
- XFREE(MTYPE_OSPF_DIST_ARGS, args);
+ if (!ext || !EXTERNAL_INFO(ext))
return;
- }
- /* If exists previously invoked thread, then let it continue. */
- if (ospf->t_distribute_update) {
- XFREE(MTYPE_OSPF_DIST_ARGS, args);
- return;
- }
-
- /* Set timer. */
- ospf->t_distribute_update = NULL;
- thread_add_timer_msec(master, ospf_distribute_list_update_timer, args,
+ /* Set timer. If timer is already started, this call does nothing. */
+ thread_add_timer_msec(master, ospf_distribute_list_update_timer, ospf,
ospf->min_ls_interval,
&ospf->t_distribute_update);
}
ospf_lsdb_delete_all(ospf->lsdb);
}
+ /* Since the LSAs are deleted, need reset the aggr flag */
+ ospf_unset_all_aggr_flag(ospf);
+
/* Delete the LSDB */
for (ALL_LIST_ELEMENTS(ospf->areas, node, nnode, area))
ospf_area_lsdb_discard_delete(area);
static int config_write_segment_routing(struct vty *vty);
-static int config_write_traffic_eng(struct vty *vty);
-static int config_write_segment_lists(struct vty *vty);
-static int config_write_sr_policies(struct vty *vty);
static int segment_list_has_src_dst(
struct vty *vty, char *xpath, long index, const char *index_str,
struct in_addr adj_src_ipv4, struct in_addr adj_dst_ipv4,
DEFINE_MTYPE_STATIC(PATHD, PATH_CLI, "Client");
+DEFINE_HOOK(pathd_srte_config_write, (struct vty *vty), (vty));
+
/* Vty node structures. */
static struct cmd_node segment_routing_node = {
.name = "segment-routing",
.node = SR_TRAFFIC_ENG_NODE,
.parent_node = SEGMENT_ROUTING_NODE,
.prompt = "%s(config-sr-te)# ",
- .config_write = config_write_traffic_eng,
};
static struct cmd_node srte_segment_list_node = {
.node = SR_SEGMENT_LIST_NODE,
.parent_node = SR_TRAFFIC_ENG_NODE,
.prompt = "%s(config-sr-te-segment-list)# ",
- .config_write = config_write_segment_lists,
};
static struct cmd_node srte_policy_node = {
.node = SR_POLICY_NODE,
.parent_node = SR_TRAFFIC_ENG_NODE,
.prompt = "%s(config-sr-te-policy)# ",
- .config_write = config_write_sr_policies,
};
static struct cmd_node srte_candidate_dyn_node = {
yang_dnode_get_string(dnode, "./name"));
}
+void cli_show_srte_segment_list_end(struct vty *vty, struct lyd_node *dnode)
+{
+ vty_out(vty, " exit\n");
+}
+
static int segment_list_has_src_dst(
struct vty *vty, char *xpath, long index, const char *index_str,
struct in_addr adj_src_ipv4, struct in_addr adj_dst_ipv4,
nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY,
"ipv6_adjacency");
node_src_id = adj_src_ipv6_str;
+ } else {
+ /*
+ * This is just to make the compiler happy about
+ * node_src_id not being initialized. This
+ * should never happen unless we change the cli
+ * function.
+ */
+ assert(!"We must have a adj_src_ipv4_str or a adj_src_ipv6_str");
}
+
/* addresses */
snprintf(xpath, XPATH_MAXLEN, "./segment[index='%s']/nai/local-address",
index_str);
yang_dnode_get_string(dnode, "./endpoint"));
}
+void cli_show_srte_policy_end(struct vty *vty, struct lyd_node *dnode)
+{
+ vty_out(vty, " exit\n");
+}
+
/*
* XPath: /frr-pathd:pathd/srte/policy/name
*/
}
}
+void cli_show_srte_policy_candidate_path_end(struct vty *vty,
+ struct lyd_node *dnode)
+{
+ const char *type = yang_dnode_get_string(dnode, "./type");
+
+ if (strmatch(type, "dynamic"))
+ vty_out(vty, " exit\n");
+}
+
static int config_write_dnode(const struct lyd_node *dnode, void *arg)
{
struct vty *vty = arg;
int config_write_segment_routing(struct vty *vty)
{
vty_out(vty, "segment-routing\n");
- return 1;
-}
-
-int config_write_traffic_eng(struct vty *vty)
-{
vty_out(vty, " traffic-eng\n");
+
path_ted_config_write(vty);
- return 1;
-}
-int config_write_segment_lists(struct vty *vty)
-{
yang_dnode_iterate(config_write_dnode, vty, running_config->dnode,
"/frr-pathd:pathd/srte/segment-list");
-
- return 1;
-}
-
-int config_write_sr_policies(struct vty *vty)
-{
yang_dnode_iterate(config_write_dnode, vty, running_config->dnode,
"/frr-pathd:pathd/srte/policy");
+ hook_call(pathd_srte_config_write, vty);
+
+ vty_out(vty, " exit\n");
+ vty_out(vty, "exit\n");
+
return 1;
}
break;
default:
frr_help_exit(1);
- break;
}
}
.cbs = {
.create = pathd_srte_segment_list_create,
.cli_show = cli_show_srte_segment_list,
+ .cli_show_end = cli_show_srte_segment_list_end,
.destroy = pathd_srte_segment_list_destroy,
.get_next = pathd_srte_segment_list_get_next,
.get_keys = pathd_srte_segment_list_get_keys,
.cbs = {
.create = pathd_srte_policy_create,
.cli_show = cli_show_srte_policy,
+ .cli_show_end = cli_show_srte_policy_end,
.destroy = pathd_srte_policy_destroy,
.get_next = pathd_srte_policy_get_next,
.get_keys = pathd_srte_policy_get_keys,
.cbs = {
.create = pathd_srte_policy_candidate_path_create,
.cli_show = cli_show_srte_policy_candidate_path,
+ .cli_show_end = cli_show_srte_policy_candidate_path_end,
.destroy = pathd_srte_policy_candidate_path_destroy,
.get_next = pathd_srte_policy_candidate_path_get_next,
.get_keys = pathd_srte_policy_candidate_path_get_keys,
/* Optional 'cli_show' callbacks. */
void cli_show_srte_segment_list(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
+void cli_show_srte_segment_list_end(struct vty *vty, struct lyd_node *dnode);
void cli_show_srte_segment_list_segment(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
void cli_show_srte_policy(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
+void cli_show_srte_policy_end(struct vty *vty, struct lyd_node *dnode);
void cli_show_srte_policy_name(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
void cli_show_srte_policy_binding_sid(struct vty *vty, struct lyd_node *dnode,
void cli_show_srte_policy_candidate_path(struct vty *vty,
struct lyd_node *dnode,
bool show_defaults);
+void cli_show_srte_policy_candidate_path_end(struct vty *vty,
+ struct lyd_node *dnode);
/* Utility functions */
typedef void (*of_pref_cp_t)(enum objfun_type type, void *arg);
/* Internal Util Function declarations */
static struct pce_opts_cli *pcep_cli_find_pce(const char *pce_name);
static bool pcep_cli_add_pce(struct pce_opts_cli *pce_opts_cli);
-static struct pce_opts_cli *pcep_cli_create_pce_opts();
+static struct pce_opts_cli *pcep_cli_create_pce_opts(const char *name);
static void pcep_cli_delete_pce(const char *pce_name);
static void
pcep_cli_merge_pcep_pce_config_options(struct pce_opts_cli *pce_opts_cli);
.name = "srte pcep",
.node = PCEP_NODE,
.parent_node = SR_TRAFFIC_ENG_NODE,
- .config_write = pcep_cli_pcep_config_write,
.prompt = "%s(config-sr-te-pcep)# "
};
.name = "srte pcep pcc",
.node = PCEP_PCC_NODE,
.parent_node = PCEP_NODE,
- .config_write = pcep_cli_pcc_config_write,
.prompt = "%s(config-sr-te-pcep-pcc)# "
};
.name = "srte pcep pce",
.node = PCEP_PCE_NODE,
.parent_node = PCEP_NODE,
- .config_write = pcep_cli_pce_config_write,
.prompt = "%s(config-sr-te-pcep-pce)# "
};
.name = "srte pcep pce-config",
.node = PCEP_PCE_CONFIG_NODE,
.parent_node = PCEP_NODE,
- .config_write = pcep_cli_pcep_pce_config_write,
.prompt = "%s(pce-sr-te-pcep-pce-config)# "
};
int pcep_cli_pcep_config_write(struct vty *vty)
{
vty_out(vty, " pcep\n");
+ pcep_cli_pcep_pce_config_write(vty);
+ pcep_cli_pce_config_write(vty);
+ pcep_cli_pcc_config_write(vty);
+ vty_out(vty, " exit\n");
return 1;
}
}
if (pce_connections_g.num_connections == 0) {
- return lines;
+ goto exit;
}
buf[0] = 0;
lines++;
buf[0] = 0;
}
+exit:
+ vty_out(vty, " exit\n");
return lines;
}
vty_out(vty, "%s", buf);
buf[0] = '\0';
+
+ vty_out(vty, " exit\n");
}
return lines;
pcep_cli_print_pce_config(group_opts, buf, sizeof(buf));
vty_out(vty, "%s", buf);
buf[0] = 0;
+
+ vty_out(vty, " exit\n");
}
return lines;
void pcep_cli_init(void)
{
+ hook_register(pathd_srte_config_write, pcep_cli_pcep_config_write);
hook_register(nb_client_debug_config_write,
pcep_cli_debug_config_write);
hook_register(nb_client_debug_set_all, pcep_cli_debug_set_all);
DECLARE_MGROUP(PATHD);
+DECLARE_HOOK(pathd_srte_config_write, (struct vty *vty), (vty));
+
enum srte_protocol_origin {
SRTE_ORIGIN_UNDEFINED = 0,
SRTE_ORIGIN_PCEP = 1,
break;
default:
frr_help_exit(1);
- break;
}
}
pbr_map_write_interfaces(vty, ifp);
- vty_endframe(vty, "!\n");
+ vty_endframe(vty, "exit\n!\n");
}
}
pbrms_nexthop_group_write_individual_nexthop(vty, pbrms);
}
+ vty_out(vty, "exit\n");
vty_out(vty, "!\n");
return 1;
}
{
cmd_variable_handler_register(pbr_map_name);
- vrf_cmd_init(NULL, &pbr_privs);
+ vrf_cmd_init(NULL);
if_cmd_init(pbr_interface_config_write);
[PCEP_OBJ_TLV_TYPE_OBJECTIVE_FUNCTION_LIST] = pcep_decode_tlv_of_list,
};
-static void initialize_tlv_coders()
+static void initialize_tlv_coders(void)
{
static bool initialized = false;
void send_pcep_open(pcep_session *session); /* forward decl */
-static bool run_session_logic_common()
+static bool run_session_logic_common(void)
{
if (session_logic_handle_ != NULL) {
pcep_log(LOG_WARNING,
}
/* Internal util function */
-static int get_next_session_id()
+static int get_next_session_id(void)
{
if (session_id_ == INT_MAX) {
session_id_ = 0;
}
/* internal util method */
-static pcep_timers_context *create_timers_context_()
+static pcep_timers_context *create_timers_context_(void)
{
if (timers_context_ == NULL) {
timers_context_ = pceplib_malloc(PCEPLIB_INFRA,
struct igmp_group *group;
int i;
+ if (num_sources == 0) {
+ /*
+ RFC 3376: 3.1. Socket-State
+ If the requested filter mode is INCLUDE *and* the requested
+ source list is empty, then the entry corresponding to the
+ requested interface and multicast address is deleted if
+ present. If no such entry is present, the request is ignored.
+ So, deleting the group present.
+ */
+ group = find_group_by_addr(igmp, group_addr);
+ if (!group) {
+ return;
+ }
+ if (group->group_filtermode_isexcl) {
+ if (listcount(group->group_source_list) == 1) {
+ struct in_addr star = {.s_addr = INADDR_ANY};
+
+ source = igmp_find_source_by_addr(group, star);
+ if (source)
+ igmp_source_reset_gmi(igmp, group,
+ source);
+ }
+ } else {
+ igmp_group_delete(group);
+ }
+
+ return;
+ }
+
/* non-existant group is created as INCLUDE {empty} */
group = igmp_add_group_by_addr(igmp, group_addr);
if (!group) {
igmp_source_reset_gmi(igmp, group, source);
} /* scan received sources */
-
- if ((num_sources == 0) && (group->group_filtermode_isexcl)
- && (listcount(group->group_source_list) == 1)) {
- struct in_addr star = {.s_addr = INADDR_ANY};
-
- source = igmp_find_source_by_addr(group, star);
- if (source)
- igmp_source_reset_gmi(igmp, group, source);
- }
}
void igmpv3_report_isin(struct igmp_sock *igmp, struct in_addr from,
pim_global_config_write_worker(pim, vty);
if (vrf->vrf_id != VRF_DEFAULT)
- vty_endframe(vty, " exit-vrf\n!\n");
+ vty_endframe(vty, "exit-vrf\n!\n");
}
return 0;
vrf_init(pim_vrf_new, pim_vrf_enable, pim_vrf_disable,
pim_vrf_delete, NULL);
- vrf_cmd_init(pim_vrf_config_write, &pimd_privs);
+ vrf_cmd_init(pim_vrf_config_write);
}
void pim_vrf_terminate(void)
break;
default:
frr_help_exit(1);
- break;
}
}
case IGMPMSG_WRONGVIF:
return pim_mroute_msg_wrongvif(pim->mroute_socket, ifp,
msg);
- break;
case IGMPMSG_NOCACHE:
return pim_mroute_msg_nocache(pim->mroute_socket, ifp,
msg);
- break;
case IGMPMSG_WHOLEPKT:
return pim_mroute_msg_wholepkt(pim->mroute_socket, ifp,
(const char *)msg);
- break;
case IGMPMSG_WRVIFWHOLE:
return pim_mroute_msg_wrvifwhole(
pim->mroute_socket, ifp, (const char *)msg);
- break;
default:
break;
}
struct listnode *sock_node;
struct igmp_sock *igmp;
+ if (pim_ifp->igmp_query_max_response_time_dsec
+ == query_max_response_time_dsec)
+ return;
+
pim_ifp->igmp_query_max_response_time_dsec =
query_max_response_time_dsec;
}
return -1;
}
- return -1;
}
static void pim_sock_read_on(struct interface *ifp);
{
struct ip *ip = (struct ip *)buf;
- while (sendto(fd, buf, len, MSG_DONTWAIT, dst, salen) < 0) {
+ if (sendto(fd, buf, len, MSG_DONTWAIT, dst, salen) < 0) {
char dst_str[INET_ADDRSTRLEN];
switch (errno) {
default:
return 0;
}
-
- return 0;
}
int pim_rpf_addr_is_inaddr_any(struct pim_rpf *rpf)
default:
return 0;
}
-
- return 0;
}
int pim_rpf_is_same(struct pim_rpf *rpf1, struct pim_rpf *rpf2)
pim_bfd_write_config(vty, ifp);
++writes;
}
- vty_endframe(vty, "!\n");
+ vty_endframe(vty, "exit\n!\n");
++writes;
}
}
break;
default:
frr_help_exit(1);
- break;
}
}
struct rip_instance_head rip_instances = RB_INITIALIZER(&rip_instances);
-/* Utility function to set boradcast option to the socket. */
+/* Utility function to set broadcast option to the socket. */
static int sockopt_broadcast(int sock)
{
int ret;
}
/* Once the entry has been validated, update the metric by
- adding the cost of the network on wich the message
+ adding the cost of the network on which the message
arrived. If the result is greater than infinity, use infinity
(RFC2453 Sec. 3.9.2) */
/* Zebra ripd can handle offset-list in. */
/* Interface routemap configuration */
config_write_if_rmap(vty, rip->if_rmap_ctx);
+ vty_out(vty, "exit\n");
+
write = 1;
}
vrf_init(rip_vrf_new, rip_vrf_enable, rip_vrf_disable, rip_vrf_delete,
rip_vrf_enable);
- vrf_cmd_init(NULL, &ripd_privs);
+ vrf_cmd_init(NULL);
}
void rip_vrf_terminate(void)
break;
default:
frr_help_exit(1);
- break;
}
}
config_write_distribute(vty, ripng->distribute_ctx);
config_write_if_rmap(vty, ripng->if_rmap_ctx);
+ vty_out(vty, "exit\n");
+
write = 1;
}
vrf_init(ripng_vrf_new, ripng_vrf_enable, ripng_vrf_disable,
ripng_vrf_delete, ripng_vrf_enable);
- vrf_cmd_init(NULL, &ripngd_privs);
+ vrf_cmd_init(NULL);
}
void ripng_vrf_terminate(void)
break;
default:
frr_help_exit(1);
- break;
}
}
break;
default:
frr_help_exit(1);
- break;
}
}
SAFI_UNICAST, "ipv6 route");
if (vrf->vrf_id != VRF_DEFAULT)
- vty_endframe(vty, " exit-vrf\n!\n");
+ vty_endframe(vty, "exit-vrf\n!\n");
}
return 0;
vrf_init(static_vrf_new, static_vrf_enable,
static_vrf_disable, static_vrf_delete, NULL);
- vrf_cmd_init(static_vrf_config_write, &static_privs);
+ vrf_cmd_init(static_vrf_config_write);
}
void static_vrf_terminate(void)
printf("%s: %s\n", t->name, t->desc);
- ecom = ecommunity_parse((uint8_t *)t->data, t->len);
+ ecom = ecommunity_parse((uint8_t *)t->data, t->len, 0);
printf("ecom: %s\nvalidating...:\n", ecommunity_str(ecom));
struct lspdb_head _lspdb, *lspdb = &_lspdb;
lsp_db_init(&_lspdb);
- struct isis_lsp *lsp1 = lsp_new(area, lsp_id1, 6000, 0, 0, 0, NULL,
- ISIS_LEVEL2);
+ struct isis_lsp *lsp1 =
+ lsp_new(area, lsp_id1, 6000, 1, 0, 0, NULL, ISIS_LEVEL2);
- lsp_insert(lspdb, lsp1);
+ lspdb_add(lspdb, lsp1);
- struct isis_lsp *lsp2 = lsp_new(area, lsp_id2, 6000, 0, 0, 0, NULL,
- ISIS_LEVEL2);
+ struct isis_lsp *lsp2 =
+ lsp_new(area, lsp_id2, 6000, 1, 0, 0, NULL, ISIS_LEVEL2);
- lsp_insert(lspdb, lsp2);
+ lspdb_add(lspdb, lsp2);
struct list *list = list_new();
DUMMY_DEFUN(cmd13, "alt a X:X::X:X");
DUMMY_DEFUN(cmd14,
"pat g { foo A.B.C.D$foo|foo|bar X:X::X:X$bar| baz } [final]");
+DUMMY_DEFUN(cmd15, "no pat g ![ WORD ]");
+DUMMY_DEFUN(cmd16, "[no] pat h {foo ![A.B.C.D$foo]|bar X:X::X:X$bar} final");
#include "tests/lib/cli/test_cli_clippy.c"
install_element(ENABLE_NODE, &cmd13_cmd);
}
install_element(ENABLE_NODE, &cmd14_cmd);
+ install_element(ENABLE_NODE, &cmd15_cmd);
+ install_element(ENABLE_NODE, &cmd16_cmd);
install_element(ENABLE_NODE, &magic_test_cmd);
}
pat f foo
pat f key
+no pat g
+no pat g test
+no pat g test more
+
+pat h foo ?1.2.3.4 final
+no pat h foo ?1.2.3.4 final
+pat h foo final
+no pat h foo final
+pat h bar final
+no pat h bar final
+pat h bar 1::2 final
+no pat h bar 1::2 final
+pat h bar 1::2 foo final
+no pat h bar 1::2 foo final
+pat h bar 1::2 foo 1.2.3.4 final
+no pat h bar 1::2 foo 1.2.3.4 final
+
alt a a?b
alt a 1 .2?.3.4
alt a 1 :2? ::?3
% Command incomplete.\r
test# pat \r
a b c d e f \r
-g \r
+g h \r
test# pat \r
% Command incomplete.\r
test# \r
[01] f@(null): f\r
[02] key@(null): key\r
test# \r
+test# no pat g\r
+cmd15 with 3 args.\r
+[00] no@(null): no\r
+[01] pat@(null): pat\r
+[02] g@(null): g\r
+test# no pat g test\r
+cmd15 with 4 args.\r
+[00] no@(null): no\r
+[01] pat@(null): pat\r
+[02] g@(null): g\r
+[03] WORD@g: test\r
+test# no pat g test more\r
+% [NONE] Unknown command: no pat g test more\r
+test# \r
+test# pat h foo \r
+ A.B.C.D 04\r
+test# pat h foo 1.2.3.4 final\r
+cmd16 with 5 args.\r
+[00] pat@(null): pat\r
+[01] h@(null): h\r
+[02] foo@(null): foo\r
+[03] A.B.C.D@foo: 1.2.3.4\r
+[04] final@(null): final\r
+test# no pat h foo \r
+ A.B.C.D 04\r
+ bar 05\r
+ final 07\r
+test# no pat h foo 1.2.3.4 final\r
+cmd16 with 6 args.\r
+[00] no@no: no\r
+[01] pat@(null): pat\r
+[02] h@(null): h\r
+[03] foo@(null): foo\r
+[04] A.B.C.D@foo: 1.2.3.4\r
+[05] final@(null): final\r
+test# pat h foo final\r
+% [NONE] Unknown command: pat h foo final\r
+test# no pat h foo final\r
+cmd16 with 5 args.\r
+[00] no@no: no\r
+[01] pat@(null): pat\r
+[02] h@(null): h\r
+[03] foo@(null): foo\r
+[04] final@(null): final\r
+test# pat h bar final\r
+% [NONE] Unknown command: pat h bar final\r
+test# no pat h bar final\r
+% [NONE] Unknown command: no pat h bar final\r
+test# pat h bar 1::2 final\r
+cmd16 with 5 args.\r
+[00] pat@(null): pat\r
+[01] h@(null): h\r
+[02] bar@(null): bar\r
+[03] X:X::X:X@bar: 1::2\r
+[04] final@(null): final\r
+test# no pat h bar 1::2 final\r
+cmd16 with 6 args.\r
+[00] no@no: no\r
+[01] pat@(null): pat\r
+[02] h@(null): h\r
+[03] bar@(null): bar\r
+[04] X:X::X:X@bar: 1::2\r
+[05] final@(null): final\r
+test# pat h bar 1::2 foo final\r
+% [NONE] Unknown command: pat h bar 1::2 foo final\r
+test# no pat h bar 1::2 foo final\r
+cmd16 with 7 args.\r
+[00] no@no: no\r
+[01] pat@(null): pat\r
+[02] h@(null): h\r
+[03] bar@(null): bar\r
+[04] X:X::X:X@bar: 1::2\r
+[05] foo@(null): foo\r
+[06] final@(null): final\r
+test# pat h bar 1::2 foo 1.2.3.4 final\r
+cmd16 with 7 args.\r
+[00] pat@(null): pat\r
+[01] h@(null): h\r
+[02] bar@(null): bar\r
+[03] X:X::X:X@bar: 1::2\r
+[04] foo@(null): foo\r
+[05] A.B.C.D@foo: 1.2.3.4\r
+[06] final@(null): final\r
+test# no pat h bar 1::2 foo 1.2.3.4 final\r
+cmd16 with 8 args.\r
+[00] no@no: no\r
+[01] pat@(null): pat\r
+[02] h@(null): h\r
+[03] bar@(null): bar\r
+[04] X:X::X:X@bar: 1::2\r
+[05] foo@(null): foo\r
+[06] A.B.C.D@foo: 1.2.3.4\r
+[07] final@(null): final\r
+test# \r
test# alt a \r
test# alt a a\r
WORD 02\r
!\r
!\r
!\r
-line vty\r
!\r
end\r
test# conf t\r
!\r
!\r
!\r
-line vty\r
!\r
end\r
foohost(config)#
nexthop_free(nh2);
/* Blackhole */
- nh1 = nexthop_from_blackhole(BLACKHOLE_REJECT);
- nh2 = nexthop_from_blackhole(BLACKHOLE_REJECT);
+ nh1 = nexthop_from_blackhole(BLACKHOLE_REJECT, 0);
+ nh2 = nexthop_from_blackhole(BLACKHOLE_REJECT, 0);
ret = nexthop_cmp_basic(nh1, nh2);
assert(ret == 0);
nexthop_free(nh2);
- nh2 = nexthop_from_blackhole(BLACKHOLE_NULL);
+ nh2 = nexthop_from_blackhole(BLACKHOLE_NULL, 0);
ret = nexthop_cmp_basic(nh1, nh2);
assert(ret != 0);
import glob
from time import sleep
-from mininet.topo import Topo
-from mininet.net import Mininet
-from mininet.node import Node, OVSSwitch, Host
-from mininet.log import setLogLevel, info
-from mininet.cli import CLI
-from mininet.link import Intf
-
-from functools import partial
pytestmark = [
pytest.mark.babeld,
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lib import topotest
+from lib.topogen import Topogen, get_topogen
fatal_error = ""
#####################################################
-class NetworkTopo(Topo):
- "All Protocol Startup Test"
-
- def build(self, **_opts):
-
- # Setup Routers
- router = {}
- #
- # Setup Main Router
- router[1] = topotest.addRouter(self, "r1")
- #
-
- # Setup Switches
- switch = {}
- #
- for i in range(0, 10):
- switch[i] = self.addSwitch("sw%s" % i, cls=topotest.LegacySwitch)
- self.addLink(switch[i], router[1], intfName2="r1-eth%s" % i)
+def build_topo(tgen):
+ router = tgen.add_router("r1")
+ for i in range(0, 10):
+ tgen.add_switch("sw%d" % i).add_link(router)
#####################################################
def setup_module(module):
- global topo, net
global fatal_error
print("\n\n** %s: Setup Topology" % module.__name__)
print("******************************************\n")
- print("Cleanup old Mininet runs")
- os.system("sudo mn -c > /dev/null 2>&1")
- os.system("sudo rm /tmp/r* > /dev/null 2>&1")
-
thisDir = os.path.dirname(os.path.realpath(__file__))
- topo = NetworkTopo()
+ tgen = Topogen(build_topo, module.__name__)
+ tgen.start_topology()
- net = Mininet(controller=None, topo=topo)
- net.start()
+ net = tgen.net
if net["r1"].get_routertype() != "frr":
fatal_error = "Test is only implemented for FRR"
net["r%s" % i].loadConf("nhrpd", "%s/r%s/nhrpd.conf" % (thisDir, i))
net["r%s" % i].loadConf("babeld", "%s/r%s/babeld.conf" % (thisDir, i))
net["r%s" % i].loadConf("pbrd", "%s/r%s/pbrd.conf" % (thisDir, i))
- net["r%s" % i].startRouter()
+ tgen.gears["r%s" % i].start()
# For debugging after starting FRR daemons, uncomment the next line
# CLI(net)
def teardown_module(module):
- global net
-
print("\n\n** %s: Shutdown Topology" % module.__name__)
print("******************************************\n")
-
- # End - Shutdown network
- net.stop()
+ tgen = get_topogen()
+ tgen.stop_topology()
def test_router_running():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_error_messages_vtysh():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_error_messages_daemons():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_converge_protocols():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def route_get_nhg_id(route_str):
+ net = get_topogen().net
output = net["r1"].cmd('vtysh -c "show ip route %s nexthop-group"' % route_str)
match = re.search(r"Nexthop Group ID: (\d+)", output)
assert match is not None, (
def verify_nexthop_group(nhg_id, recursive=False, ecmp=0):
+ net = get_topogen().net
# Verify NHG is valid/installed
output = net["r1"].cmd('vtysh -c "show nexthop-group rib %d"' % nhg_id)
def test_nexthop_groups():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_rip_status():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_ripng_status():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_ospfv2_interfaces():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
)
# Mask out Bandwidth portion. They may change..
actual = re.sub(r"BW [0-9]+ Mbit", "BW XX Mbit", actual)
- actual = re.sub(r"ifindex [0-9]", "ifindex X", actual)
+ actual = re.sub(r"ifindex [0-9]+", "ifindex X", actual)
# Drop time in next due
actual = re.sub(r"Hello due in [0-9\.]+s", "Hello due in XX.XXXs", actual)
def test_isis_interfaces():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_bgp_summary():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
# Read expected result from file
expected_original = open(refTableFile).read().rstrip()
- for arguments in ["", "remote-as internal", "remote-as external",
- "remote-as 100", "remote-as 123",
- "neighbor 192.168.7.10", "neighbor 192.168.7.10",
- "neighbor fc00:0:0:8::1000",
- "neighbor 10.0.0.1",
- "terse",
- "remote-as internal terse",
- "remote-as external terse",
- "remote-as 100 terse", "remote-as 123 terse",
- "neighbor 192.168.7.10 terse", "neighbor 192.168.7.10 terse",
- "neighbor fc00:0:0:8::1000 terse",
- "neighbor 10.0.0.1 terse"]:
+ for arguments in [
+ "",
+ "remote-as internal",
+ "remote-as external",
+ "remote-as 100",
+ "remote-as 123",
+ "neighbor 192.168.7.10",
+ "neighbor 192.168.7.10",
+ "neighbor fc00:0:0:8::1000",
+ "neighbor 10.0.0.1",
+ "terse",
+ "remote-as internal terse",
+ "remote-as external terse",
+ "remote-as 100 terse",
+ "remote-as 123 terse",
+ "neighbor 192.168.7.10 terse",
+ "neighbor 192.168.7.10 terse",
+ "neighbor fc00:0:0:8::1000 terse",
+ "neighbor 10.0.0.1 terse",
+ ]:
# Actual output from router
actual = (
net["r%s" % i]
- .cmd('vtysh -c "show ip bgp summary ' + arguments + '" 2> /dev/null')
+ .cmd(
+ 'vtysh -c "show ip bgp summary ' + arguments + '" 2> /dev/null'
+ )
.rstrip()
)
# Remove Unknown Summary (all of it)
actual = re.sub(r"Unknown Summary \(VRF default\):", "", actual)
actual = re.sub(r"No Unknown neighbor is configured", "", actual)
+ # Make Connect/Active/Idle the same (change them all to Active)
+ actual = re.sub(r" Connect ", " Active ", actual)
+ actual = re.sub(r" Idle ", " Active ", actual)
- actual = re.sub(r"IPv4 labeled-unicast Summary \(VRF default\):", "", actual)
+ actual = re.sub(
+ r"IPv4 labeled-unicast Summary \(VRF default\):", "", actual
+ )
actual = re.sub(
r"No IPv4 labeled-unicast neighbor is configured", "", actual
)
elif "remote-as 123" in arguments:
expected = re.sub(
r"(192.168.7.(1|2)0|fc00:0:0:8::(1|2)000).+Active.+",
- "", expected
+ "",
+ expected,
)
expected = re.sub(r"\nNeighbor.+Desc", "", expected)
expected = expected + "% No matching neighbor\n"
elif "192.168.7.10" in arguments:
expected = re.sub(
- r"(192.168.7.20|fc00:0:0:8::(1|2)000).+Active.+",
- "", expected
+ r"(192.168.7.20|fc00:0:0:8::(1|2)000).+Active.+", "", expected
)
elif "fc00:0:0:8::1000" in arguments:
expected = re.sub(
- r"(192.168.7.(1|2)0|fc00:0:0:8::2000).+Active.+",
- "", expected
+ r"(192.168.7.(1|2)0|fc00:0:0:8::2000).+Active.+", "", expected
)
elif "10.0.0.1" in arguments:
expected = "No such neighbor in VRF default"
# realign expected neighbor columns if needed
try:
- idx_actual = re.search(r"(Neighbor\s+V\s+)", actual).group(1).find("V")
- idx_expected = re.search(r"(Neighbor\s+V\s+)", expected).group(1).find("V")
+ idx_actual = (
+ re.search(r"(Neighbor\s+V\s+)", actual).group(1).find("V")
+ )
+ idx_expected = (
+ re.search(r"(Neighbor\s+V\s+)", expected).group(1).find("V")
+ )
idx_diff = idx_expected - idx_actual
if idx_diff > 0:
# Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
diff = topotest.get_textdiff(
actual,
expected,
- title1="actual SHOW IP BGP SUMMARY " + arguments.upper() ,
+ title1="actual SHOW IP BGP SUMMARY " + arguments.upper(),
title2="expected SHOW IP BGP SUMMARY " + arguments.upper(),
)
else:
print("r%s ok" % i)
- assert failures == 0, "SHOW IP BGP SUMMARY failed for router r%s:\n%s" % (
+ assert (
+ failures == 0
+ ), "SHOW IP BGP SUMMARY failed for router r%s:\n%s" % (
i,
diff,
)
def test_bgp_ipv6_summary():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
# Remove Unknown Summary (all of it)
actual = re.sub(r"Unknown Summary \(VRF default\):", "", actual)
actual = re.sub(r"No Unknown neighbor is configured", "", actual)
+ # Make Connect/Active/Idle the same (change them all to Active)
+ actual = re.sub(r" Connect ", " Active ", actual)
+ actual = re.sub(r" Idle ", " Active ", actual)
# Remove Labeled Unicast Summary (all of it)
- actual = re.sub(r"IPv6 labeled-unicast Summary \(VRF default\):", "", actual)
+ actual = re.sub(
+ r"IPv6 labeled-unicast Summary \(VRF default\):", "", actual
+ )
actual = re.sub(
r"No IPv6 labeled-unicast neighbor is configured", "", actual
)
def test_nht():
+ net = get_topogen().net
print("\n\n**** Test that nexthop tracking is at least nominally working ****\n")
thisDir = os.path.dirname(os.path.realpath(__file__))
expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
actual = net["r%s" % i].cmd('vtysh -c "show ip nht" 2> /dev/null').rstrip()
- actual = re.sub(r"fd [0-9][0-9]", "fd XX", actual)
+ actual = re.sub(r"fd [0-9]+", "fd XX", actual)
actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
diff = topotest.get_textdiff(
expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
actual = net["r%s" % i].cmd('vtysh -c "show ipv6 nht" 2> /dev/null').rstrip()
- actual = re.sub(r"fd [0-9][0-9]", "fd XX", actual)
+ actual = re.sub(r"fd [0-9]+", "fd XX", actual)
actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
diff = topotest.get_textdiff(
def test_bgp_ipv4():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_bgp_ipv6():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_route_map():
global fatal_error
- global net
+ net = get_topogen().net
if fatal_error != "":
pytest.skip(fatal_error)
def test_nexthop_groups_with_route_maps():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
net["r1"].cmd('vtysh -c "sharp remove routes %s 1"' % route_str)
net["r1"].cmd('vtysh -c "c t" -c "no ip protocol sharp route-map NH-SRC"')
net["r1"].cmd(
- 'vtysh -c "c t" -c "no route-map NH-SRC permit 111" -c "set src %s"' % src_str
+ 'vtysh -c "c t" -c "no route-map NH-SRC permit 111" # -c "set src %s"' % src_str
)
net["r1"].cmd('vtysh -c "c t" -c "no route-map NH-SRC"')
def test_nexthop_group_replace():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_mpls_interfaces():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_shutdown_check_stderr():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_shutdown_check_memleak():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
if __name__ == "__main__":
-
- setLogLevel("info")
# To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli
# retval = pytest.main(["-s", "--tb=no"])
retval = pytest.main(["-s"])
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+#
+# July 9 2021, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; see the file COPYING; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+import argparse
+import glob
+import logging
+import os
+import re
+import subprocess
+import sys
+from collections import OrderedDict
+
+import xmltodict
+
+
+def get_summary(results):
+ ntest = int(results["@tests"])
+ nfail = int(results["@failures"])
+ nerror = int(results["@errors"])
+ nskip = int(results["@skipped"])
+ npass = ntest - nfail - nskip - nerror
+ return ntest, npass, nfail, nerror, nskip
+
+
+def print_summary(results, args):
+ ntest, npass, nfail, nerror, nskip = (0, 0, 0, 0, 0)
+ for group in results:
+ _ntest, _npass, _nfail, _nerror, _nskip = get_summary(results[group])
+ if args.verbose:
+ print(
+ f"Group: {group} Total: {_ntest} PASSED: {_npass}"
+ " FAIL: {_nfail} ERROR: {_nerror} SKIP: {_nskip}"
+ )
+ ntest += _ntest
+ npass += _npass
+ nfail += _nfail
+ nerror += _nerror
+ nskip += _nskip
+ print(f"Total: {ntest} PASSED: {npass} FAIL: {nfail} ERROR: {nerror} SKIP: {nskip}")
+
+
+def get_global_testcase(results):
+ for group in results:
+ for testcase in results[group]["testcase"]:
+ if "@file" not in testcase:
+ return testcase
+ return None
+
+
+def get_filtered(tfilters, results, args):
+ if isinstance(tfilters, str) or tfilters is None:
+ tfilters = [tfilters]
+ found_files = OrderedDict()
+ for group in results:
+ if isinstance(results[group]["testcase"], list):
+ tlist = results[group]["testcase"]
+ else:
+ tlist = [results[group]["testcase"]]
+ for testcase in tlist:
+ for tfilter in tfilters:
+ if tfilter is None:
+ if (
+ "failure" not in testcase
+ and "error" not in testcase
+ and "skipped" not in testcase
+ ):
+ break
+ elif tfilter in testcase:
+ break
+ else:
+ continue
+ # cname = testcase["@classname"]
+ fname = testcase.get("@file", "")
+ cname = testcase.get("@classname", "")
+ if not fname and not cname:
+ name = testcase.get("@name", "")
+ if not name:
+ continue
+ # If we had a failure at the module level we could be here.
+ fname = name.replace(".", "/") + ".py"
+ tcname = fname
+ else:
+ if not fname:
+ fname = cname.replace(".", "/") + ".py"
+ if args.files_only or "@name" not in testcase:
+ tcname = fname
+ else:
+ tcname = fname + "::" + testcase["@name"]
+ found_files[tcname] = testcase
+ return found_files
+
+
+def dump_testcase(testcase):
+ expand_keys = ("failure", "error", "skipped")
+
+ s = ""
+ for key, val in testcase.items():
+ if isinstance(val, str) or isinstance(val, float) or isinstance(val, int):
+ s += "{}: {}\n".format(key, val)
+ else:
+ for k2, v2 in val.items():
+ s += "{}: {}\n".format(k2, v2)
+ return s
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-A",
+ "--save",
+ action="store_true",
+ help="Save /tmp/topotests{,.xml} in --rundir if --rundir does not yet exist",
+ )
+ parser.add_argument(
+ "-F",
+ "--files-only",
+ action="store_true",
+ help="print test file names rather than individual full testcase names",
+ )
+ parser.add_argument(
+ "-S",
+ "--select",
+ default="fe",
+ help="select results combination of letters: 'e'rrored 'f'ailed 'p'assed 's'kipped.",
+ )
+ parser.add_argument(
+ "-r",
+ "--results",
+ help="xml results file or directory containing xml results file",
+ )
+ parser.add_argument("--rundir", help=argparse.SUPPRESS)
+ parser.add_argument(
+ "-E",
+ "--enumerate",
+ action="store_true",
+ help="enumerate each item (results scoped)",
+ )
+ parser.add_argument("-T", "--test", help="print testcase at enumeration")
+ parser.add_argument(
+ "--errmsg", action="store_true", help="print testcase error message"
+ )
+ parser.add_argument(
+ "--errtext", action="store_true", help="print testcase error text"
+ )
+ parser.add_argument("--time", action="store_true", help="print testcase run times")
+
+ parser.add_argument("-s", "--summary", action="store_true", help="print summary")
+ parser.add_argument("-v", "--verbose", action="store_true", help="be verbose")
+ args = parser.parse_args()
+
+ if args.save and args.results and not os.path.exists(args.results):
+ if not os.path.exists("/tmp/topotests"):
+ logging.critical('No "/tmp/topotests" directory to save')
+ sys.exit(1)
+ subprocess.run(["mv", "/tmp/topotests", args.results])
+ # # Old location for results
+ # if os.path.exists("/tmp/topotests.xml", args.results):
+ # subprocess.run(["mv", "/tmp/topotests.xml", args.results])
+
+ assert (
+ args.test is None or not args.files_only
+ ), "Can't have both --files and --test"
+
+ results = {}
+ ttfiles = []
+ if args.rundir:
+ basedir = os.path.realpath(args.rundir)
+ os.chdir(basedir)
+
+ newfiles = glob.glob("tt-group-*/topotests.xml")
+ if newfiles:
+ ttfiles.extend(newfiles)
+ if os.path.exists("topotests.xml"):
+ ttfiles.append("topotests.xml")
+ else:
+ if args.results:
+ if os.path.exists(os.path.join(args.results, "topotests.xml")):
+ args.results = os.path.join(args.results, "topotests.xml")
+ if not os.path.exists(args.results):
+ logging.critical("%s doesn't exist", args.results)
+ sys.exit(1)
+ ttfiles = [args.results]
+
+ if not ttfiles and os.path.exists("/tmp/topotests.xml"):
+ ttfiles.append("/tmp/topotests.xml")
+
+ for f in ttfiles:
+ m = re.match(r"tt-group-(\d+)/topotests.xml", f)
+ group = int(m.group(1)) if m else 0
+ with open(f) as xml_file:
+ results[group] = xmltodict.parse(xml_file.read())["testsuites"]["testsuite"]
+
+ filters = []
+ if "e" in args.select:
+ filters.append("error")
+ if "f" in args.select:
+ filters.append("failure")
+ if "s" in args.select:
+ filters.append("skipped")
+ if "p" in args.select:
+ filters.append(None)
+
+ found_files = get_filtered(filters, results, args)
+ if found_files:
+ if args.test is not None:
+ if args.test == "all":
+ keys = found_files.keys()
+ else:
+ keys = [list(found_files.keys())[int(args.test)]]
+ for key in keys:
+ testcase = found_files[key]
+ if args.errtext:
+ if "error" in testcase:
+ errmsg = testcase["error"]["#text"]
+ elif "failure" in testcase:
+ errmsg = testcase["failure"]["#text"]
+ else:
+ errmsg = "none found"
+ s = "{}: {}".format(key, errmsg)
+ elif args.time:
+ text = testcase["@time"]
+ s = "{}: {}".format(text, key)
+ elif args.errmsg:
+ if "error" in testcase:
+ errmsg = testcase["error"]["@message"]
+ elif "failure" in testcase:
+ errmsg = testcase["failure"]["@message"]
+ else:
+ errmsg = "none found"
+ s = "{}: {}".format(key, errmsg)
+ else:
+ s = dump_testcase(testcase)
+ print(s)
+ elif filters:
+ if args.enumerate:
+ print(
+ "\n".join(["{} {}".format(i, x) for i, x in enumerate(found_files)])
+ )
+ else:
+ print("\n".join(found_files))
+
+ if args.summary:
+ print_summary(results, args)
+
+
+if __name__ == "__main__":
+ main()
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-
pytestmark = [pytest.mark.bgpd, pytest.mark.bfdd]
-class BFDTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # Create 4 routers.
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
-
-
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(BFDTopo, mod.__name__)
+ topodef = {
+ "s1": ("r1", "r2"),
+ "s2": ("r2", "r3"),
+ }
+ tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import sys
import pytest
import json
-import re
from time import sleep
-from time import time
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-
pytestmark = [pytest.mark.bfdd, pytest.mark.isisd]
-class TemplateTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5"]:
- tgen.add_router(router)
-
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1")
-
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt2")
-
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt4")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt3")
-
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
-
-
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ topodef = {
+ "s1": ("rt1:eth-rt2", "rt2:eth-rt1"),
+ "s2": ("rt1:eth-rt3", "rt3:eth-rt1"),
+ "s3": ("rt2:eth-rt5", "rt5:eth-rt2"),
+ "s4": ("rt3:eth-rt4", "rt4:eth-rt3"),
+ "s5": ("rt4:eth-rt5", "rt5:eth-rt4"),
+ }
+ tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import sys
import pytest
import json
-import re
from time import sleep
-from time import time
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-
pytestmark = [pytest.mark.bfdd, pytest.mark.ospfd]
-class TemplateTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5"]:
- tgen.add_router(router)
-
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1")
-
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt2")
-
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt4")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt3")
-
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
-
-
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ topodef = {
+ "s1": ("rt1:eth-rt2", "rt2:eth-rt1"),
+ "s2": ("rt1:eth-rt3", "rt3:eth-rt1"),
+ "s3": ("rt2:eth-rt5", "rt5:eth-rt2"),
+ "s4": ("rt3:eth-rt4", "rt4:eth-rt3"),
+ "s5": ("rt4:eth-rt5", "rt5:eth-rt4"),
+ }
+ tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
# For all registered routers, load the zebra configuration file
- for rname, router in router_list.iteritems():
+ for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-
pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd, pytest.mark.isisd, pytest.mark.ospfd]
-class BFDProfTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # Create 6 routers
- for routern in range(1, 7):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
-
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r4"])
-
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r4"])
- switch.add_link(tgen.gears["r5"])
-
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r6"])
-
-
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(BFDProfTopo, mod.__name__)
+
+ topodef = {
+ "s1": ("r1", "r2"),
+ "s2": ("r2", "r3"),
+ "s3": ("r3", "r4"),
+ "s4": ("r4", "r5"),
+ "s5": ("r1", "r6"),
+ }
+ tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-
pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd]
-class BFDTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # Create 4 routers
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
-
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r4"])
-
-
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(BFDTopo, mod.__name__)
+ topodef = {
+ "s1": ("r1", "r2"),
+ "s2": ("r2", "r3"),
+ "s3": ("r2", "r4"),
+ }
+ tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-
pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd, pytest.mark.ospfd]
-class BFDTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # Create 4 routers.
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
-
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r4"])
-
-
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(BFDTopo, mod.__name__)
+ topodef = {
+ "s1": ("r1", "r2"),
+ "s2": ("r2", "r3"),
+ "s3": ("r2", "r4"),
+ }
+ tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-
pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd]
-class BFDTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # Create 4 routers
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
-
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r4"])
-
-
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(BFDTopo, mod.__name__)
+ topodef = {
+ "s1": ("r1", "r2"),
+ "s2": ("r2", "r3"),
+ "s3": ("r3", "r4"),
+ }
+ tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bfdd, pytest.mark.bgpd]
-class BFDTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ # Create 4 routers
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- # Create 4 routers
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
-
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r4"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(BFDTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
logger.info("Testing with VRF Namespace support")
- cmds = [
- "if [ -e /var/run/netns/{0}-bfd-cust1 ] ; then ip netns del {0}-bfd-cust1 ; fi",
- "ip netns add {0}-bfd-cust1",
- "ip link set dev {0}-eth0 netns {0}-bfd-cust1 up",
- ]
- cmds2 = [
- "ip link set dev {0}-eth1 netns {0}-bfd-cust1",
- "ip netns exec {0}-bfd-cust1 ip link set {0}-eth1 up",
- "ip link set dev {0}-eth2 netns {0}-bfd-cust1 up",
- ]
-
for rname, router in router_list.items():
# create VRF rx-bfd-cust1 and link rx-eth0 to rx-bfd-cust1
- for cmd in cmds:
- output = tgen.net[rname].cmd(cmd.format(rname))
+ ns = "{}-bfd-cust1".format(rname)
+ router.net.add_netns(ns)
+ router.net.set_intf_netns(rname + "-eth0", ns, up=True)
if rname == "r2":
- for cmd in cmds2:
- output = tgen.net[rname].cmd(cmd.format(rname))
+ router.net.set_intf_netns(rname + "-eth1", ns, up=True)
+ router.net.set_intf_netns(rname + "-eth2", ns, up=True)
for rname, router in router_list.items():
router.load_config(
def teardown_module(_mod):
"Teardown the pytest environment"
tgen = get_topogen()
- # move back rx-eth0 to default VRF
- # delete rx-vrf
- cmds = [
- "ip netns exec {0}-bfd-cust1 ip link set {0}-eth0 netns 1",
- "ip netns delete {0}-bfd-cust1",
- ]
- cmds2 = [
- "ip netns exec {0}-bfd-cust1 ip link set {0}-eth1 netns 1",
- "ip netns exec {0}-cust2 ip link set {0}-eth1 netns 1",
- ]
+ # Move interfaces out of vrf namespace and delete the namespace
router_list = tgen.routers()
for rname, router in router_list.items():
if rname == "r2":
- for cmd in cmds2:
- tgen.net[rname].cmd(cmd.format(rname))
- for cmd in cmds:
- tgen.net[rname].cmd(cmd.format(rname))
+ router.net.reset_intf_netns(rname + "-eth2")
+ router.net.reset_intf_netns(rname + "-eth1")
+ router.net.reset_intf_netns(rname + "-eth0")
+ router.net.delete_netns("{}-bfd-cust1".format(rname))
tgen.stop_topology()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
-import json
-import time
import pytest
import functools
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class BgpAggregateAddressTopo1(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ r1 = tgen.add_router("r1")
+ r2 = tgen.add_router("r2")
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1")
- r1 = tgen.add_router("r1")
- r2 = tgen.add_router("r2")
- peer1 = tgen.add_exabgp_peer(
- "peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1"
- )
-
- switch = tgen.add_switch("s1")
- switch.add_link(r1)
- switch.add_link(peer1)
+ switch = tgen.add_switch("s1")
+ switch.add_link(r1)
+ switch.add_link(peer1)
- switch = tgen.add_switch("s2")
- switch.add_link(r1)
- switch.add_link(r2)
+ switch = tgen.add_switch("s2")
+ switch.add_link(r1)
+ switch.add_link(r2)
def setup_module(mod):
- tgen = Topogen(BgpAggregateAddressTopo1, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router = tgen.gears["r1"]
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class BgpAggregatorAsnZero(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ r1 = tgen.add_router("r1")
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1")
- r1 = tgen.add_router("r1")
- peer1 = tgen.add_exabgp_peer(
- "peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1"
- )
-
- switch = tgen.add_switch("s1")
- switch.add_link(r1)
- switch.add_link(peer1)
+ switch = tgen.add_switch("s1")
+ switch.add_link(r1)
+ switch.add_link(peer1)
def setup_module(mod):
- tgen = Topogen(BgpAggregatorAsnZero, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router = tgen.gears["r1"]
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
- clear_bgp_and_verify,
verify_bgp_rib,
)
from lib.topojson import build_topo_from_json, build_config_from_json
NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
-class BGPALLOWASIN(Topo):
- """
- Test BGPALLOWASIN - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
+def build_topo(tgen):
+ """Build function"""
- # Building topology from json file
- build_topo_from_json(tgen, topo)
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(BGPALLOWASIN, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
protocol=protocol,
expected=False,
)
- assert result is not True, "Testcase {} : Failed \n"
- "Expected behavior: routes should not present in rib \n"
- "Error: {}".format(tc_name, result)
+ assert result is not True, (
+ "Testcase {} : Failed \n".format(tc_name)
+ + "Expected behavior: routes should not present in rib \n"
+ + "Error: {}".format(result)
+ )
step("Configure allowas-in on R3 for R2.")
step("We should see the prefix advertised from R1 in R3's BGP table.")
result = verify_rib(
tgen, "ipv6", dut, static_route_ipv6, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n"
- "Expected behavior: routes are should not be present in ipv6 rib\n"
- " Error: {}".format(tc_name, result)
+ assert result is not True, (
+ "Testcase {} : Failed \n".format(tc_name)
+ + "Expected behavior: routes are should not be present in ipv6 rib\n"
+ + " Error: {}".format(result)
+ )
step("Repeat the same test for IPv6 AFI.")
step("Configure allowas-in on R3 for R2 under IPv6 addr-family only")
result = verify_rib(
tgen, "ipv4", dut, static_route_ipv4, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n"
- "Expected behavior: routes should not be present in ipv4 rib\n"
- " Error: {}".format(tc_name, result)
+ assert result is not True, (
+ "Testcase {} : Failed \n".format(tc_name)
+ + "Expected behavior: routes should not be present in ipv4 rib\n"
+ + " Error: {}".format(result)
+ )
result = verify_rib(tgen, "ipv6", dut, static_route_ipv6, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
result = verify_rib(
tgen, addr_type, dut, static_routes, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n "
- "Expected behavior: routes are should not be present in rib\n"
- "Error: {}".format(tc_name, result)
+ assert result is not True, (
+ "Testcase {} : Failed \n ".format(tc_name)
+ + "Expected behavior: routes are should not be present in rib\n"
+ + "Error: {}".format(result)
+ )
for addr_type in ADDR_TYPES:
step('Configure "allowas-in 5" on R3 for R2.')
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class BgpAggregatorAsnZero(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ r1 = tgen.add_router("r1")
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1")
- r1 = tgen.add_router("r1")
- peer1 = tgen.add_exabgp_peer(
- "peer1", ip="10.0.0.2", defaultRoute="via 10.0.0.1"
- )
-
- switch = tgen.add_switch("s1")
- switch.add_link(r1)
- switch.add_link(peer1)
+ switch = tgen.add_switch("s1")
+ switch.add_link(r1)
+ switch.add_link(peer1)
def setup_module(mod):
- tgen = Topogen(BgpAggregatorAsnZero, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router = tgen.gears["r1"]
neighbor 2.2.2.2 ebgp-multihop 3
neighbor 2.2.2.2 password hello1
neighbor 2.2.2.2 timers 3 10
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 password hello2
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
address-family ipv4 unicast
neighbor 2.2.2.2 activate
neighbor 3.3.3.3 activate
neighbor 2.2.2.2 update-source lo1
neighbor 2.2.2.2 ebgp-multihop 3
neighbor 2.2.2.2 timers 3 10
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 2.2.2.2 password blue1
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo1
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
neighbor 3.3.3.3 password blue2
address-family ipv4 unicast
neighbor 2.2.2.2 activate
neighbor 2.2.2.2 update-source lo2
neighbor 2.2.2.2 ebgp-multihop 3
neighbor 2.2.2.2 timers 3 10
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 2.2.2.2 password red1
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo2
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
neighbor 3.3.3.3 password red2
address-family ipv4 unicast
neighbor 2.2.2.2 activate
neighbor 2.2.2.2 update-source lo1
neighbor 2.2.2.2 ebgp-multihop 3
neighbor 2.2.2.2 timers 3 10
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 2.2.2.2 password hello1
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo1
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
neighbor 3.3.3.3 password hello2
address-family ipv4 unicast
neighbor 2.2.2.2 activate
+interface R1-eth0
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth1
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth2
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth3
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth4
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth5
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
router ospf
network 10.10.0.0/16 area 0
network 10.20.0.0/16 area 0
- network 1.1.1.1/32 area 0
+ network 1.1.1.1/32 area 0
\ No newline at end of file
+interface R1-eth0
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth1
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth2
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth3
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth4
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth5
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
router ospf vrf blue
network 10.10.0.0/16 area 0
network 10.20.0.0/16 area 0
network 1.1.1.1/32 area 0
-
router ospf vrf red
network 10.10.0.0/16 area 0
network 10.20.0.0/16 area 0
+interface R1-eth0
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth1
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth2
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth3
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth4
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R1-eth5
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
router ospf vrf blue
network 10.10.0.0/16 area 0
network 10.20.0.0/16 area 0
neighbor 1.1.1.1 update-source lo
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password hello1
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
neighbor 3.3.3.3 password hello3
address-family ipv4 unicast
neighbor 1.1.1.1 activate
neighbor 1.1.1.1 update-source lo1
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password blue1
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo1
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
neighbor 3.3.3.3 password blue3
address-family ipv4 unicast
neighbor 1.1.1.1 activate
neighbor 1.1.1.1 update-source lo2
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password red1
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo2
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
neighbor 3.3.3.3 password red3
address-family ipv4 unicast
neighbor 1.1.1.1 activate
neighbor 1.1.1.1 update-source lo1
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password blue1
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo1
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
neighbor 3.3.3.3 password blue3
address-family ipv4 unicast
neighbor 1.1.1.1 activate
neighbor 1.1.1.1 update-source lo2
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password red1
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo2
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
neighbor 3.3.3.3 password red3
address-family ipv4 unicast
neighbor 1.1.1.1 activate
neighbor 1.1.1.1 update-source lo
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password hello1
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
neighbor 3.3.3.3 password hello3
address-family ipv4 unicast
neighbor 1.1.1.1 activate
neighbor 1.1.1.1 update-source lo1
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password hello1
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo1
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
neighbor 3.3.3.3 password hello3
address-family ipv4 unicast
neighbor 1.1.1.1 activate
neighbor 1.1.1.1 update-source lo1
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password hello1
neighbor 3.3.3.3 remote-as 65003
neighbor 3.3.3.3 update-source lo1
neighbor 3.3.3.3 ebgp-multihop 3
neighbor 3.3.3.3 timers 3 10
- neighbor 3.3.3.3 timers connect 10
+ neighbor 3.3.3.3 timers connect 5
neighbor 3.3.3.3 password hello3
address-family ipv4 unicast
neighbor 1.1.1.1 activate
+interface R2-eth0
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth1
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth2
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth3
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth4
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth5
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
router ospf
network 10.10.0.0/16 area 0
network 10.30.0.0/16 area 0
+interface R2-eth0
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth1
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth2
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth3
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth4
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth5
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
router ospf vrf blue
network 10.10.0.0/16 area 0
network 10.30.0.0/16 area 0
network 2.2.2.2/32 area 0
-
router ospf vrf red
network 10.10.0.0/16 area 0
network 10.30.0.0/16 area 0
+interface R2-eth0
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth1
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth2
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth3
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth4
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R2-eth5
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
router ospf vrf blue
network 10.10.0.0/16 area 0
network 10.30.0.0/16 area 0
neighbor 1.1.1.1 update-source lo
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password hello2
neighbor 2.2.2.2 remote-as 65002
neighbor 2.2.2.2 update-source lo
neighbor 2.2.2.2 ebgp-multihop 3
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 2.2.2.2 timers 3 10
neighbor 2.2.2.2 password hello3
address-family ipv4 unicast
neighbor 1.1.1.1 update-source lo1
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password blue2
neighbor 2.2.2.2 remote-as 65002
neighbor 2.2.2.2 update-source lo1
neighbor 2.2.2.2 ebgp-multihop 3
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 2.2.2.2 timers 3 10
neighbor 2.2.2.2 password blue3
address-family ipv4 unicast
neighbor 1.1.1.1 update-source lo2
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password red2
neighbor 2.2.2.2 remote-as 65002
neighbor 2.2.2.2 update-source lo2
neighbor 2.2.2.2 ebgp-multihop 3
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 2.2.2.2 timers 3 10
neighbor 2.2.2.2 password red3
address-family ipv4 unicast
neighbor 1.1.1.1 update-source lo1
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password blue2
neighbor 2.2.2.2 remote-as 65002
neighbor 2.2.2.2 update-source lo1
neighbor 2.2.2.2 ebgp-multihop 3
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 2.2.2.2 timers 3 10
neighbor 2.2.2.2 password blue3
address-family ipv4 unicast
neighbor 1.1.1.1 update-source lo2
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password red2
neighbor 2.2.2.2 remote-as 65002
neighbor 2.2.2.2 update-source lo2
neighbor 2.2.2.2 ebgp-multihop 3
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 2.2.2.2 timers 3 10
neighbor 2.2.2.2 password red3
address-family ipv4 unicast
neighbor 1.1.1.1 update-source lo
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password hello2
neighbor 2.2.2.2 remote-as 65002
neighbor 2.2.2.2 update-source lo
neighbor 2.2.2.2 ebgp-multihop 3
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 2.2.2.2 timers 3 10
neighbor 2.2.2.2 password hello3
address-family ipv4 unicast
neighbor 1.1.1.1 update-source lo1
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password hello2
neighbor 2.2.2.2 remote-as 65002
neighbor 2.2.2.2 update-source lo1
neighbor 2.2.2.2 ebgp-multihop 3
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 2.2.2.2 timers 3 10
neighbor 2.2.2.2 password hello3
address-family ipv4 unicast
neighbor 1.1.1.1 update-source lo1
neighbor 1.1.1.1 ebgp-multihop 3
neighbor 1.1.1.1 timers 3 10
- neighbor 1.1.1.1 timers connect 10
+ neighbor 1.1.1.1 timers connect 5
neighbor 1.1.1.1 password hello2
neighbor 2.2.2.2 remote-as 65002
neighbor 2.2.2.2 update-source lo1
neighbor 2.2.2.2 ebgp-multihop 3
- neighbor 2.2.2.2 timers connect 10
+ neighbor 2.2.2.2 timers connect 5
neighbor 2.2.2.2 timers 3 10
neighbor 2.2.2.2 password hello3
address-family ipv4 unicast
+interface R3-eth0
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth1
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth2
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth3
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth4
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth5
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
router ospf
network 10.20.0.0/16 area 0
network 10.30.0.0/16 area 0
+interface R3-eth0
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth1
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth2
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth3
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth4
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth5
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
router ospf vrf blue
network 10.20.0.0/16 area 0
network 10.30.0.0/16 area 0
network 3.3.3.3/32 area 0
-!
router ospf vrf red
network 10.20.0.0/16 area 0
network 10.30.0.0/16 area 0
+interface R3-eth0
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth1
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth2
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth3
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth4
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
+interface R3-eth5
+ ip ospf dead-interval 4
+ ip ospf hello-interval 1
router ospf vrf blue
network 10.20.0.0/16 area 0
network 10.30.0.0/16 area 0
setup is 3 routers with 3 links between each each link in a different vrf
Default, blue and red respectively
Tests check various fiddling with passwords and checking that the peer
-establishment is as expected and passwords are not leaked across sockets
+establishment is as expected and passwords are not leaked across sockets
for bgp instances
"""
+# pylint: disable=C0413
-import os
-import sys
import json
+import os
import platform
-from functools import partial
-import pytest
+import sys
from time import sleep
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib import topotest
+import pytest
+from lib import common_config, topotest
+from lib.common_config import (
+ save_initial_config_on_routers,
+ reset_with_new_configs,
+)
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-
-from lib.common_config import apply_raw_config
-
-ERROR_LIST = ["Malformed", "Failure", "Unknown", "Incomplete"]
pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd]
+CWD = os.path.dirname(os.path.realpath(__file__))
-class InvalidCLIError(Exception):
- """Raise when the CLI command is wrong"""
-
- pass
-
-
-class TemplateTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # This function only purpose is to define allocation and relationship
- # between routers, switches and hosts.
- #
- #
- # Create routers
- tgen.add_router("R1")
- tgen.add_router("R2")
- tgen.add_router("R3")
-
- # R1-R2 1
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["R1"])
- switch.add_link(tgen.gears["R2"])
-
- # R1-R3 1
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["R1"])
- switch.add_link(tgen.gears["R3"])
-
- # R2-R3 1
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["R2"])
- switch.add_link(tgen.gears["R3"])
-
- # R1-R2 2
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["R1"])
- switch.add_link(tgen.gears["R2"])
-
- # R1-R3 2
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["R1"])
- switch.add_link(tgen.gears["R3"])
-
- # R2-R3 2
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["R2"])
- switch.add_link(tgen.gears["R3"])
-
- # R1-R2 3
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["R1"])
- switch.add_link(tgen.gears["R2"])
- # R1-R3 2
- switch = tgen.add_switch("s8")
- switch.add_link(tgen.gears["R1"])
- switch.add_link(tgen.gears["R3"])
+def build_topo(tgen):
+ tgen.add_router("R1")
+ tgen.add_router("R2")
+ tgen.add_router("R3")
- # R2-R3 2
- switch = tgen.add_switch("s9")
- switch.add_link(tgen.gears["R2"])
- switch.add_link(tgen.gears["R3"])
+ tgen.add_link(tgen.gears["R1"], tgen.gears["R2"])
+ tgen.add_link(tgen.gears["R1"], tgen.gears["R3"])
+ tgen.add_link(tgen.gears["R2"], tgen.gears["R3"])
+ tgen.add_link(tgen.gears["R1"], tgen.gears["R2"])
+ tgen.add_link(tgen.gears["R1"], tgen.gears["R3"])
+ tgen.add_link(tgen.gears["R2"], tgen.gears["R3"])
+ tgen.add_link(tgen.gears["R1"], tgen.gears["R2"])
+ tgen.add_link(tgen.gears["R1"], tgen.gears["R3"])
+ tgen.add_link(tgen.gears["R2"], tgen.gears["R3"])
def setup_module(mod):
"Sets up the pytest environment"
# This function initiates the topology build with Topogen...
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
tgen.start_topology()
r3 = tgen.gears["R3"]
# blue vrf
- r1.run("ip link add blue type vrf table 1001")
- r1.run("ip link set up dev blue")
- r2.run("ip link add blue type vrf table 1001")
- r2.run("ip link set up dev blue")
- r3.run("ip link add blue type vrf table 1001")
- r3.run("ip link set up dev blue")
-
- r1.run("ip link add lo1 type dummy")
- r1.run("ip link set lo1 master blue")
- r1.run("ip link set up dev lo1")
- r2.run("ip link add lo1 type dummy")
- r2.run("ip link set up dev lo1")
- r2.run("ip link set lo1 master blue")
- r3.run("ip link add lo1 type dummy")
- r3.run("ip link set up dev lo1")
- r3.run("ip link set lo1 master blue")
-
- r1.run("ip link set R1-eth2 master blue")
- r1.run("ip link set R1-eth3 master blue")
- r2.run("ip link set R2-eth2 master blue")
- r2.run("ip link set R2-eth3 master blue")
- r3.run("ip link set R3-eth2 master blue")
- r3.run("ip link set R3-eth3 master blue")
-
- r1.run("ip link set up dev R1-eth2")
- r1.run("ip link set up dev R1-eth3")
- r2.run("ip link set up dev R2-eth2")
- r2.run("ip link set up dev R2-eth3")
- r3.run("ip link set up dev R3-eth2")
- r3.run("ip link set up dev R3-eth3")
+ r1.cmd_raises("ip link add blue type vrf table 1001")
+ r1.cmd_raises("ip link set up dev blue")
+ r2.cmd_raises("ip link add blue type vrf table 1001")
+ r2.cmd_raises("ip link set up dev blue")
+ r3.cmd_raises("ip link add blue type vrf table 1001")
+ r3.cmd_raises("ip link set up dev blue")
+
+ r1.cmd_raises("ip link add lo1 type dummy")
+ r1.cmd_raises("ip link set lo1 master blue")
+ r1.cmd_raises("ip link set up dev lo1")
+ r2.cmd_raises("ip link add lo1 type dummy")
+ r2.cmd_raises("ip link set up dev lo1")
+ r2.cmd_raises("ip link set lo1 master blue")
+ r3.cmd_raises("ip link add lo1 type dummy")
+ r3.cmd_raises("ip link set up dev lo1")
+ r3.cmd_raises("ip link set lo1 master blue")
+
+ r1.cmd_raises("ip link set R1-eth2 master blue")
+ r1.cmd_raises("ip link set R1-eth3 master blue")
+ r2.cmd_raises("ip link set R2-eth2 master blue")
+ r2.cmd_raises("ip link set R2-eth3 master blue")
+ r3.cmd_raises("ip link set R3-eth2 master blue")
+ r3.cmd_raises("ip link set R3-eth3 master blue")
+
+ r1.cmd_raises("ip link set up dev R1-eth2")
+ r1.cmd_raises("ip link set up dev R1-eth3")
+ r2.cmd_raises("ip link set up dev R2-eth2")
+ r2.cmd_raises("ip link set up dev R2-eth3")
+ r3.cmd_raises("ip link set up dev R3-eth2")
+ r3.cmd_raises("ip link set up dev R3-eth3")
# red vrf
- r1.run("ip link add red type vrf table 1002")
- r1.run("ip link set up dev red")
- r2.run("ip link add red type vrf table 1002")
- r2.run("ip link set up dev red")
- r3.run("ip link add red type vrf table 1002")
- r3.run("ip link set up dev red")
-
- r1.run("ip link add lo2 type dummy")
- r1.run("ip link set lo2 master red")
- r1.run("ip link set up dev lo2")
- r2.run("ip link add lo2 type dummy")
- r2.run("ip link set up dev lo2")
- r2.run("ip link set lo2 master red")
- r3.run("ip link add lo2 type dummy")
- r3.run("ip link set up dev lo2")
- r3.run("ip link set lo2 master red")
-
- r1.run("ip link set R1-eth4 master red")
- r1.run("ip link set R1-eth5 master red")
- r2.run("ip link set R2-eth4 master red")
- r2.run("ip link set R2-eth5 master red")
- r3.run("ip link set R3-eth4 master red")
- r3.run("ip link set R3-eth5 master red")
-
- r1.run("ip link set up dev R1-eth4")
- r1.run("ip link set up dev R1-eth5")
- r2.run("ip link set up dev R2-eth4")
- r2.run("ip link set up dev R2-eth5")
- r3.run("ip link set up dev R3-eth4")
- r3.run("ip link set up dev R3-eth5")
+ r1.cmd_raises("ip link add red type vrf table 1002")
+ r1.cmd_raises("ip link set up dev red")
+ r2.cmd_raises("ip link add red type vrf table 1002")
+ r2.cmd_raises("ip link set up dev red")
+ r3.cmd_raises("ip link add red type vrf table 1002")
+ r3.cmd_raises("ip link set up dev red")
+
+ r1.cmd_raises("ip link add lo2 type dummy")
+ r1.cmd_raises("ip link set lo2 master red")
+ r1.cmd_raises("ip link set up dev lo2")
+ r2.cmd_raises("ip link add lo2 type dummy")
+ r2.cmd_raises("ip link set up dev lo2")
+ r2.cmd_raises("ip link set lo2 master red")
+ r3.cmd_raises("ip link add lo2 type dummy")
+ r3.cmd_raises("ip link set up dev lo2")
+ r3.cmd_raises("ip link set lo2 master red")
+
+ r1.cmd_raises("ip link set R1-eth4 master red")
+ r1.cmd_raises("ip link set R1-eth5 master red")
+ r2.cmd_raises("ip link set R2-eth4 master red")
+ r2.cmd_raises("ip link set R2-eth5 master red")
+ r3.cmd_raises("ip link set R3-eth4 master red")
+ r3.cmd_raises("ip link set R3-eth5 master red")
+
+ r1.cmd_raises("ip link set up dev R1-eth4")
+ r1.cmd_raises("ip link set up dev R1-eth5")
+ r2.cmd_raises("ip link set up dev R2-eth4")
+ r2.cmd_raises("ip link set up dev R2-eth5")
+ r3.cmd_raises("ip link set up dev R3-eth4")
+ r3.cmd_raises("ip link set up dev R3-eth5")
# This is a sample of configuration loading.
router_list = tgen.routers()
# For all registred routers, load the zebra configuration file
for rname, router in router_list.items():
- router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
- )
- router.load_config(
- TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
- )
- router.load_config(
- TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
- )
+ router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ router.load_config(TopoRouter.RD_OSPF)
+ router.load_config(TopoRouter.RD_BGP)
- # After loading the configurations, this function loads configured daemons.
+ # After copying the configurations, this function loads configured daemons.
tgen.start_router()
+ # Save the initial router config. reset_config_on_routers will return to this config.
+ save_initial_config_on_routers(tgen)
+
def teardown_module(mod):
"Teardown the pytest environment"
print(router.vtysh_cmd("show bgp {} neighbor".format(vrf_str(vrf))))
-def configure(conf_file):
- "configure from a file"
-
- tgen = get_topogen()
- router_list = tgen.routers()
- for rname, router in router_list.items():
- with open(
- os.path.join(CWD, "{}/{}").format(router.name, conf_file), "r+"
- ) as cfg:
- new_config = cfg.read()
-
- output = router.vtysh_multicmd(new_config, pretty_output=False)
- for out_err in ERROR_LIST:
- if out_err.lower() in output.lower():
- raise InvalidCLIError("%s" % output)
-
-
-def clear_bgp():
- "clear bgp configuration for a vrf"
-
- tgen = get_topogen()
- r1 = tgen.gears["R1"]
- r2 = tgen.gears["R2"]
- r3 = tgen.gears["R3"]
-
- r1.vtysh_cmd("conf t\nno router bgp 65001")
- r2.vtysh_cmd("conf t\nno router bgp 65002")
- r3.vtysh_cmd("conf t\nno router bgp 65003")
- r1.vtysh_cmd("conf t\nno router bgp 65001 vrf blue")
- r2.vtysh_cmd("conf t\nno router bgp 65002 vrf blue")
- r3.vtysh_cmd("conf t\nno router bgp 65003 vrf blue")
- r1.vtysh_cmd("conf t\nno router bgp 65001 vrf red")
- r2.vtysh_cmd("conf t\nno router bgp 65002 vrf red")
- r3.vtysh_cmd("conf t\nno router bgp 65003 vrf red")
-
-
-def configure_bgp(conf_file):
- "configure bgp from file"
-
- clear_bgp()
- configure(conf_file)
-
-
-def clear_ospf():
- "clear ospf configuration for a vrf"
-
- tgen = get_topogen()
- router_list = tgen.routers()
- for rname, router in router_list.items():
- router.vtysh_cmd("conf t\nno router ospf")
- router.vtysh_cmd("conf t\nno router ospf vrf blue")
- router.vtysh_cmd("conf t\nno router ospf vrf red")
-
+@common_config.retry(retry_timeout=190)
+def _check_neigh_state(router, peer, state, vrf=""):
+ "check BGP neighbor state on a router"
-def configure_ospf(conf_file):
- "configure bgp from file"
+ neigh_output = router.vtysh_cmd(
+ "show bgp {} neighbors {} json".format(vrf_str(vrf), peer)
+ )
- clear_ospf()
- configure(conf_file)
+ peer_state = "Unknown"
+ neigh_output_json = json.loads(neigh_output)
+ if peer in neigh_output_json:
+ peer_state = neigh_output_json[peer]["bgpState"]
+ if peer_state == state:
+ return True
+ return "{} peer with {} expected state {} got {} ".format(
+ router.name, peer, state, peer_state
+ )
def check_neigh_state(router, peer, state, vrf=""):
"check BGP neighbor state on a router"
- count = 0
- matched = False
- neigh_output = ""
- while count < 125:
- if vrf == "":
- neigh_output = router.vtysh_cmd("show bgp neighbors {} json".format(peer))
- else:
- neigh_output = router.vtysh_cmd(
- "show bgp vrf {} neighbors {} json".format(vrf, peer)
- )
- neigh_output_json = json.loads(neigh_output)
- if peer in neigh_output_json.keys():
- if neigh_output_json[peer]["bgpState"] == state:
- matched = True
- break
- count += 1
- sleep(1)
-
- assertmsg = "{} could not peer {} state expected {} got {} ".format(
- router.name, peer, state, neigh_output_json[peer]["bgpState"]
- )
- if matched != True:
- print_diag(vrf)
- assert matched == True, assertmsg
+ assertmsg = _check_neigh_state(router, peer, state, vrf)
+ assert assertmsg is True, assertmsg
def check_all_peers_established(vrf=""):
check_all_peers_established(vrf)
-def test_default_peer_established():
+def test_default_peer_established(tgen):
"default vrf 3 peers same password"
- configure_bgp("bgpd.conf")
- configure_ospf("ospfd.conf")
+ reset_with_new_configs(tgen, "bgpd.conf", "ospfd.conf")
check_all_peers_established()
- # tgen.mininet_cli()
-def test_default_peer_remove_passwords():
+def test_default_peer_remove_passwords(tgen):
"selectively remove passwords checking state"
- configure_bgp("bgpd.conf")
- configure_ospf("ospfd.conf")
+ reset_with_new_configs(tgen, "bgpd.conf", "ospfd.conf")
check_vrf_peer_remove_passwords()
-def test_default_peer_change_passwords():
+def test_default_peer_change_passwords(tgen):
"selectively change passwords checking state"
- configure_bgp("bgpd.conf")
- configure_ospf("ospfd.conf")
+ reset_with_new_configs(tgen, "bgpd.conf", "ospfd.conf")
check_vrf_peer_change_passwords()
-def test_default_prefix_peer_established():
+def test_default_prefix_peer_established(tgen):
"default vrf 3 peers same password with prefix config"
# only supported in kernel > 5.3
if topotest.version_cmp(platform.release(), "5.3") < 0:
return
- configure_bgp("bgpd_prefix.conf")
- configure_ospf("ospfd.conf")
+ reset_with_new_configs(tgen, "bgpd_prefix.conf", "ospfd.conf")
check_all_peers_established()
- # tgen.mininet_cli()
-def test_prefix_peer_remove_passwords():
+def test_prefix_peer_remove_passwords(tgen):
"selectively remove passwords checking state with prefix config"
# only supported in kernel > 5.3
if topotest.version_cmp(platform.release(), "5.3") < 0:
return
- configure_bgp("bgpd_prefix.conf")
- configure_ospf("ospfd.conf")
+ reset_with_new_configs(tgen, "bgpd_prefix.conf", "ospfd.conf")
check_vrf_peer_remove_passwords(prefix="yes")
-def test_prefix_peer_change_passwords():
+def test_prefix_peer_change_passwords(tgen):
"selecively change passwords checkig state with prefix config"
# only supported in kernel > 5.3
if topotest.version_cmp(platform.release(), "5.3") < 0:
return
- configure_bgp("bgpd_prefix.conf")
- configure_ospf("ospfd.conf")
+ reset_with_new_configs(tgen, "bgpd_prefix.conf", "ospfd.conf")
check_vrf_peer_change_passwords(prefix="yes")
-def test_vrf_peer_established():
+def test_vrf_peer_established(tgen):
"default vrf 3 peers same password with VRF config"
# clean routers and load vrf config
- configure_bgp("bgpd_vrf.conf")
- configure_ospf("ospfd_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_vrf.conf", "ospfd_vrf.conf")
check_all_peers_established("blue")
- # tgen.mininet_cli()
-def test_vrf_peer_remove_passwords():
+def test_vrf_peer_remove_passwords(tgen):
"selectively remove passwords checking state with VRF config"
- configure_bgp("bgpd_vrf.conf")
- configure_ospf("ospfd_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_vrf.conf", "ospfd_vrf.conf")
check_vrf_peer_remove_passwords(vrf="blue")
-def test_vrf_peer_change_passwords():
+def test_vrf_peer_change_passwords(tgen):
"selectively change passwords checking state with VRF config"
- configure_bgp("bgpd_vrf.conf")
- configure_ospf("ospfd_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_vrf.conf", "ospfd_vrf.conf")
check_vrf_peer_change_passwords(vrf="blue")
-def test_vrf_prefix_peer_established():
+def test_vrf_prefix_peer_established(tgen):
"default vrf 3 peers same password with VRF prefix config"
# only supported in kernel > 5.3
if topotest.version_cmp(platform.release(), "5.3") < 0:
return
- configure_bgp("bgpd_vrf_prefix.conf")
- configure_ospf("ospfd_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_vrf_prefix.conf", "ospfd_vrf.conf")
check_all_peers_established("blue")
-def test_vrf_prefix_peer_remove_passwords():
+def test_vrf_prefix_peer_remove_passwords(tgen):
"selectively remove passwords checking state with VRF prefix config"
# only supported in kernel > 5.3
if topotest.version_cmp(platform.release(), "5.3") < 0:
return
- configure_bgp("bgpd_vrf_prefix.conf")
- configure_ospf("ospfd_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_vrf_prefix.conf", "ospfd_vrf.conf")
check_vrf_peer_remove_passwords(vrf="blue", prefix="yes")
-def test_vrf_prefix_peer_change_passwords():
+def test_vrf_prefix_peer_change_passwords(tgen):
"selectively change passwords checking state with VRF prefix config"
# only supported in kernel > 5.3
if topotest.version_cmp(platform.release(), "5.3") < 0:
return
- configure_bgp("bgpd_vrf_prefix.conf")
- configure_ospf("ospfd_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_vrf_prefix.conf", "ospfd_vrf.conf")
check_vrf_peer_change_passwords(vrf="blue", prefix="yes")
-def test_multiple_vrf_peer_established():
+def test_multiple_vrf_peer_established(tgen):
"default vrf 3 peers same password with multiple VRFs"
- configure_bgp("bgpd_multi_vrf.conf")
- configure_ospf("ospfd_multi_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_multi_vrf.conf", "ospfd_multi_vrf.conf")
check_all_peers_established("blue")
check_all_peers_established("red")
- # tgen.mininet_cli()
-def test_multiple_vrf_peer_remove_passwords():
+def test_multiple_vrf_peer_remove_passwords(tgen):
"selectively remove passwords checking state with multiple VRFs"
- configure_bgp("bgpd_multi_vrf.conf")
- configure_ospf("ospfd_multi_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_multi_vrf.conf", "ospfd_multi_vrf.conf")
check_vrf_peer_remove_passwords("blue")
check_all_peers_established("red")
check_vrf_peer_remove_passwords("red")
check_all_peers_established("blue")
- # tgen.mininet_cli()
-def test_multiple_vrf_peer_change_passwords():
+def test_multiple_vrf_peer_change_passwords(tgen):
"selectively change passwords checking state with multiple VRFs"
- configure_bgp("bgpd_multi_vrf.conf")
- configure_ospf("ospfd_multi_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_multi_vrf.conf", "ospfd_multi_vrf.conf")
check_vrf_peer_change_passwords("blue")
check_all_peers_established("red")
check_vrf_peer_change_passwords("red")
check_all_peers_established("blue")
- # tgen.mininet_cli()
-def test_multiple_vrf_prefix_peer_established():
+def test_multiple_vrf_prefix_peer_established(tgen):
"default vrf 3 peers same password with multilpe VRFs and prefix config"
# only supported in kernel > 5.3
if topotest.version_cmp(platform.release(), "5.3") < 0:
return
- configure_bgp("bgpd_multi_vrf_prefix.conf")
- configure_ospf("ospfd_multi_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_multi_vrf_prefix.conf", "ospfd_multi_vrf.conf")
check_all_peers_established("blue")
check_all_peers_established("red")
- # tgen.mininet_cli()
-def test_multiple_vrf_prefix_peer_remove_passwords():
+def test_multiple_vrf_prefix_peer_remove_passwords(tgen):
"selectively remove passwords checking state with multiple vrfs and prefix config"
# only supported in kernel > 5.3
if topotest.version_cmp(platform.release(), "5.3") < 0:
return
- configure_bgp("bgpd_multi_vrf_prefix.conf")
- configure_ospf("ospfd_multi_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_multi_vrf_prefix.conf", "ospfd_multi_vrf.conf")
check_vrf_peer_remove_passwords(vrf="blue", prefix="yes")
check_all_peers_established("red")
check_vrf_peer_remove_passwords(vrf="red", prefix="yes")
check_all_peers_established("blue")
- # tgen.mininet_cli()
-def test_multiple_vrf_prefix_peer_change_passwords():
+def test_multiple_vrf_prefix_peer_change_passwords(tgen):
"selectively change passwords checking state with multiple vrfs and prefix config"
# only supported in kernel > 5.3
if topotest.version_cmp(platform.release(), "5.3") < 0:
return
- configure_bgp("bgpd_multi_vrf_prefix.conf")
- configure_ospf("ospfd_multi_vrf.conf")
+ reset_with_new_configs(tgen, "bgpd_multi_vrf_prefix.conf", "ospfd_multi_vrf.conf")
check_vrf_peer_change_passwords(vrf="blue", prefix="yes")
check_all_peers_established("red")
check_vrf_peer_change_passwords(vrf="red", prefix="yes")
check_all_peers_established("blue")
- # tgen.mininet_cli()
-def test_memory_leak():
+def test_memory_leak(tgen):
"Run the memory leak test and report results."
- tgen = get_topogen()
if not tgen.is_memleak_enabled():
pytest.skip("Memory leak test/report is disabled")
- Verify routes not installed in zebra when /32 routes received
with loopback BGP session subnet
"""
+# XXX clean up in later commit to avoid conflict on rebase
+# pylint: disable=C0413
import os
import sys
-import json
import time
import pytest
from copy import deepcopy
# Required to instantiate the topology builder class.
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
-
+from lib.bgp import (
+ clear_bgp_and_verify,
+ create_router_bgp,
+ modify_as_number,
+ verify_as_numbers,
+ verify_bgp_convergence,
+ verify_bgp_rib,
+ verify_bgp_timers_and_functionality,
+ verify_router_id,
+)
from lib.common_config import (
- step,
- start_topology,
- write_test_header,
- write_test_footer,
- reset_config_on_routers,
- create_static_routes,
- verify_rib,
- verify_admin_distance_for_static_routes,
- check_address_types,
- apply_raw_config,
addKernelRoute,
- verify_fib_routes,
+ apply_raw_config,
+ check_address_types,
create_prefix_lists,
create_route_maps,
- verify_bgp_community,
+ create_static_routes,
required_linux_kernel_version,
+ reset_config_on_routers,
+ start_topology,
+ step,
+ verify_admin_distance_for_static_routes,
+ verify_bgp_community,
+ verify_fib_routes,
+ verify_rib,
+ write_test_footer,
+ write_test_header,
)
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+from lib.topojson import build_config_from_json
from lib.topolog import logger
-from lib.bgp import (
- verify_bgp_convergence,
- create_router_bgp,
- verify_router_id,
- modify_as_number,
- verify_as_numbers,
- clear_bgp_and_verify,
- verify_bgp_timers_and_functionality,
- verify_bgp_rib,
-)
-from lib.topojson import build_topo_from_json, build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/bgp_basic_functionality.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global Variable
KEEPALIVETIMER = 2
HOLDDOWNTIMER = 6
}
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/bgp_basic_functionality.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
def test_modify_and_delete_router_id(request):
- """ Test to modify, delete and verify router-id. """
+ """Test to modify, delete and verify router-id."""
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
tc_name = request.node.name
write_test_header(tc_name)
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
# Modify router id
input_dict = {
"r1": {"bgp": {"router_id": "12.12.12.12"}},
tc_name = request.node.name
write_test_header(tc_name)
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
input_dict = {
"r1": {"bgp": {"local_as": 131079}},
"r2": {"bgp": {"local_as": 131079}},
tc_name = request.node.name
write_test_header(tc_name)
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
# Api call to modify AS number
input_dict = {
"r1": {
},
}
result = modify_as_number(tgen, topo, input_dict)
- try:
- assert result is True
- except AssertionError:
- logger.info("Expected behaviour: {}".format(result))
- logger.info("BGP config is not created because of invalid ASNs")
+ assert (
+ result is not True
+ ), "Expected BGP config is not created because of invalid ASNs: {}".format(result)
+
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ result = verify_bgp_convergence(tgen, topo)
+ if result != True:
+ assert False, "Testcase " + tc_name + " :Failed \n Error: {}".format(result)
write_test_footer(tc_name)
tc_name = request.node.name
write_test_header(tc_name)
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
+
+ result = verify_bgp_convergence(tgen, topo)
+ if result != True:
+ assert False, "Testcase " + tc_name + " :Failed \n Error: {}".format(result)
+
# Api call to modify AS number
input_dict = {
"r1": {"bgp": {"local_as": 131079}},
def test_static_routes(request):
- """ Test to create and verify static routes. """
+ """Test to create and verify static routes."""
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
def test_admin_distance_for_existing_static_routes(request):
- """ Test to modify and verify admin distance for existing static routes."""
+ """Test to modify and verify admin distance for existing static routes."""
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
def test_advertise_network_using_network_command(request):
- """ Test advertise networks using network command."""
+ """Test advertise networks using network command."""
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- # reset_config_on_routers(tgen)
+ # Creating configuration from JSON
+ reset_config_on_routers(tgen)
step("Configure static routes and redistribute in BGP on R3")
for addr_type in ADDR_TYPES:
}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
dut = "r4"
}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
input_dict_4 = {"largeCommunity": "500:500:500", "community": "500:500"}
dut = "r1"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_fib_routes(tgen, addr_type, dut, input_dict_r1)
- assert result is not True, "Testcase {} : Failed \n"
- "Expected behavior: routes should not present in fib \n"
- "Error: {}".format(tc_name, result)
+ result = verify_fib_routes(
+ tgen, addr_type, dut, input_dict_r1, expected=False
+ ) # pylint: disable=E1123
+ assert result is not True, (
+ "Testcase {} : Failed \n".format(tc_name)
+ + "Expected behavior: routes should not present in fib \n"
+ + "Error: {}".format(result)
+ )
step("Verify Ipv4 and Ipv6 network installed in r3 RIB but not in FIB")
input_dict_r3 = {
dut = "r3"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_fib_routes(tgen, addr_type, dut, input_dict_r1)
- assert result is not True, "Testcase {} : Failed \n"
- "Expected behavior: routes should not present in fib \n"
- "Error: {}".format(tc_name, result)
+ result = verify_fib_routes(
+ tgen, addr_type, dut, input_dict_r1, expected=False
+ ) # pylint: disable=E1123
+ assert result is not True, (
+ "Testcase {} : Failed \n".format(tc_name)
+ + "Expected behavior: routes should not present in fib \n"
+ + "Error: {}".format(result)
+ )
write_test_footer(tc_name)
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
from lib.common_config import step
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
-
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r4"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
-from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import time
-import json
import pytest
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
- clear_bgp_and_verify,
verify_bgp_rib,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from copy import deepcopy
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/bgp_communities.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global variables
BGP_CONVERGENCE = False
ADDR_TYPES = check_address_types()
NEXT_HOP_IP = {}
-class BGPCOMMUNITIES(Topo):
- """
- Test BGPCOMMUNITIES - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(BGPCOMMUNITIES, mod.__name__)
+ json_file = "{}/bgp_communities.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
)
result = verify_bgp_rib(tgen, addr_type, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n "
- " Routes still present in R3 router. Error: {}".format(tc_name, result)
+ assert result is not True, "Testcase {} : Failed \n ".format(
+ tc_name
+ ) + " Routes still present in R3 router. Error: {}".format(result)
result = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n "
- " Routes still present in R3 router. Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Routes still present in R3 router. Error: {}".format(
+ tc_name, result
+ )
step("Remove and Add no advertise community")
# Configure neighbor for route map
)
result = verify_bgp_rib(tgen, addr_type, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n "
- " Routes still present in R3 router. Error: {}".format(tc_name, result)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes still present in R3 router. Error: {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n "
- " Routes still present in R3 router. Error: {}".format(tc_name, result)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes still present in R3 router. Error: {}".format(
+ tc_name, result
+ )
step("Repeat above steps when IBGP nbr configured between R1, R2 & R2, R3")
topo1 = deepcopy(topo)
)
result = verify_bgp_rib(tgen, addr_type, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n "
- " Routes still present in R3 router. Error: {}".format(tc_name, result)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes still present in R3 router. Error: {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n "
- " Routes still present in R3 router. Error: {}".format(tc_name, result)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes still present in R3 router. Error: {}".format(
+ tc_name, result
+ )
step("Remove and Add no advertise community")
# Configure neighbor for route map
)
result = verify_bgp_rib(tgen, addr_type, dut, input_dict)
- assert result is True, "Testcase {} : Failed \n "
- " Routes still present in R3 router. Error: {}".format(tc_name, result)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes still present in R3 router. Error: {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n "
- " Routes still present in R3 router. Error: {}".format(tc_name, result)
+ assert (
+ result is True
+ ), "Testcase {} : Failed \n Routes still present in R3 router. Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
import os
import sys
import time
-import json
import pytest
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
check_address_types,
step,
create_route_maps,
- create_prefix_lists,
create_route_maps,
required_linux_kernel_version,
)
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
- clear_bgp_and_verify,
verify_bgp_rib,
verify_bgp_community,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
-from copy import deepcopy
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/bgp_communities_topo2.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global variables
BGP_CONVERGENCE = False
ADDR_TYPES = check_address_types()
}
-class BGPCOMMUNITIES(Topo):
- """
- Test BGPCOMMUNITIES - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(BGPCOMMUNITIES, mod.__name__)
+ json_file = "{}/bgp_communities_topo2.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
input_dict_4,
next_hop=topo["routers"]["r1"]["links"]["r2"][addr_type].split("/")[0],
)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
0
],
)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
else:
],
expected=False,
)
- assert result is not True, "Testcase : Failed \n Error: {}".format(
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
}
}
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step("Configure redistribute static")
input_dict_2 = {
}
}
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step(
"Verify that these prefixes, originated on R1, are now"
input_dict_4,
next_hop=topo["routers"]["r1"]["links"]["r2"][addr_type].split("/")[0],
)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
input_dict_4,
next_hop=topo["routers"]["r1"]["links"]["r3"][addr_type].split("/")[0],
)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
from lib.common_config import step
from time import sleep
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ tgen.add_router("z1")
+ tgen.add_router("y1")
+ tgen.add_router("y2")
+ tgen.add_router("y3")
+ tgen.add_router("x1")
+ tgen.add_router("c1")
- tgen.add_router("z1")
- tgen.add_router("y1")
- tgen.add_router("y2")
- tgen.add_router("y3")
- tgen.add_router("x1")
- tgen.add_router("c1")
+ # 10.0.1.0/30
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["c1"])
+ switch.add_link(tgen.gears["x1"])
- # 10.0.1.0/30
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["c1"])
- switch.add_link(tgen.gears["x1"])
+ # 10.0.2.0/30
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["x1"])
+ switch.add_link(tgen.gears["y1"])
- # 10.0.2.0/30
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["x1"])
- switch.add_link(tgen.gears["y1"])
+ # 10.0.3.0/30
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["y1"])
+ switch.add_link(tgen.gears["y2"])
- # 10.0.3.0/30
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["y1"])
- switch.add_link(tgen.gears["y2"])
+ # 10.0.4.0/30
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["y1"])
+ switch.add_link(tgen.gears["y3"])
- # 10.0.4.0/30
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["y1"])
- switch.add_link(tgen.gears["y3"])
+ # 10.0.5.0/30
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["y2"])
+ switch.add_link(tgen.gears["y3"])
- # 10.0.5.0/30
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["y2"])
- switch.add_link(tgen.gears["y3"])
+ # 10.0.6.0/30
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["y2"])
+ switch.add_link(tgen.gears["z1"])
- # 10.0.6.0/30
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["y2"])
- switch.add_link(tgen.gears["z1"])
-
- # 10.0.7.0/30
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["y3"])
- switch.add_link(tgen.gears["z1"])
+ # 10.0.7.0/30
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["y3"])
+ switch.add_link(tgen.gears["z1"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class BgpConditionalAdvertisementTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ r1 = tgen.add_router("r1")
+ r2 = tgen.add_router("r2")
+ r3 = tgen.add_router("r3")
- r1 = tgen.add_router("r1")
- r2 = tgen.add_router("r2")
- r3 = tgen.add_router("r3")
+ switch = tgen.add_switch("s1")
+ switch.add_link(r1)
+ switch.add_link(r2)
- switch = tgen.add_switch("s1")
- switch.add_link(r1)
- switch.add_link(r2)
-
- switch = tgen.add_switch("s2")
- switch.add_link(r2)
- switch.add_link(r3)
+ switch = tgen.add_switch("s2")
+ switch.add_link(r2)
+ switch.add_link(r3)
def setup_module(mod):
logger.info("Running setup_module to create topology")
- tgen = Topogen(BgpConditionalAdvertisementTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import sys
import json
import pytest
-import functools
pytestmark = [pytest.mark.bgpd]
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
-from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
from lib.common_config import step
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r4"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
from lib.common_config import step
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
def _bgp_default_route_has_metric(router):
output = json.loads(router.vtysh_cmd("show ip bgp 0.0.0.0/0 json"))
expected = {
- "paths": [{"aspath": {"string": "65000 65000 65000 65000"}, "metric": 123}]
+ "paths": [
+ {
+ "aspath": {"string": "65000 65000 65000 65000"},
+ "metric": 123,
+ "community": None,
+ }
+ ]
}
return topotest.json_cmp(output, expected)
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
--- /dev/null
+!
+router bgp 65001
+ timers 3 10
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+ neighbor 192.168.1.2 timers connect 5
+ address-family ipv4 unicast
+ neighbor 192.168.1.2 disable-addpath-rx
+ exit-address-family
+!
--- /dev/null
+!
+int r1-eth0
+ ip address 192.168.1.1/24
+!
--- /dev/null
+router bgp 65002
+ timers 3 10
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ neighbor 192.168.1.1 timers connect 5
+ neighbor 192.168.2.3 remote-as external
+ neighbor 192.168.2.3 timers connect 5
+ neighbor 192.168.2.4 remote-as external
+ neighbor 192.168.2.4 timers connect 5
+ address-family ipv4 unicast
+ neighbor 192.168.1.1 addpath-tx-all-paths
+ exit-address-family
+!
--- /dev/null
+!
+int r2-eth0
+ ip address 192.168.1.2/24
+!
+int r2-eth1
+ ip address 192.168.2.2/24
+!
--- /dev/null
+router bgp 65003
+ timers 3 10
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers connect 5
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
--- /dev/null
+!
+int lo
+ ip address 172.16.16.254/32
+!
+int r3-eth0
+ ip address 192.168.2.3/24
+!
--- /dev/null
+router bgp 65004
+ timers 3 10
+ no bgp ebgp-requires-policy
+ neighbor 192.168.2.2 remote-as external
+ neighbor 192.168.2.2 timers connect 5
+ address-family ipv4 unicast
+ redistribute connected
+ exit-address-family
+!
--- /dev/null
+!
+int lo
+ ip address 172.16.16.254/32
+!
+int r4-eth0
+ ip address 192.168.2.4/24
+!
--- /dev/null
+#!/usr/bin/env python
+
+# Copyright (c) 2021 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test if AddPath RX direction is not negotiated via AddPath capability.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.common_config import step
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_disable_addpath_rx():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+
+ step(
+ "Check if r2 advertised only 2 paths to r1 (despite addpath-tx-all-paths enabled on r2)."
+ )
+
+ def check_bgp_advertised_routes(router):
+ output = json.loads(
+ router.vtysh_cmd(
+ "show bgp ipv4 unicast neighbor 192.168.1.1 advertised-routes json"
+ )
+ )
+ expected = {
+ "advertisedRoutes": {
+ "172.16.16.254/32": {
+ "addrPrefix": "172.16.16.254",
+ "prefixLen": 32,
+ },
+ "192.168.2.0/24": {
+ "addrPrefix": "192.168.2.0",
+ "prefixLen": 24,
+ },
+ },
+ "totalPrefixCounter": 2,
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(check_bgp_advertised_routes, r2)
+ success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "AddPath TX not working."
+
+ step("Check if AddPath RX is disabled on r1 and we receive only 2 paths.")
+
+ def check_bgp_disabled_addpath_rx(router):
+ output = json.loads(router.vtysh_cmd("show bgp neighbor 192.168.1.2 json"))
+ expected = {
+ "192.168.1.2": {
+ "bgpState": "Established",
+ "neighborCapabilities": {
+ "addPath": {
+ "ipv4Unicast": {"txReceived": True, "rxReceived": True}
+ },
+ },
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
+ }
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(check_bgp_disabled_addpath_rx, r1)
+ success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "AddPath RX advertised, but should not."
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
-
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
-
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ topodef = {"s1": ("r1", "r2")}
+ tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 7):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 7):
- tgen.add_router("r{}".format(routern))
-
- # Scenario 1.
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ # Scenario 1.
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- # Scenario 2.
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r4"])
+ # Scenario 2.
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
- # Scenario 3.
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r5"])
- switch.add_link(tgen.gears["r6"])
+ # Scenario 3.
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r5"])
+ switch.add_link(tgen.gears["r6"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
#####################################################
-class BGPECMPTopo1(Topo):
- "BGP ECMP Topology 1"
+def build_topo(tgen):
+ router = tgen.add_router("r1")
- def build(self, **_opts):
- tgen = get_topogen(self)
+ # Setup Switches - 1 switch per 5 peering routers
+ for swNum in range(1, (total_ebgp_peers + 4) // 5 + 1):
+ switch = tgen.add_switch("s{}".format(swNum))
+ switch.add_link(router)
- # Create the BGP router
- router = tgen.add_router("r1")
+ # Add 'total_ebgp_peers' number of eBGP ExaBGP neighbors
+ for peerNum in range(1, total_ebgp_peers + 1):
+ swNum = (peerNum - 1) // 5 + 1
- # Setup Switches - 1 switch per 5 peering routers
- for swNum in range(1, (total_ebgp_peers + 4) / 5 + 1):
- switch = tgen.add_switch("s{}".format(swNum))
- switch.add_link(router)
-
- # Add 'total_ebgp_peers' number of eBGP ExaBGP neighbors
- for peerNum in range(1, total_ebgp_peers + 1):
- swNum = (peerNum - 1) / 5 + 1
-
- peer_ip = "10.0.{}.{}".format(swNum, peerNum + 100)
- peer_route = "via 10.0.{}.1".format(swNum)
- peer = tgen.add_exabgp_peer(
- "peer{}".format(peerNum), ip=peer_ip, defaultRoute=peer_route
- )
+ peer_ip = "10.0.{}.{}".format(swNum, peerNum + 100)
+ peer_route = "via 10.0.{}.1".format(swNum)
+ peer = tgen.add_exabgp_peer(
+ "peer{}".format(peerNum), ip=peer_ip, defaultRoute=peer_route
+ )
- switch = tgen.gears["s{}".format(swNum)]
- switch.add_link(peer)
+ switch = tgen.gears["s{}".format(swNum)]
+ switch.add_link(peer)
#####################################################
def setup_module(module):
- tgen = Topogen(BGPECMPTopo1, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
# Starting Routers
def teardown_module(module):
+ del module
tgen = get_topogen()
tgen.stop_topology()
import os
import sys
import time
-import json
import pytest
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/ebgp_ecmp_topo2.json".format(CWD)
-
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global variables
NEXT_HOPS = {"ipv4": [], "ipv6": []}
INTF_LIST_R3 = []
BGP_CONVERGENCE = False
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment.
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ebgp_ecmp_topo2.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# Starting topology, create tmp files which are loaded to routers
# to start deamons and then start routers
@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
def test_ecmp_after_clear_bgp(request, ecmp_num, test_type):
- """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
+ """Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
tc_name = request.node.name
write_test_header(tc_name)
import os
import sys
import time
-import json
import pytest
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/ibgp_ecmp_topo2.json".format(CWD)
-
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global variables
NEXT_HOPS = {"ipv4": [], "ipv6": []}
INTF_LIST_R3 = []
BGP_CONVERGENCE = False
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment.
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ibgp_ecmp_topo2.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# Starting topology, create tmp files which are loaded to routers
# to start deamons and then start routers
@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
@pytest.mark.parametrize("test_type", ["redist_static", "advertise_nw"])
def test_ecmp_after_clear_bgp(request, ecmp_num, test_type):
- """ Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
+ """Verify BGP table and RIB in DUT after clear BGP routes and neighbors"""
tc_name = request.node.name
write_test_header(tc_name)
--- /dev/null
+{
+ "address_types": [
+ "ipv4",
+ "ipv6"
+ ],
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24,
+ "ipv6": "fd00::",
+ "v6mask": 64
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32,
+ "ipv6": "2001:DB8:F::",
+ "v6mask": 128
+ },
+ "routers": {
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r1-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r3-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180,
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180,
+ "next_hop_self": true
+ }
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2-link1": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180,
+ "next_hop_self": true
+ },
+ "r2-link2": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180,
+ "next_hop_self": true
+ }
+ }
+ }
+ },
+ "redistribute": [
+ {
+ "redist_type": "static"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "ipv6": "auto",
+ "type": "loopback"
+ },
+ "r2-link1": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ },
+ "r2-link2": {
+ "ipv4": "auto",
+ "ipv6": "auto"
+ }
+ },
+ "route_maps": {
+ "rmap_global": [{
+ "action": "permit",
+ "set": {
+ "ipv6": {
+ "nexthop": "prefer-global"
+ }
+ }
+ }]
+ },
+ "bgp": {
+ "local_as": "100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": 2
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180
+ },
+ "r3-link2": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "maximum_paths": {
+ "ibgp": 2
+ },
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3-link1": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ },
+ "r3-link2": {
+ "keepalivetimer": 60,
+ "holddowntimer": 180,
+ "route_maps": [{
+ "name": "rmap_global",
+ "direction": "in"
+ }]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2019 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""
+Following tests are covered to test ecmp functionality on iBGP.
+1. Verify bgp fast-convergence functionality
+"""
+import os
+import sys
+import time
+import pytest
+from time import sleep
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import get_topogen
+from lib import topojson
+
+from lib.common_config import (
+ write_test_header,
+ write_test_footer,
+ verify_rib,
+ create_static_routes,
+ check_address_types,
+ reset_config_on_routers,
+ shutdown_bringup_interface,
+ apply_raw_config,
+)
+from lib.topolog import logger
+from lib.bgp import create_router_bgp, verify_bgp_convergence
+
+
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+
+
+# Global variables
+NEXT_HOPS = {"ipv4": [], "ipv6": []}
+NETWORK = {"ipv4": "192.168.1.10/32", "ipv6": "fd00:0:0:1::10/128"}
+NEXT_HOP_IP = {"ipv4": "10.0.0.1", "ipv6": "fd00::1"}
+BGP_CONVERGENCE = False
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment.
+
+ * `mod`: module name
+ """
+ global ADDR_TYPES
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ tgen = topojson.setup_module_from_json(mod.__file__)
+ topo = tgen.json_topo
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Api call verify whether BGP is converged
+ ADDR_TYPES = check_address_types()
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+
+ # STATIC_ROUTE = True
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ get_topogen().stop_topology()
+
+
+def static_or_nw(tgen, topo, tc_name, test_type, dut):
+
+ if test_type == "redist_static":
+ input_dict_static = {
+ dut: {
+ "static_routes": [
+ {"network": NETWORK["ipv4"], "next_hop": NEXT_HOP_IP["ipv4"]},
+ {"network": NETWORK["ipv6"], "next_hop": NEXT_HOP_IP["ipv6"]},
+ ]
+ }
+ }
+ logger.info("Configuring static route on router %s", dut)
+ result = create_static_routes(tgen, input_dict_static)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ input_dict_2 = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {"redistribute": [{"redist_type": "static"}]}
+ },
+ "ipv6": {
+ "unicast": {"redistribute": [{"redist_type": "static"}]}
+ },
+ }
+ }
+ }
+ }
+
+ logger.info("Configuring redistribute static route on router %s", dut)
+ result = create_router_bgp(tgen, topo, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ elif test_type == "advertise_nw":
+ input_dict_nw = {
+ dut: {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [{"network": NETWORK["ipv4"]}]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "advertise_networks": [{"network": NETWORK["ipv6"]}]
+ }
+ },
+ }
+ }
+ }
+ }
+
+ logger.info(
+ "Advertising networks %s %s from router %s",
+ NETWORK["ipv4"],
+ NETWORK["ipv6"],
+ dut,
+ )
+ result = create_router_bgp(tgen, topo, input_dict_nw)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+
+@pytest.mark.parametrize("test_type", ["redist_static"])
+def test_ecmp_fast_convergence(request, test_type, tgen, topo):
+ """This test is to verify bgp fast-convergence cli functionality"""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+
+ # Verifying RIB routes
+ dut = "r3"
+ protocol = "bgp"
+
+ reset_config_on_routers(tgen)
+ static_or_nw(tgen, topo, tc_name, test_type, "r2")
+
+ for addr_type in ADDR_TYPES:
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
+
+ logger.info("Verifying %s routes on r3", addr_type)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ protocol=protocol,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ intf1 = topo["routers"]["r2"]["links"]["r3-link1"]["interface"]
+ intf2 = topo["routers"]["r2"]["links"]["r3-link2"]["interface"]
+
+ logger.info("Shutdown one of the link b/w r2 and r3")
+ shutdown_bringup_interface(tgen, "r2", intf1, False)
+
+ logger.info("Verify bgp neighbors are still up")
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ logger.info("Shutdown another link b/w r2 and r3")
+ shutdown_bringup_interface(tgen, "r2", intf2, False)
+
+ logger.info("Wait for 10 sec and make sure bgp neighbors are still up")
+ sleep(10)
+ result = verify_bgp_convergence(tgen, topo)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ logger.info("No shut links b/w r2 and r3")
+ shutdown_bringup_interface(tgen, "r2", intf1, True)
+ shutdown_bringup_interface(tgen, "r2", intf2, True)
+
+ logger.info("Enable bgp fast-convergence cli")
+ raw_config = {
+ "r2": {
+ "raw_config": [
+ "router bgp {}".format(topo["routers"]["r2"]["bgp"]["local_as"]),
+ "bgp fast-convergence",
+ ]
+ }
+ }
+ result = apply_raw_config(tgen, raw_config)
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ logger.info("Shutdown one link b/w r2 and r3")
+ shutdown_bringup_interface(tgen, "r2", intf1, False)
+
+ logger.info("Verify bgp neighbors goes down immediately")
+ result = verify_bgp_convergence(tgen, topo, dut="r2", expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ logger.info("Shutdown second link b/w r2 and r3")
+ shutdown_bringup_interface(tgen, "r2", intf2, False)
+
+ logger.info("Verify bgp neighbors goes down immediately")
+ result = verify_bgp_convergence(tgen, topo, dut="r2", expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
"""
import os
-import re
import sys
+import subprocess
+from functools import partial
+
import pytest
import json
import platform
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
+from lib.topogen import Topogen, TopoRouter, get_topogen
pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
-
#####################################################
##
## Network Topology Definition
#####################################################
-class NetworkTopo(Topo):
+def build_topo(tgen):
"""
EVPN Multihoming Topology -
1. Two level CLOS
4. Two dual attached hosts per-rack - hostdx1, hostdx2
"""
- def build(self, **_opts):
- "Build function"
-
- tgen = get_topogen(self)
-
- tgen.add_router("spine1")
- tgen.add_router("spine2")
- tgen.add_router("torm11")
- tgen.add_router("torm12")
- tgen.add_router("torm21")
- tgen.add_router("torm22")
- tgen.add_router("hostd11")
- tgen.add_router("hostd12")
- tgen.add_router("hostd21")
- tgen.add_router("hostd22")
-
- # On main router
- # First switch is for a dummy interface (for local network)
-
- ##################### spine1 ########################
- # spine1-eth0 is connected to torm11-eth0
- switch = tgen.add_switch("sw1")
- switch.add_link(tgen.gears["spine1"])
- switch.add_link(tgen.gears["torm11"])
-
- # spine1-eth1 is connected to torm12-eth0
- switch = tgen.add_switch("sw2")
- switch.add_link(tgen.gears["spine1"])
- switch.add_link(tgen.gears["torm12"])
-
- # spine1-eth2 is connected to torm21-eth0
- switch = tgen.add_switch("sw3")
- switch.add_link(tgen.gears["spine1"])
- switch.add_link(tgen.gears["torm21"])
-
- # spine1-eth3 is connected to torm22-eth0
- switch = tgen.add_switch("sw4")
- switch.add_link(tgen.gears["spine1"])
- switch.add_link(tgen.gears["torm22"])
-
- ##################### spine2 ########################
- # spine2-eth0 is connected to torm11-eth1
- switch = tgen.add_switch("sw5")
- switch.add_link(tgen.gears["spine2"])
- switch.add_link(tgen.gears["torm11"])
-
- # spine2-eth1 is connected to torm12-eth1
- switch = tgen.add_switch("sw6")
- switch.add_link(tgen.gears["spine2"])
- switch.add_link(tgen.gears["torm12"])
-
- # spine2-eth2 is connected to torm21-eth1
- switch = tgen.add_switch("sw7")
- switch.add_link(tgen.gears["spine2"])
- switch.add_link(tgen.gears["torm21"])
-
- # spine2-eth3 is connected to torm22-eth1
- switch = tgen.add_switch("sw8")
- switch.add_link(tgen.gears["spine2"])
- switch.add_link(tgen.gears["torm22"])
-
- ##################### torm11 ########################
- # torm11-eth2 is connected to hostd11-eth0
- switch = tgen.add_switch("sw9")
- switch.add_link(tgen.gears["torm11"])
- switch.add_link(tgen.gears["hostd11"])
-
- # torm11-eth3 is connected to hostd12-eth0
- switch = tgen.add_switch("sw10")
- switch.add_link(tgen.gears["torm11"])
- switch.add_link(tgen.gears["hostd12"])
-
- ##################### torm12 ########################
- # torm12-eth2 is connected to hostd11-eth1
- switch = tgen.add_switch("sw11")
- switch.add_link(tgen.gears["torm12"])
- switch.add_link(tgen.gears["hostd11"])
-
- # torm12-eth3 is connected to hostd12-eth1
- switch = tgen.add_switch("sw12")
- switch.add_link(tgen.gears["torm12"])
- switch.add_link(tgen.gears["hostd12"])
-
- ##################### torm21 ########################
- # torm21-eth2 is connected to hostd21-eth0
- switch = tgen.add_switch("sw13")
- switch.add_link(tgen.gears["torm21"])
- switch.add_link(tgen.gears["hostd21"])
-
- # torm21-eth3 is connected to hostd22-eth0
- switch = tgen.add_switch("sw14")
- switch.add_link(tgen.gears["torm21"])
- switch.add_link(tgen.gears["hostd22"])
-
- ##################### torm22 ########################
- # torm22-eth2 is connected to hostd21-eth1
- switch = tgen.add_switch("sw15")
- switch.add_link(tgen.gears["torm22"])
- switch.add_link(tgen.gears["hostd21"])
-
- # torm22-eth3 is connected to hostd22-eth1
- switch = tgen.add_switch("sw16")
- switch.add_link(tgen.gears["torm22"])
- switch.add_link(tgen.gears["hostd22"])
+ tgen.add_router("spine1")
+ tgen.add_router("spine2")
+ tgen.add_router("torm11")
+ tgen.add_router("torm12")
+ tgen.add_router("torm21")
+ tgen.add_router("torm22")
+ tgen.add_router("hostd11")
+ tgen.add_router("hostd12")
+ tgen.add_router("hostd21")
+ tgen.add_router("hostd22")
+
+ # On main router
+ # First switch is for a dummy interface (for local network)
+
+ ##################### spine1 ########################
+ # spine1-eth0 is connected to torm11-eth0
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["spine1"])
+ switch.add_link(tgen.gears["torm11"])
+
+ # spine1-eth1 is connected to torm12-eth0
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["spine1"])
+ switch.add_link(tgen.gears["torm12"])
+
+ # spine1-eth2 is connected to torm21-eth0
+ switch = tgen.add_switch("sw3")
+ switch.add_link(tgen.gears["spine1"])
+ switch.add_link(tgen.gears["torm21"])
+
+ # spine1-eth3 is connected to torm22-eth0
+ switch = tgen.add_switch("sw4")
+ switch.add_link(tgen.gears["spine1"])
+ switch.add_link(tgen.gears["torm22"])
+
+ ##################### spine2 ########################
+ # spine2-eth0 is connected to torm11-eth1
+ switch = tgen.add_switch("sw5")
+ switch.add_link(tgen.gears["spine2"])
+ switch.add_link(tgen.gears["torm11"])
+
+ # spine2-eth1 is connected to torm12-eth1
+ switch = tgen.add_switch("sw6")
+ switch.add_link(tgen.gears["spine2"])
+ switch.add_link(tgen.gears["torm12"])
+
+ # spine2-eth2 is connected to torm21-eth1
+ switch = tgen.add_switch("sw7")
+ switch.add_link(tgen.gears["spine2"])
+ switch.add_link(tgen.gears["torm21"])
+
+ # spine2-eth3 is connected to torm22-eth1
+ switch = tgen.add_switch("sw8")
+ switch.add_link(tgen.gears["spine2"])
+ switch.add_link(tgen.gears["torm22"])
+
+ ##################### torm11 ########################
+ # torm11-eth2 is connected to hostd11-eth0
+ switch = tgen.add_switch("sw9")
+ switch.add_link(tgen.gears["torm11"])
+ switch.add_link(tgen.gears["hostd11"])
+
+ # torm11-eth3 is connected to hostd12-eth0
+ switch = tgen.add_switch("sw10")
+ switch.add_link(tgen.gears["torm11"])
+ switch.add_link(tgen.gears["hostd12"])
+
+ ##################### torm12 ########################
+ # torm12-eth2 is connected to hostd11-eth1
+ switch = tgen.add_switch("sw11")
+ switch.add_link(tgen.gears["torm12"])
+ switch.add_link(tgen.gears["hostd11"])
+
+ # torm12-eth3 is connected to hostd12-eth1
+ switch = tgen.add_switch("sw12")
+ switch.add_link(tgen.gears["torm12"])
+ switch.add_link(tgen.gears["hostd12"])
+
+ ##################### torm21 ########################
+ # torm21-eth2 is connected to hostd21-eth0
+ switch = tgen.add_switch("sw13")
+ switch.add_link(tgen.gears["torm21"])
+ switch.add_link(tgen.gears["hostd21"])
+
+ # torm21-eth3 is connected to hostd22-eth0
+ switch = tgen.add_switch("sw14")
+ switch.add_link(tgen.gears["torm21"])
+ switch.add_link(tgen.gears["hostd22"])
+
+ ##################### torm22 ########################
+ # torm22-eth2 is connected to hostd21-eth1
+ switch = tgen.add_switch("sw15")
+ switch.add_link(tgen.gears["torm22"])
+ switch.add_link(tgen.gears["hostd21"])
+
+ # torm22-eth3 is connected to hostd22-eth1
+ switch = tgen.add_switch("sw16")
+ switch.add_link(tgen.gears["torm22"])
+ switch.add_link(tgen.gears["hostd22"])
#####################################################
def setup_module(module):
"Setup topology"
- tgen = Topogen(NetworkTopo, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
krel = platform.release()
def ping_anycast_gw(tgen):
# ping the anycast gw from the local and remote hosts to populate
# the mac address on the PEs
+ python3_path = tgen.net.get_exec_path(["python3", "python"])
script_path = os.path.abspath(os.path.join(CWD, "../lib/scapy_sendpkt.py"))
intf = "torbond"
ipaddr = "45.0.0.1"
ping_cmd = [
+ python3_path,
script_path,
"--imports=Ether,ARP",
"--interface=" + intf,
- "'Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=\"{}\")'".format(ipaddr)
+ 'Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst="{}")'.format(ipaddr),
]
for name in ("hostd11", "hostd21"):
- host = tgen.net[name]
- stdout = host.cmd(ping_cmd)
+ host = tgen.net.hosts[name]
+ _, stdout, _ = host.cmd_status(ping_cmd, warn=False, stderr=subprocess.STDOUT)
stdout = stdout.strip()
if stdout:
- host.logger.debug("%s: arping on %s for %s returned: %s", name, intf, ipaddr, stdout)
+ host.logger.debug(
+ "%s: arping on %s for %s returned: %s", name, intf, ipaddr, stdout
+ )
def check_mac(dut, vni, mac, m_type, esi, intf, ping_gw=False, tgen=None):
!
-int host1-eth0
+int host2-eth0
ip address 50.0.1.21/24
ipv6 address 50:0:1::21/48
import time
import platform
-#Current Working Directory
+# Current Working Directory
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
)
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-#Global variables
-PES = ['PE1', 'PE2']
-HOSTS = ['host1', 'host2']
-PE_SUFFIX = {'PE1': '1', 'PE2': '2'}
-HOST_SUFFIX = {'host1': '1', 'host2': '2'}
+# Global variables
+PES = ["PE1", "PE2"]
+HOSTS = ["host1", "host2"]
+PE_SUFFIX = {"PE1": "1", "PE2": "2"}
+HOST_SUFFIX = {"host1": "1", "host2": "2"}
TRIGGERS = ["base", "no_rt5", "no_rt2"]
-class TemplateTopo(Topo):
- """Test topology builder"""
+def build_topo(tgen):
+ # This function only purpose is to define allocation and relationship
+ # between routers and add links.
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
+ # Create routers
+ for pe in PES:
+ tgen.add_router(pe)
+ for host in HOSTS:
+ tgen.add_router(host)
- # This function only purpose is to define allocation and relationship
- # between routers and add links.
+ krel = platform.release()
+ logger.info("Kernel version " + krel)
- # Create routers
- for pe in PES:
- tgen.add_router(pe)
- for host in HOSTS:
- tgen.add_router(host)
-
- krel = platform.release()
- logger.info('Kernel version ' + krel)
-
- #Add links
- tgen.add_link(tgen.gears['PE1'], tgen.gears['PE2'], 'PE1-eth0', 'PE2-eth0')
- tgen.add_link(tgen.gears['PE1'], tgen.gears['host1'], 'PE1-eth1', 'host1-eth0')
- tgen.add_link(tgen.gears['PE2'], tgen.gears['host2'], 'PE2-eth1', 'host2-eth0')
+ # Add links
+ tgen.add_link(tgen.gears["PE1"], tgen.gears["PE2"], "PE1-eth0", "PE2-eth0")
+ tgen.add_link(tgen.gears["PE1"], tgen.gears["host1"], "PE1-eth1", "host1-eth0")
+ tgen.add_link(tgen.gears["PE2"], tgen.gears["host2"], "PE2-eth1", "host2-eth0")
def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
kernelv = platform.release()
if topotest.version_cmp(kernelv, "4.15") < 0:
- logger.info("For EVPN, kernel version should be minimum 4.15. Kernel present {}".format(kernelv))
+ logger.info(
+ "For EVPN, kernel version should be minimum 4.15. Kernel present {}".format(
+ kernelv
+ )
+ )
return
- if topotest.version_cmp(kernelv, '4.15') == 0:
+ if topotest.version_cmp(kernelv, "4.15") == 0:
l3mdev_accept = 1
- logger.info('setting net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept))
+ logger.info("setting net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept))
else:
l3mdev_accept = 0
tgen.start_topology()
# Configure MAC address for hosts as these MACs are advertised with EVPN type-2 routes
- for (name, host) in tgen.gears.items():
+ for name in tgen.gears:
if name not in HOSTS:
continue
+ host = tgen.net[name]
host_mac = "1a:2b:3c:4d:5e:6{}".format(HOST_SUFFIX[name])
- host.run("ip link set dev {}-eth0 down").format(name)
- host.run("ip link set dev {0}-eth0 address {1}".format(name, host_mac))
- host.run("ip link set dev {}-eth0 up").format(name)
+ host.cmd_raises("ip link set dev {}-eth0 down".format(name))
+ host.cmd_raises("ip link set dev {0}-eth0 address {1}".format(name, host_mac))
+ host.cmd_raises("ip link set dev {}-eth0 up".format(name))
# Configure PE VxLAN and Bridge interfaces
- for (name, pe) in tgen.gears.items():
+ for name in tgen.gears:
if name not in PES:
continue
+ pe = tgen.net[name]
+
vtep_ip = "10.100.0.{}".format(PE_SUFFIX[name])
bridge_ip = "50.0.1.{}/24".format(PE_SUFFIX[name])
bridge_ipv6 = "50:0:1::{}/48".format(PE_SUFFIX[name])
- pe.run("ip link add vrf-blue type vrf table 10")
- pe.run("ip link set dev vrf-blue up")
- pe.run("ip link add vxlan100 type vxlan id 100 dstport 4789 local {}".format(vtep_ip))
- pe.run("ip link add name br100 type bridge stp_state 0")
- pe.run("ip link set dev vxlan100 master br100")
- pe.run("ip link set dev {}-eth1 master br100".format(name))
- pe.run("ip addr add {} dev br100".format(bridge_ip))
- pe.run("ip link set up dev br100")
- pe.run("ip link set up dev vxlan100")
- pe.run("ip link set up dev {}-eth1".format(name))
- pe.run("ip link set dev br100 master vrf-blue")
- pe.run("ip -6 addr add {} dev br100".format(bridge_ipv6))
-
- pe.run("ip link add vxlan1000 type vxlan id 1000 dstport 4789 local {}".format(vtep_ip))
- pe.run("ip link add name br1000 type bridge stp_state 0")
- pe.run("ip link set dev vxlan1000 master br100")
- pe.run("ip link set up dev br1000")
- pe.run("ip link set up dev vxlan1000")
- pe.run("ip link set dev br1000 master vrf-blue")
-
- pe.run("sysctl -w net.ipv4.ip_forward=1")
- pe.run("sysctl -w net.ipv6.conf.all.forwarding=1")
- pe.run("sysctl -w net.ipv4.udp_l3mdev_accept={}".format(l3mdev_accept))
- pe.run("sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept))
+ pe.cmd_raises("ip link add vrf-blue type vrf table 10")
+ pe.cmd_raises("ip link set dev vrf-blue up")
+ pe.cmd_raises(
+ "ip link add vxlan100 type vxlan id 100 dstport 4789 local {}".format(
+ vtep_ip
+ )
+ )
+ pe.cmd_raises("ip link add name br100 type bridge stp_state 0")
+ pe.cmd_raises("ip link set dev vxlan100 master br100")
+ pe.cmd_raises("ip link set dev {}-eth1 master br100".format(name))
+ pe.cmd_raises("ip addr add {} dev br100".format(bridge_ip))
+ pe.cmd_raises("ip link set up dev br100")
+ pe.cmd_raises("ip link set up dev vxlan100")
+ pe.cmd_raises("ip link set up dev {}-eth1".format(name))
+ pe.cmd_raises("ip link set dev br100 master vrf-blue")
+ pe.cmd_raises("ip -6 addr add {} dev br100".format(bridge_ipv6))
+
+ pe.cmd_raises(
+ "ip link add vxlan1000 type vxlan id 1000 dstport 4789 local {}".format(
+ vtep_ip
+ )
+ )
+ pe.cmd_raises("ip link add name br1000 type bridge stp_state 0")
+ pe.cmd_raises("ip link set dev vxlan1000 master br100")
+ pe.cmd_raises("ip link set up dev br1000")
+ pe.cmd_raises("ip link set up dev vxlan1000")
+ pe.cmd_raises("ip link set dev br1000 master vrf-blue")
+
+ pe.cmd_raises("sysctl -w net.ipv4.ip_forward=1")
+ pe.cmd_raises("sysctl -w net.ipv6.conf.all.forwarding=1")
+ pe.cmd_raises("sysctl -w net.ipv4.udp_l3mdev_accept={}".format(l3mdev_accept))
+ pe.cmd_raises("sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept))
# For all registred routers, load the zebra configuration file
for (name, router) in tgen.routers().items():
logger.info("Running setup_module() done")
+ time.sleep(10)
+
def teardown_module(mod):
"""Teardown the pytest environment"""
if trigger not in TRIGGERS:
return "Unexpected trigger", "Unexpected trigger {}".format(trigger)
- show_commands = {'bgp_vni_routes': 'show bgp l2vpn evpn route vni 100 json',
- 'bgp_vrf_ipv4' : 'show bgp vrf vrf-blue ipv4 json',
- 'bgp_vrf_ipv6' : 'show bgp vrf vrf-blue ipv6 json',
- 'zebra_vrf_ipv4': 'show ip route vrf vrf-blue json',
- 'zebra_vrf_ipv6': 'show ipv6 route vrf vrf-blue json'}
+ show_commands = {
+ "bgp_vni_routes": "show bgp l2vpn evpn route vni 100 json",
+ "bgp_vrf_ipv4": "show bgp vrf vrf-blue ipv4 json",
+ "bgp_vrf_ipv6": "show bgp vrf vrf-blue ipv6 json",
+ "zebra_vrf_ipv4": "show ip route vrf vrf-blue json",
+ "zebra_vrf_ipv6": "show ipv6 route vrf vrf-blue json",
+ }
for (name, pe) in tgen.gears.items():
if name not in PES:
continue
for (cmd_key, command) in show_commands.items():
- expected_op_file = "{0}/{1}/{2}_{3}.json".format(CWD, name, cmd_key, trigger)
+ expected_op_file = "{0}/{1}/{2}_{3}.json".format(
+ CWD, name, cmd_key, trigger
+ )
expected_op = json.loads(open(expected_op_file).read())
test_func = partial(topotest.router_json_cmp, pe, command, expected_op)
tc_name = request.node.name
write_test_header(tc_name)
+ # Temporarily Disabled
+ tgen.set_error(
+ "%s: Failing under new micronet framework, please debug and re-enable", tc_name
+ )
+
kernelv = platform.release()
if topotest.version_cmp(kernelv, "4.15") < 0:
logger.info("For EVPN, kernel version should be minimum 4.15")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- h1 = tgen.gears['host1']
+ h1 = tgen.gears["host1"]
step("Withdraw type-5 routes")
- h1.run('vtysh -c "config t" \
+ h1.run(
+ 'vtysh -c "config t" \
-c "router bgp 111" \
-c "address-family ipv4" \
- -c "no network 100.0.0.21/32"')
- h1.run('vtysh -c "config t" \
+ -c "no network 100.0.0.21/32"'
+ )
+ h1.run(
+ 'vtysh -c "config t" \
-c "router bgp 111" \
-c "address-family ipv6" \
- -c "no network 100::21/128"')
+ -c "no network 100::21/128"'
+ )
result, assertmsg = evpn_gateway_ip_show_op_check("no_rt5")
if result is not None:
step("Advertise type-5 routes again")
- h1.run('vtysh -c "config t" \
+ h1.run(
+ 'vtysh -c "config t" \
-c "router bgp 111" \
-c "address-family ipv4" \
- -c "network 100.0.0.21/32"')
- h1.run('vtysh -c "config t" \
+ -c "network 100.0.0.21/32"'
+ )
+ h1.run(
+ 'vtysh -c "config t" \
-c "router bgp 111" \
-c "address-family ipv6" \
- -c "network 100::21/128"')
+ -c "network 100::21/128"'
+ )
result, assertmsg = evpn_gateway_ip_show_op_check("base")
if result is not None:
def test_evpn_gateway_ip_flap_rt2(request):
"""
- Withdraw EVPN type-2 routes and check O/Ps at PE1 and PE2
- """
+ Withdraw EVPN type-2 routes and check O/Ps at PE1 and PE2
+ """
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
-
step("Shut down VxLAN interface at PE1 which results in withdraw of type-2 routes")
- pe1 = tgen.gears['PE1']
+ pe1 = tgen.net["PE1"]
- pe1.run('ip link set dev vxlan100 down')
+ pe1.cmd_raises("ip link set dev vxlan100 down")
result, assertmsg = evpn_gateway_ip_show_op_check("no_rt2")
if result is not None:
step("Bring up VxLAN interface at PE1 and advertise type-2 routes again")
- pe1.run('ip link set dev vxlan100 up')
+ pe1.cmd_raises("ip link set dev vxlan100 up")
result, assertmsg = evpn_gateway_ip_show_op_check("base")
if result is not None:
tgen.report_memory_leaks()
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import os
import sys
-import json
-from functools import partial
import pytest
import platform
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class BGPEVPNTopo(Topo):
- "Test topology builder"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+def build_topo(tgen):
+ "Build function"
- tgen.add_router("r1")
- tgen.add_router("r2")
+ tgen.add_router("r1")
+ tgen.add_router("r2")
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r1"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(BGPEVPNTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
"ip link set dev loop101 master {}-vrf-101",
"ip link set dev loop101 up",
]
- cmds_netns = [
- "ip netns add {}-vrf-101",
- "ip link add loop101 type dummy",
- "ip link set dev loop101 netns {}-vrf-101",
- "ip netns exec {}-vrf-101 ip link set dev loop101 up",
- ]
cmds_r2 = [ # config routing 101
"ip link add name bridge-101 up type bridge stp_state 0",
"ip link set vxlan-101 up type bridge_slave learning off flood off mcast_flood off",
]
- cmds_r1_netns_method3 = [
- "ip link add name vxlan-{1} type vxlan id {1} dstport 4789 dev {0}-eth0 local 192.168.100.21",
- "ip link set dev vxlan-{1} netns {0}-vrf-{1}",
- "ip netns exec {0}-vrf-{1} ip li set dev lo up",
- "ip netns exec {0}-vrf-{1} ip link add name bridge-{1} up type bridge stp_state 0",
- "ip netns exec {0}-vrf-{1} ip link set dev vxlan-{1} master bridge-{1}",
- "ip netns exec {0}-vrf-{1} ip link set bridge-{1} up",
- "ip netns exec {0}-vrf-{1} ip link set vxlan-{1} up",
- ]
+ # cmds_r1_netns_method3 = [
+ # "ip link add name vxlan-{1} type vxlan id {1} dstport 4789 dev {0}-eth0 local 192.168.100.21",
+ # "ip link set dev vxlan-{1} netns {0}-vrf-{1}",
+ # "ip netns exec {0}-vrf-{1} ip li set dev lo up",
+ # "ip netns exec {0}-vrf-{1} ip link add name bridge-{1} up type bridge stp_state 0",
+ # "ip netns exec {0}-vrf-{1} ip link set dev vxlan-{1} master bridge-{1}",
+ # "ip netns exec {0}-vrf-{1} ip link set bridge-{1} up",
+ # "ip netns exec {0}-vrf-{1} ip link set vxlan-{1} up",
+ # ]
router = tgen.gears["r1"]
- for cmd in cmds_netns:
- logger.info("cmd to r1: " + cmd)
- output = router.run(cmd.format("r1"))
- logger.info("result: " + output)
+
+ ns = "r1-vrf-101"
+ tgen.net["r1"].add_netns(ns)
+ tgen.net["r1"].cmd_raises("ip link add loop101 type dummy")
+ tgen.net["r1"].set_intf_netns("loop101", ns, up=True)
router = tgen.gears["r2"]
for cmd in cmds_vrflite:
logger.info("cmd to r2: " + cmd.format("r2"))
- output = router.run(cmd.format("r2"))
+ output = router.cmd_raises(cmd.format("r2"))
logger.info("result: " + output)
for cmd in cmds_r2:
logger.info("cmd to r2: " + cmd.format("r2"))
- output = router.run(cmd.format("r2"))
+ output = router.cmd_raises(cmd.format("r2"))
logger.info("result: " + output)
- router = tgen.gears["r1"]
- bridge_id = "101"
- for cmd in cmds_r1_netns_method3:
- logger.info("cmd to r1: " + cmd.format("r1", bridge_id))
- output = router.run(cmd.format("r1", bridge_id))
- logger.info("result: " + output)
- router = tgen.gears["r1"]
+ tgen.net["r1"].cmd_raises(
+ "ip link add name vxlan-101 type vxlan id 101 dstport 4789 dev r1-eth0 local 192.168.100.21"
+ )
+ tgen.net["r1"].set_intf_netns("vxlan-101", "r1-vrf-101", up=True)
+ tgen.net["r1"].cmd_raises("ip -n r1-vrf-101 link set lo up")
+ tgen.net["r1"].cmd_raises(
+ "ip -n r1-vrf-101 link add name bridge-101 up type bridge stp_state 0"
+ )
+ tgen.net["r1"].cmd_raises(
+ "ip -n r1-vrf-101 link set dev vxlan-101 master bridge-101"
+ )
+ tgen.net["r1"].cmd_raises("ip -n r1-vrf-101 link set bridge-101 up")
+ tgen.net["r1"].cmd_raises("ip -n r1-vrf-101 link set vxlan-101 up")
for rname, router in router_list.items():
if rname == "r1":
def teardown_module(_mod):
"Teardown the pytest environment"
tgen = get_topogen()
- cmds_rx_netns = ["ip netns del {}-vrf-101"]
- router = tgen.gears["r1"]
- for cmd in cmds_rx_netns:
- logger.info("cmd to r1: " + cmd.format("r1"))
- output = router.run(cmd.format("r1"))
+ tgen.net["r1"].delete_netns("r1-vrf-101")
tgen.stop_topology()
"vtepIp":"10.10.10.10",
"mcastGroup":"0.0.0.0",
"advertiseGatewayMacip":"No",
- "numMacs":6,
- "numArpNd":6,
"numRemoteVteps":[
"10.30.30.30"
]
"vtepIp":"10.30.30.30",
"mcastGroup":"0.0.0.0",
"advertiseGatewayMacip":"No",
- "numMacs":6,
- "numArpNd":6,
"numRemoteVteps":[
"10.10.10.10"
]
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ # This function only purpose is to define allocation and relationship
+ # between routers, switches and hosts.
+ #
+ #
+ # Create routers
+ tgen.add_router("P1")
+ tgen.add_router("PE1")
+ tgen.add_router("PE2")
+ tgen.add_router("host1")
+ tgen.add_router("host2")
- # This function only purpose is to define allocation and relationship
- # between routers, switches and hosts.
- #
- #
- # Create routers
- tgen.add_router("P1")
- tgen.add_router("PE1")
- tgen.add_router("PE2")
- tgen.add_router("host1")
- tgen.add_router("host2")
+ # Host1-PE1
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["host1"])
+ switch.add_link(tgen.gears["PE1"])
- # Host1-PE1
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["host1"])
- switch.add_link(tgen.gears["PE1"])
+ # PE1-P1
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["PE1"])
+ switch.add_link(tgen.gears["P1"])
- # PE1-P1
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["PE1"])
- switch.add_link(tgen.gears["P1"])
+ # P1-PE2
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["P1"])
+ switch.add_link(tgen.gears["PE2"])
- # P1-PE2
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["P1"])
- switch.add_link(tgen.gears["PE2"])
-
- # PE2-host2
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["PE2"])
- switch.add_link(tgen.gears["host2"])
+ # PE2-host2
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["PE2"])
+ switch.add_link(tgen.gears["host2"])
def setup_module(mod):
"Sets up the pytest environment"
# This function initiates the topology build with Topogen...
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
tgen.start_topology()
return topotest.json_cmp(output_json, expected)
+def check_vni_macs_present(tgen, router, vni, maclist):
+ result = router.vtysh_cmd("show evpn mac vni {} json".format(vni), isjson=True)
+ for rname, ifname in maclist:
+ m = tgen.net.macs[(rname, ifname)]
+ if m not in result["macs"]:
+ return "MAC ({}) for interface {} on {} missing on {} from {}".format(
+ m, ifname, rname, router.name, json.dumps(result, indent=4)
+ )
+ return None
+
+
def test_pe1_converge_evpn():
"Wait for protocol convergence"
expected = json.loads(open(json_file).read())
test_func = partial(show_vni_json_elide_ifindex, pe1, 101, expected)
- _, result = topotest.run_and_expect(test_func, None, count=125, wait=1)
+ _, result = topotest.run_and_expect(test_func, None, count=45, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(pe1.name)
- assert result is None, assertmsg
- # tgen.mininet_cli()
+
+ test_func = partial(
+ check_vni_macs_present,
+ tgen,
+ pe1,
+ 101,
+ (("host1", "host1-eth0"), ("host2", "host2-eth0")),
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ if result:
+ logger.warning("%s", result)
+ assert None, '"{}" missing expected MACs'.format(pe1.name)
def test_pe2_converge_evpn():
expected = json.loads(open(json_file).read())
test_func = partial(show_vni_json_elide_ifindex, pe2, 101, expected)
- _, result = topotest.run_and_expect(test_func, None, count=125, wait=1)
+ _, result = topotest.run_and_expect(test_func, None, count=45, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(pe2.name)
assert result is None, assertmsg
- # tgen.mininet_cli()
+
+ test_func = partial(
+ check_vni_macs_present,
+ tgen,
+ pe2,
+ 101,
+ (("host1", "host1-eth0"), ("host2", "host2-eth0")),
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
+ if result:
+ logger.warning("%s", result)
+ assert None, '"{}" missing expected MACs'.format(pe2.name)
def mac_learn_test(host, local):
def test_local_remote_mac_pe1():
- " Test MAC transfer PE1 local and PE2 remote"
+ "Test MAC transfer PE1 local and PE2 remote"
tgen = get_topogen()
# Don't run this test if we have any failure.
def test_local_remote_mac_pe2():
- " Test MAC transfer PE2 local and PE1 remote"
+ "Test MAC transfer PE2 local and PE1 remote"
tgen = get_topogen()
# Don't run this test if we have any failure.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd]
#####################################################
-class BGPFeaturesTopo1(Topo):
- "BGP Features Topology 1"
+def build_topo(tgen):
+ for rtrNum in range(1, 6):
+ tgen.add_router("r{}".format(rtrNum))
- def build(self, **_opts):
- tgen = get_topogen(self)
-
- # Create the routers
- for rtrNum in range(1, 6):
- tgen.add_router("r{}".format(rtrNum))
-
- # Setup Switches and connections
- for swNum in range(1, 11):
- tgen.add_switch("sw{}".format(swNum))
-
- # Add connections to stub switches
- tgen.gears["r1"].add_link(tgen.gears["sw6"])
- tgen.gears["r2"].add_link(tgen.gears["sw7"])
- tgen.gears["r3"].add_link(tgen.gears["sw8"])
- tgen.gears["r4"].add_link(tgen.gears["sw9"])
- tgen.gears["r5"].add_link(tgen.gears["sw10"])
-
- # Add connections to R1-R2-R3 core
- tgen.gears["r1"].add_link(tgen.gears["sw1"])
- tgen.gears["r1"].add_link(tgen.gears["sw3"])
- tgen.gears["r2"].add_link(tgen.gears["sw1"])
- tgen.gears["r2"].add_link(tgen.gears["sw2"])
- tgen.gears["r3"].add_link(tgen.gears["sw2"])
- tgen.gears["r3"].add_link(tgen.gears["sw3"])
+ # create ExaBGP peers
+ for peer_num in range(1, 5):
+ tgen.add_exabgp_peer(
+ "peer{}".format(peer_num),
+ ip="192.168.101.{}".format(peer_num + 2),
+ defaultRoute="via 192.168.101.1",
+ )
- # Add connections to external R4/R5 Routers
- tgen.gears["r1"].add_link(tgen.gears["sw4"])
- tgen.gears["r4"].add_link(tgen.gears["sw4"])
- tgen.gears["r2"].add_link(tgen.gears["sw5"])
- tgen.gears["r5"].add_link(tgen.gears["sw5"])
+ # Setup Switches and connections
+ for swNum in range(1, 11):
+ tgen.add_switch("sw{}".format(swNum))
+
+ # Add connections to stub switches
+ tgen.gears["r1"].add_link(tgen.gears["sw6"])
+ tgen.gears["r2"].add_link(tgen.gears["sw7"])
+ tgen.gears["r3"].add_link(tgen.gears["sw8"])
+ tgen.gears["r4"].add_link(tgen.gears["sw9"])
+ tgen.gears["r5"].add_link(tgen.gears["sw10"])
+
+ # Add connections to R1-R2-R3 core
+ tgen.gears["r1"].add_link(tgen.gears["sw1"])
+ tgen.gears["r1"].add_link(tgen.gears["sw3"])
+ tgen.gears["r2"].add_link(tgen.gears["sw1"])
+ tgen.gears["r2"].add_link(tgen.gears["sw2"])
+ tgen.gears["r3"].add_link(tgen.gears["sw2"])
+ tgen.gears["r3"].add_link(tgen.gears["sw3"])
+
+ # Add connections to external R4/R5 Routers
+ tgen.gears["r1"].add_link(tgen.gears["sw4"])
+ tgen.gears["r4"].add_link(tgen.gears["sw4"])
+ tgen.gears["r2"].add_link(tgen.gears["sw5"])
+ tgen.gears["r5"].add_link(tgen.gears["sw5"])
+
+ # Add ExaBGP peers to sw4
+ tgen.gears["peer1"].add_link(tgen.gears["sw4"])
+ tgen.gears["peer2"].add_link(tgen.gears["sw4"])
+ tgen.gears["peer3"].add_link(tgen.gears["sw4"])
+ tgen.gears["peer4"].add_link(tgen.gears["sw4"])
#####################################################
def setup_module(module):
- tgen = Topogen(BGPFeaturesTopo1, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
# Starting Routers
import os
import sys
import pytest
-import getopt
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from lib.lutil import lUtil
-from lib.lutil import luCommand
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
#####################################################
-class BGPFLOWSPECTopo1(Topo):
- "BGP EBGP Flowspec Topology 1"
+def build_topo(tgen):
+ tgen.add_router("r1")
- def build(self, **_opts):
- tgen = get_topogen(self)
+ # Setup Control Path Switch 1. r1-eth0
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
- # Setup Routers
- tgen.add_router("r1")
-
- # Setup Control Path Switch 1. r1-eth0
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
-
- ## Add eBGP ExaBGP neighbors
- peer_ip = "10.0.1.101" ## peer
- peer_route = "via 10.0.1.1" ## router
- peer = tgen.add_exabgp_peer("peer1", ip=peer_ip, defaultRoute=peer_route)
- switch.add_link(peer)
+ ## Add eBGP ExaBGP neighbors
+ peer_ip = "10.0.1.101" ## peer
+ peer_route = "via 10.0.1.1" ## router
+ peer = tgen.add_exabgp_peer("peer1", ip=peer_ip, defaultRoute=peer_route)
+ switch.add_link(peer)
#####################################################
def setup_module(module):
- tgen = Topogen(BGPFLOWSPECTopo1, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
# check for zebra capability
import os
import sys
-import json
import time
-import inspect
import pytest
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topogen import Topogen, get_topogen
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
# Import topoJson from lib, to create topology and initial configuration
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.bgp import (
clear_bgp,
verify_bgp_rib,
create_router_bgp,
verify_r_bit,
verify_f_bit,
- verify_graceful_restart_timers,
verify_bgp_convergence,
verify_bgp_convergence_from_running_config,
)
shutdown_bringup_interface,
step,
get_frr_ipv6_linklocal,
- create_route_maps,
required_linux_kernel_version,
)
pytestmark = [pytest.mark.bgpd]
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/bgp_gr_topojson_topo1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- logger.info("Could not read file:", jsonFile)
-
-
# Global variables
NEXT_HOP_IP = {"ipv4": "192.168.1.10", "ipv6": "fd00:0:0:1::10"}
NEXT_HOP_IP_1 = {"ipv4": "192.168.0.1", "ipv6": "fd00::1"}
PREFERRED_NEXT_HOP = "link_local"
-class GenerateTopo(Topo):
- """
- Test topology builder
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # This function only purpose is to create topology
- # as defined in input json file.
- #
- # Create topology (setup module)
- # Creating 2 routers topology, r1, r2in IBGP
- # Bring up topology
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(GenerateTopo, mod.__name__)
+ json_file = "{}/bgp_gr_topojson_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
import os
import sys
-import json
import time
import pytest
from time import sleep
-from copy import deepcopy
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topogen import Topogen, get_topogen
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
# Import topoJson from lib, to create topology and initial configuration
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.bgp import (
clear_bgp,
verify_bgp_rib,
check_address_types,
write_test_footer,
check_router_status,
- shutdown_bringup_interface,
step,
get_frr_ipv6_linklocal,
- create_route_maps,
required_linux_kernel_version,
)
pytestmark = [pytest.mark.bgpd]
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/bgp_gr_topojson_topo2.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- logger.info("Could not read file:", jsonFile)
-
# Global variables
BGP_CONVERGENCE = False
GR_RESTART_TIMER = 5
NEXT_HOP_6 = ["fd00:0:0:1::1", "fd00:0:0:4::2"]
-class GenerateTopo(Topo):
- """
- Test topology builder
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # This function only purpose is to create topology
- # as defined in input json file.
- #
- # Create topology (setup module)
- # Creating 2 routers topology, r1, r2in IBGP
- # Bring up topology
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(GenerateTopo, mod.__name__)
+ json_file = "{}/bgp_gr_topojson_topo2.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
configure_gr_followed_by_clear(tgen, topo, input_dict, tc_name, dut="r1", peer="r3")
for addr_type in ADDR_TYPES:
- step("Verifying GR config and operational state for addr_type {}".format(addr_type))
+ step(
+ "Verifying GR config and operational state for addr_type {}".format(
+ addr_type
+ )
+ )
result = verify_graceful_restart(
tgen, topo, addr_type, input_dict, dut="r1", peer="r3"
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv4Unicast", dut="r1", peer="r3",
+ tgen,
+ topo,
+ addr_type,
+ "ipv4Unicast",
+ dut="r1",
+ peer="r3",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv6Unicast", dut="r1", peer="r3",
+ tgen,
+ topo,
+ addr_type,
+ "ipv6Unicast",
+ dut="r1",
+ peer="r3",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv4Unicast", dut="r3", peer="r1",
+ tgen,
+ topo,
+ addr_type,
+ "ipv4Unicast",
+ dut="r3",
+ peer="r1",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv6Unicast", dut="r3", peer="r1",
+ tgen,
+ topo,
+ addr_type,
+ "ipv6Unicast",
+ dut="r3",
+ peer="r1",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv4Unicast", dut="r1", peer="r3",
+ tgen,
+ topo,
+ addr_type,
+ "ipv4Unicast",
+ dut="r1",
+ peer="r3",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv6Unicast", dut="r1", peer="r3",
+ tgen,
+ topo,
+ addr_type,
+ "ipv6Unicast",
+ dut="r1",
+ peer="r3",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv4Unicast", dut="r3", peer="r1",
+ tgen,
+ topo,
+ addr_type,
+ "ipv4Unicast",
+ dut="r3",
+ peer="r1",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
# verify multi address family
result = verify_gr_address_family(
- tgen, topo, addr_type, "ipv6Unicast", dut="r3", peer="r1",
+ tgen,
+ topo,
+ addr_type,
+ "ipv6Unicast",
+ dut="r3",
+ peer="r1",
)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
import re
import sys
import json
-import time
import pytest
-import functools
import platform
from functools import partial
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 6):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 6):
- tgen.add_router("r{}".format(routern))
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r4"])
-
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r5"])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r5"])
def _run_cmd_and_check(router, cmd, results_file, retries=100, intvl=0.5):
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import time
-import json
import pytest
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
-from time import sleep
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
verify_rib,
- create_static_routes,
check_address_types,
- interface_status,
reset_config_on_routers,
step,
get_frr_ipv6_linklocal,
start_router,
create_route_maps,
create_bgp_community_lists,
- delete_route_maps,
required_linux_kernel_version,
)
from lib.topolog import logger
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
- clear_bgp,
verify_bgp_rib,
verify_bgp_attributes,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
-pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/ebgp_gshut_topo1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- logger.info("Could not read file:", jsonFile)
# Global variables
NETWORK = {"ipv4": "100.0.10.1/32", "ipv6": "1::1/128"}
BGP_CONVERGENCE = False
-class GenerateTopo(Topo):
- """
- Test topology builder
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # This function only purpose is to create topology
- # as defined in input json file.
- #
- # Create topology (setup module)
- # Creating 2 routers topology, r1, r2in IBGP
- # Bring up topology
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(GenerateTopo, mod.__name__)
+ json_file = "{}/ebgp_gshut_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
step("local pref for routes coming from R1 is set to 0.")
for addr_type in ADDR_TYPES:
- rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}],}}}
+ rmap_dict = {
+ "r1": {
+ "route_maps": {
+ "GSHUT-OUT": [{"set": {"locPrf": 0}}],
+ }
+ }
+ }
static_routes = [NETWORK[addr_type]]
result = verify_bgp_attributes(
import os
import sys
import time
-import json
import pytest
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
-from time import sleep
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
verify_rib,
- create_static_routes,
check_address_types,
- interface_status,
reset_config_on_routers,
step,
get_frr_ipv6_linklocal,
- kill_router_daemons,
- start_router_daemons,
- stop_router,
- start_router,
create_route_maps,
create_bgp_community_lists,
- delete_route_maps,
required_linux_kernel_version,
)
from lib.topolog import logger
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
- clear_bgp,
verify_bgp_rib,
verify_bgp_attributes,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/ibgp_gshut_topo1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- logger.info("Could not read file:", jsonFile)
-
# Global variables
NETWORK = {"ipv4": "100.0.10.1/32", "ipv6": "1::1/128"}
NEXT_HOP_IP_1 = {"ipv4": "10.0.3.1", "ipv6": "fd00:0:0:3::1"}
BGP_CONVERGENCE = False
-class GenerateTopo(Topo):
- """
- Test topology builder
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # This function only purpose is to create topology
- # as defined in input json file.
- #
- # Create topology (setup module)
- # Creating 2 routers topology, r1, r2in IBGP
- # Bring up topology
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(GenerateTopo, mod.__name__)
+ json_file = "{}/ibgp_gshut_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
step("local pref for routes coming from R1 is set to 0.")
for addr_type in ADDR_TYPES:
- rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}],}}}
+ rmap_dict = {
+ "r1": {
+ "route_maps": {
+ "GSHUT-OUT": [{"set": {"locPrf": 0}}],
+ }
+ }
+ }
static_routes = [NETWORK[addr_type]]
result = verify_bgp_attributes(
step("local pref for routes coming from R1 is set to 0.")
for addr_type in ADDR_TYPES:
- rmap_dict = {"r1": {"route_maps": {"GSHUT-OUT": [{"set": {"locPrf": 0}}],}}}
+ rmap_dict = {
+ "r1": {
+ "route_maps": {
+ "GSHUT-OUT": [{"set": {"locPrf": 0}}],
+ }
+ }
+ }
static_routes = [NETWORK[addr_type]]
result = verify_bgp_attributes(
--- /dev/null
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:DB8:F::", "v6mask": 128},
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link3": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link5": {"ipv4": "auto", "ipv6": "auto"}}
+ },
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r0-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link3": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link5": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link0": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "100",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link0": {
+ "activate": "ipv4",
+ "capability": "extended-nexthop"
+ }
+ }
+ }
+ }
+ }
+ }
+ }}},
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link0": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "200",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link0": {
+ "activate": "ipv4",
+ "capability": "extended-nexthop"
+ }
+ }
+ },
+ "r3": {"dest_link": {"r2": {}}}}
+ }
+ }
+ }}},
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "200",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {"unicast": {"neighbor": {"r2": {"dest_link": {"r3": {}}}}}}
+ }}},
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"}}
+ }}}
--- /dev/null
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:DB8:F::", "v6mask": 128},
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link3": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link5": {"ipv4": "auto", "ipv6": "auto"}}
+ },
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r0-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link3": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link5": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link0": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "100",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link0": {
+ "capability": "extended-nexthop",
+ "activate": "ipv4"
+ }
+ }
+ }
+ }
+ }
+ }
+ }}},
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link0": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "200",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {"dest_link": {"r2": {"activate": "ipv4"}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link0": {
+ "capability": "extended-nexthop",
+ "activate": "ipv4"
+ }
+ }
+ },
+ "r3": {"dest_link": {"r2": {}}}}
+ }
+ }}}},
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "300",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {"unicast": {"neighbor": {"r2": {"dest_link": {"r3": {}}}}}}
+ }}},
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {"unicast": {"neighbor": {"r2": {"dest_link": {"r4": {}}}}}}
+ }}}}}
--- /dev/null
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:DB8:F::", "v6mask": 128},
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link3": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link5": {"ipv4": "auto", "ipv6": "auto"}}
+ },
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r0-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link3": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link5": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link0": {"ipv4": "auto"}},
+ "bgp": {
+ "local_as": "100",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link0": {
+ "activate": "ipv4",
+ "capability": "extended-nexthop",
+ "neighbor_type": "unnumbered"
+ }
+ }
+ }
+ }
+ }
+ }
+ }}},
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link0": {"ipv4": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "200",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {"dest_link": {"r2": {"activate": "ipv4"}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link0": {
+ "activate": "ipv4",
+ "capability": "extended-nexthop",
+ "neighbor_type": "unnumbered"
+ }
+ }
+ },
+ "r3": {"dest_link": {"r2": {}}}}
+ }
+ }}}},
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "300",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {"unicast": {"neighbor": {"r2": {"dest_link": {"r3": {}}}}}}
+ }}},
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {"unicast": {"neighbor": {"r2": {"dest_link": {"r4": {}}}}}}
+ }}}}}
--- /dev/null
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:DB8:F::", "v6mask": 128},
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link3": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r1-link5": {"ipv4": "auto", "ipv6": "auto"}}
+ },
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r0-link1": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link2": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link3": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link4": {"ipv4": "auto", "ipv6": "auto"},
+ "r0-link5": {"ipv4": "auto", "ipv6": "auto"},
+ "r2-link0": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "100",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link0": {
+ "capability": "extended-nexthop",
+ "activate": "ipv4"
+ }
+ }
+ }
+ }
+ }
+ }
+ }}},
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link0": {"ipv4": "auto", "ipv6": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "100",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {"dest_link": {"r2": {"activate": "ipv4"}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link0": {
+ "capability": "extended-nexthop",
+ "activate": "ipv4"
+ }
+ }
+ },
+ "r3": {"dest_link": {"r2": {}}}}
+ }
+ }}}},
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "300",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {"unicast": {"neighbor": {"r2": {"dest_link": {"r3": {}}}}}}
+ }}},
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {"unicast": {"neighbor": {"r2": {"dest_link": {"r4": {}}}}}}
+ }}}}}
--- /dev/null
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "ipv6base": "fd00::",
+ "ipv6mask": 64,
+ "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 24, "ipv6": "fd00::", "v6mask": 64},
+ "lo_prefix": {"ipv4": "1.0.", "v4mask": 32, "ipv6": "2001:DB8:F::", "v6mask": 128},
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link1": {"ipv4": "auto"},
+ "r1-link2": {"ipv4": "auto"},
+ "r1-link3": {"ipv4": "auto"},
+ "r1-link4": {"ipv4": "auto"},
+ "r1-link5": {"ipv4": "auto"}}
+ },
+ "r1": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r0-link1": {"ipv4": "auto"},
+ "r0-link2": {"ipv4": "auto"},
+ "r0-link3": {"ipv4": "auto"},
+ "r0-link4": {"ipv4": "auto"},
+ "r0-link5": {"ipv4": "auto"},
+ "r2-link0": {"ipv4": "auto"}},
+ "bgp": {
+ "local_as": "100",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link0": {
+ "neighbor_type": "unnumbered",
+ "capability": "extended-nexthop",
+ "activate": "ipv4"
+ }
+ }
+ }
+ }
+ }
+ }
+ }}},
+ "r2": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1-link0": {"ipv4": "auto"},
+ "r3": {"ipv4": "auto", "ipv6": "auto"},
+ "r4": {"ipv4": "auto"}},
+ "bgp": {
+ "local_as": "100",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r4": {"dest_link": {"r2": {"activate": "ipv4"}}}
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link0": {
+ "neighbor_type": "unnumbered",
+ "capability": "extended-nexthop",
+ "activate": "ipv4"
+ }
+ }
+ },
+ "r3": {"dest_link": {"r2": {}}}}
+ }
+ }}}},
+ "r3": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto", "ipv6": "auto"}},
+ "bgp": {
+ "local_as": "300",
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {"unicast": {"neighbor": {"r2": {"dest_link": {"r3": {}}}}}}
+ }}},
+ "r4": {
+ "links": {
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2": {"ipv4": "auto"}},
+ "bgp": {
+ "local_as": "400",
+ "address_family": {
+ "ipv4": {"unicast": {"neighbor": {"r2": {"dest_link": {"r4": {}}}}}}
+ }}}}}
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""RFC5549 Automation."""
+import os
+import sys
+import time
+import pytest
+from copy import deepcopy
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ get_frr_ipv6_linklocal,
+ write_test_footer,
+ verify_rib,
+ create_static_routes,
+ check_address_types,
+ reset_config_on_routers,
+ step,
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ verify_bgp_rib,
+)
+from lib.topojson import build_config_from_json
+
+# Global variables
+topo = None
+
+# Global variables
+NETWORK = {
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ],
+ "ipv6": ["1::1/128", "1::2/128", "1::3/128", "1::4/128", "1::5/128"],
+}
+MASK = {"ipv4": "32", "ipv6": "128"}
+NEXT_HOP = {
+ "ipv4": ["10.0.0.1", "10.0.1.1", "10.0.2.1", "10.0.3.1", "10.0.4.1"],
+ "ipv6": ["Null0", "Null0", "Null0", "Null0", "Null0"],
+}
+NO_OF_RTES = 2
+NETWORK_CMD_IP = "1.0.1.17/32"
+ADDR_TYPES = check_address_types()
+TOPOOLOGY = """
+ Please view in a fixed-width font such as Courier.
+
+ +----+
+ | R4 |
+ | |
+ +--+-+
+ | ipv4 nbr
+ no bgp ebgp/ibgp |
+ | ebgp/ibgp
+ +----+ 5links +----+ 8links +--+-+ +----+
+ |R0 +----------+ R1 +------------+ R2 | ipv6 nbr |R3 |
+ | +----------+ +------------+ +-------------+ |
+ +----+ +----+ ipv6 nbr +----+ +----+
+"""
+
+TESTCASES = """
+1. Verify Ipv4 route next hop is changed when advertised using
+next hop -self command
+2. Verify IPv4 route advertised to peer when IPv6 BGP session established
+ using peer-group
+3. Verify IPv4 routes received with IPv6 nexthop are getting advertised
+ to another IBGP peer without changing the nexthop
+4. Verify IPv4 routes advertised with correct nexthop when nexthop
+unchange is configure on EBGP peers
+ """
+
+
+def setup_module(mod):
+ """Set up the pytest environment."""
+
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/rfc5549_ebgp_ibgp_nbr.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """
+ Teardown the pytest environment.
+
+ * `mod`: module name
+ """
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+
+def get_llip(onrouter, intf):
+ """
+ API to get the link local ipv6 address of a perticular interface
+
+ Parameters
+ ----------
+ * `fromnode`: Source node
+ * `tonode` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_llip('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) link local ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ tgen = get_topogen()
+ intf = topo["routers"][onrouter]["links"][intf]["interface"]
+ llip = get_frr_ipv6_linklocal(tgen, onrouter, intf)
+ if llip:
+ logger.info("llip ipv6 address to be set as NH is %s", llip)
+ return llip
+ return None
+
+
+def get_glipv6(onrouter, intf):
+ """
+ API to get the global ipv6 address of a perticular interface
+
+ Parameters
+ ----------
+ * `onrouter`: Source node
+ * `intf` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_glipv6('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) global ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ glipv6 = (topo["routers"][onrouter]["links"][intf]["ipv6"]).split("/")[0]
+ if glipv6:
+ logger.info("Global ipv6 address to be set as NH is %s", glipv6)
+ return glipv6
+ return None
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+def test_ibgp_to_ibgp_p1(request):
+ """
+
+ Test Capability extended nexthop.
+
+ Verify IPv4 routes received with IPv6 nexthop are getting advertised to
+ another IBGP peer without changing the nexthop
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ reset_config_on_routers(tgen)
+ global topo
+ topo23 = deepcopy(topo)
+ build_config_from_json(tgen, topo23, save_bkup=False)
+
+ step("Configure IPv6 EBGP session between R1 and R2 with " "global IPv6 address")
+ step("Configure IPv6 IBGP session betn R2 & R3 using IPv6 global address")
+ step("Enable capability extended-nexthop on both the IPv6 BGP peers")
+ step("Activate same IPv6 nbr from IPv4 unicast family")
+ step("Enable cap ext nh on r1 and r2 and activate in ipv4 addr family")
+ step("Verify bgp convergence as ipv6 nbr is enabled on ipv4 addr family.")
+
+ # verify bgp convergence as ipv6 nbr is enabled on ipv4 addr family.
+ bgp_convergence = verify_bgp_convergence(tgen, topo23)
+ assert bgp_convergence is True, "Testcase :Failed \n Error:" " {}".format(
+ bgp_convergence
+ )
+
+ step(" Configure 5 IPv4 static" " routes on R1, Nexthop as different links of R0")
+ for rte in range(0, NO_OF_RTES):
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Advertise static routes from IPv4 unicast family and IPv6 "
+ "unicast family respectively from R1 using red static cmd "
+ "Advertise loopback from IPv4 unicast family using network command "
+ "from R1"
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {"network": NETWORK_CMD_IP, "no_of_network": 1}
+ ],
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo23, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ "IPv4 routes advertised using static and network command are "
+ " received on R2 BGP and routing table , "
+ "verify using show ip bgp, show ip route for IPv4 routes ."
+ )
+
+ gllip = get_llip("r1", "r2-link0")
+ assert gllip is not None, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ dut = "r2"
+ protocol = "bgp"
+ # verify the routes with nh as ext_nh
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": gllip,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=gllip
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=gllip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2": {
+ "activate": "ipv4",
+ "capability": "extended-nexthop",
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r3 = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {
+ "activate": "ipv4",
+ "capability": "extended-nexthop",
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "IPv4 routes installed on R3 with global address without "
+ "changing the nexthop ( nexthop should IPv6 link local which is"
+ " received from R1)"
+ )
+ gipv6 = get_glipv6("r1", "r2-link0")
+ dut = "r3"
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": gipv6,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=gipv6
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ write_test_footer(tc_name)
+
+
+def test_ext_nh_cap_red_static_network_ibgp_peer_p1(request):
+ """
+
+ Test Extended capability next hop, with ibgp peer.
+
+ Verify IPv4 routes advertise using "redistribute static" and
+ "network command" are received on EBGP peer with IPv6 nexthop
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ reset_config_on_routers(tgen)
+ step(
+ " Configure IPv6 EBGP session between R1 & R2 with global IPv6 address"
+ " Enable capability extended-nexthop on the nbr from both the routers"
+ " Activate same IPv6 nbr from IPv4 unicast family"
+ )
+ configure_bgp_on_r2 = {
+ "r2": {
+ "bgp": {
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r2": {
+ "capability": "extended-nexthop",
+ "activate": "ipv4",
+ "next_hop_self": True,
+ "activate": "ipv4",
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r3 = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r3": {
+ "capability": "extended-nexthop",
+ "activate": "ipv4",
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ for rte in range(0, NO_OF_RTES):
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "default_ipv4_unicast": "False",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {"network": NETWORK_CMD_IP, "no_of_network": 1}
+ ],
+ }
+ }
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ gllip = get_llip("r1", "r2-link0")
+ assert gllip is not None, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ dut = "r2"
+ protocol = "bgp"
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": gllip,
+ }
+ ]
+ }
+ }
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=gllip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ verify_nh_for_nw_cmd_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK_CMD_IP,
+ "no_of_ip": 1,
+ "next_hop": gllip,
+ }
+ ]
+ }
+ }
+
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_cmd_rtes, next_hop=gllip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ gllip = get_glipv6("r2", "r3")
+ assert gllip is not None, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ dut = "r3"
+ protocol = "bgp"
+ # verify the routes with nh as ext_nh
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": gllip,
+ }
+ ]
+ }
+ }
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=gllip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ verify_nh_for_nw_cmd_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK_CMD_IP,
+ "no_of_ip": 1,
+ "next_hop": gllip,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_cmd_rtes, next_hop=gllip
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_cmd_rtes, next_hop=gllip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_bgp_peer_group_p1(request):
+ """
+ Test extended capability next hop with peer groups.
+
+ Verify IPv4 routes received with IPv6 nexthop are getting advertised to
+ another IBGP peer without changing the nexthop
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ reset_config_on_routers(tgen)
+ global topo
+ topo1 = deepcopy(topo)
+ step("Configure IPv6 EBGP session between R1 and R2 with " "global IPv6 address")
+ step("Configure IPv6 IBGP session betn R2 & R3 using IPv6 global address")
+ step("Enable capability extended-nexthop on both the IPv6 BGP peers")
+ step("Activate same IPv6 nbr from IPv4 unicast family")
+ step("Enable cap ext nh on r1 and r2 and activate in ipv4 addr family")
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "default_ipv4_unicast": "False",
+ "peer-group": {
+ "rfc5549": {"capability": "extended-nexthop", "remote-as": "200"}
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link0": {
+ "activate": "ipv4",
+ "capability": "extended-nexthop",
+ "peer-group": "rfc5549",
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ configure_bgp_on_r2 = {
+ "r2": {
+ "bgp": {
+ "default_ipv4_unicast": "False",
+ "peer-group": {
+ "rfc5549": {"capability": "extended-nexthop", "remote-as": "100"}
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ configure_bgp_on_r2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link0": {
+ "capability": "extended-nexthop",
+ "activate": "ipv4",
+ "peer-group": "rfc5549",
+ }
+ }
+ },
+ "r3": {"dest_link": {"r2": {}}},
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r3 = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"neighbor": {"r2": {"dest_link": {"r3": {}}}}}}
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp convergence as ipv6 nbr is enabled on ipv4 addr family.")
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase :Failed \n Error:" " {}".format(
+ bgp_convergence
+ )
+
+ step(" Configure 2 IPv4 static" " routes on R1, Nexthop as different links of R0")
+ for rte in range(0, NO_OF_RTES):
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Advertise static routes from IPv4 unicast family and IPv6 "
+ "unicast family respectively from R1 using red static cmd "
+ "Advertise loopback from IPv4 unicast family using network command "
+ "from R1"
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {"network": NETWORK_CMD_IP, "no_of_network": 1}
+ ],
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ "IPv4 routes advertised using static and network command are "
+ " received on R2 BGP and routing table , "
+ "verify using show ip bgp, show ip route for IPv4 routes ."
+ )
+
+ gllip = get_llip("r1", "r2-link0")
+ assert gllip is not None, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ dut = "r2"
+ protocol = "bgp"
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": gllip,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=gllip
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=gllip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Enable cap ext nh on r1 and r2 and activate in ipv4 addr family")
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "default_ipv4_unicast": "False",
+ "peer-group": {
+ "rfc5549": {"capability": "extended-nexthop", "remote-as": "200"}
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link0": {
+ "activate": "ipv4",
+ "capability": "extended-nexthop",
+ "peer-group": "rfc5549",
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ configure_bgp_on_r2 = {
+ "r2": {
+ "bgp": {
+ "default_ipv4_unicast": "False",
+ "peer-group": {
+ "rfc5549": {"capability": "extended-nexthop", "remote-as": "100"}
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ configure_bgp_on_r2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2-link0": {
+ "capability": "extended-nexthop",
+ "activate": "ipv4",
+ "peer-group": "rfc5549",
+ }
+ }
+ },
+ "r3": {"dest_link": {"r2": {}}},
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r3 = {
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"neighbor": {"r2": {"dest_link": {"r3": {}}}}}}
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp convergence as ipv6 nbr is enabled on ipv4 addr family.")
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase :Failed \n Error:" " {}".format(
+ bgp_convergence
+ )
+
+ step(" Configure 2 IPv4 static" " routes on R1, Nexthop as different links of R0")
+ for rte in range(0, NO_OF_RTES):
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Advertise static routes from IPv4 unicast family and IPv6 "
+ "unicast family respectively from R1 using red static cmd "
+ "Advertise loopback from IPv4 unicast family using network command "
+ "from R1"
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {"network": NETWORK_CMD_IP, "no_of_network": 1}
+ ],
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ "IPv4 routes advertised using static and network command are "
+ " received on R2 BGP and routing table , "
+ "verify using show ip bgp, show ip route for IPv4 routes ."
+ )
+
+ gllip = get_llip("r1", "r2-link0")
+ assert gllip is not None, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ dut = "r2"
+ protocol = "bgp"
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": gllip,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=gllip
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=gllip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""RFC5549 Automation."""
+import os
+import sys
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ get_frr_ipv6_linklocal,
+ verify_rib,
+ create_static_routes,
+ check_address_types,
+ reset_config_on_routers,
+ step,
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ verify_bgp_rib,
+)
+from lib.topojson import build_config_from_json
+
+# Global variables
+topo = None
+
+# Global variables
+NETWORK = {
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ],
+ "ipv6": ["1::1/128", "1::2/128", "1::3/128", "1::4/128", "1::5/128"],
+}
+MASK = {"ipv4": "32", "ipv6": "128"}
+NEXT_HOP = {
+ "ipv4": ["10.0.0.1", "10.0.1.1", "10.0.2.1", "10.0.3.1", "10.0.4.1"],
+ "ipv6": ["Null0", "Null0", "Null0", "Null0", "Null0"],
+}
+NO_OF_RTES = 2
+NETWORK_CMD_IP = "1.0.1.17/32"
+ADDR_TYPES = check_address_types()
+BGP_CONVERGENCE_TIMEOUT = 10
+TOPOOLOGY = """
+ Please view in a fixed-width font such as Courier.
+ +----+
+ | R4 |
+ | |
+ +--+-+
+ | ipv4 nbr
+ no bgp ebgp |
+ | ebgp/ibgp
+ +----+ 5links +----+ +--+-+ +----+
+ |R0 +----------+ R1 | | R2 | ipv6 nbr |R3 |
+ | +----------+ +------------+ +-------------+ |
+ +----+ +----+ ipv6 nbr +----+ +----+
+"""
+
+TESTCASES = """
+TC6. Verify BGP speaker advertise IPv4 route to peer only if "extended
+ nexthop capability" is negotiated
+TC7. Verify ipv4 route nexthop updated dynamically when in route-map is
+ applied on receiving BGP peer
+TC8. Verify IPv4 routes advertise using "redistribute static" and "network
+ command" are received on EBGP peer with IPv6 nexthop
+TC10. Verify IPv4 routes are deleted after un-configuring of "network
+command" and "redistribute static knob"
+TC18. Verify IPv4 routes installed with correct nexthop after deactivate
+ and activate neighbor from address family
+TC19. Verify IPv4 route ping is working fine and nexhop installed in kernel
+ as IPv4 link-local address
+TC24. Verify IPv4 prefix-list routes advertised to peer when prefix -list
+ applied in out direction
+TC27. Verify IPv4 routes are intact after BGPd process restart
+TC30. Verify Ipv4 route installed with correct next hop when same route
+ is advertised via IPV4 and IPv6 BGP peers
+TC32. Verify IPv4 route received with IPv6 nexthop can be advertised to
+ another IPv4 BGP peers
+ """
+
+
+def setup_module(mod):
+ """Set up the pytest environment."""
+ global topo, ADDR_TYPES
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/rfc5549_ebgp_nbr.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment."""
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+
+def get_llip(onrouter, intf):
+ """
+ API to get the link local ipv6 address of a perticular interface
+
+ Parameters
+ ----------
+ * `fromnode`: Source node
+ * `tonode` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_llip('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) link local ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ tgen = get_topogen()
+ intf = topo["routers"][onrouter]["links"][intf]["interface"]
+ llip = get_frr_ipv6_linklocal(tgen, onrouter, intf)
+ if llip:
+ logger.info("llip ipv6 address to be set as NH is %s", llip)
+ return llip
+ return None
+
+
+def get_glipv6(onrouter, intf):
+ """
+ API to get the global ipv6 address of a perticular interface
+
+ Parameters
+ ----------
+ * `onrouter`: Source node
+ * `intf` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_glipv6('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) global ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ glipv6 = (topo["routers"][onrouter]["links"][intf]["ipv6"]).split("/")[0]
+ if glipv6:
+ logger.info("Global ipv6 address to be set as NH is %s", glipv6)
+ return glipv6
+ return None
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+
+def test_ext_nh_cap_red_static_network_ebgp_peer_tc8_p0(request):
+ """
+
+ Test exted capability nexthop with route map in.
+
+ Verify IPv4 routes advertise using "redistribute static" and
+ "network command" are received on EBGP peer with IPv6 nexthop
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ step("Configure IPv6 EBGP session between R1 and R2 with global" " IPv6 address")
+ reset_config_on_routers(tgen)
+
+ step(
+ "Enable capability extended-nexthop on the nbr from both the "
+ " routers Activate same IPv6 nbr from IPv4 unicast family"
+ )
+ step(
+ " Configure 2 IPv4 static "
+ "routes on R1 (nexthop for static route exists on different "
+ "link of R0"
+ )
+ for rte in range(0, NO_OF_RTES):
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ for rte in range(0, NO_OF_RTES):
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv6"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv6"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Advertise static routes from IPv4 unicast family and IPv6 "
+ "unicast family respectively from R1 using red static cmd "
+ "Advertise loopback from IPv4 unicast family using network command "
+ "from R1"
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "local_as": "100",
+ "default_ipv4_unicast": "True",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {"network": NETWORK_CMD_IP, "no_of_network": 1}
+ ],
+ }
+ },
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ glip = get_llip("r1", "r2-link0")
+ assert glip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "IPv4 and IPv6 routes advertised using static and network command "
+ "are received on R2 BGP & routing table , verify using show ip bgp "
+ "show ip route for IPv4 routes and show bgp ipv6,show ipv6 routes "
+ "for IPv6 routes ."
+ )
+
+ dut = "r2"
+ protocol = "bgp"
+ for addr_type in ADDR_TYPES:
+ # verify the routes with nh as ext_nh
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK[addr_type][0],
+ "no_of_ip": 2,
+ "next_hop": glip,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, addr_type, dut, verify_nh_for_static_rtes, next_hop=glip
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_rib
+ )
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ verify_nh_for_static_rtes,
+ next_hop=glip,
+ protocol=protocol,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Verify IPv4 routes are installed with IPv6 global nexthop of R1"
+ " R1 to R2 connected link"
+ )
+
+ verify_nh_for_nw_cmd_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK_CMD_IP,
+ "no_of_ip": 1,
+ "next_hop": glip,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_cmd_rtes, next_hop=glip
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_cmd_rtes, next_hop=glip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+
+def test_ext_nh_cap_remove_red_static_network_ebgp_peer_tc10_p1(request):
+ """
+
+ Test exted capability nexthop with route map in.
+
+ Verify IPv4 routes are deleted after un-configuring of
+ network command and redistribute static knob
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ step(
+ "Configure IPv6 EBGP session between R1 and R2 with global IPv6"
+ " address Enable capability extended-nexthop on the nbr from both"
+ " the routers , Activate same IPv6 nbr from IPv4 unicast family"
+ )
+ step(
+ " Configure 2 IPv4 static routes "
+ " on R1 nexthop for static route exists on different link of R0"
+ )
+ reset_config_on_routers(tgen)
+
+ for rte in range(0, NO_OF_RTES):
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "Advertise static routes from IPv4 unicast family and IPv6 unicast"
+ " family respectively from R1. Configure loopback on R1 with IPv4 "
+ "address Advertise loobak from IPv4 unicast family using network "
+ "command from R1"
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "local_as": "100",
+ "default_ipv4_unicast": "True",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {"network": NETWORK_CMD_IP, "no_of_network": 1}
+ ],
+ }
+ }
+ },
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "IPv4 and IPv6 routes advertised using static and network command are"
+ " received on R2 BGP and routing table , verify using show ip bgp"
+ " show ip route for IPv4 routes and show bgp, show ipv6 routes"
+ " for IPv6 routes ."
+ )
+
+ glipv6 = get_llip("r1", "r2-link0")
+ assert glipv6 is not None, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ dut = "r2"
+ protocol = "bgp"
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "advertise_networks": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": get_glipv6,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=get_glipv6
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen,
+ "ipv4",
+ dut,
+ verify_nh_for_static_rtes,
+ next_hop=get_glipv6,
+ protocol=protocol,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify IPv4 routes are installed with IPv6 global nexthop of R1 "
+ " R1 to R2 connected link"
+ )
+ verify_nh_for_nw_cmd_rtes = {
+ "r1": {
+ "advertise_networks": [
+ {
+ "network": NETWORK_CMD_IP,
+ "no_of_ip": 1,
+ "next_hop": glipv6,
+ }
+ ]
+ }
+ }
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_cmd_rtes, next_hop=glipv6, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static", "delete": True}]
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static", "delete": True}]
+ }
+ },
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # verify the routes with nh as ext_nh
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": glipv6,
+ }
+ ]
+ }
+ }
+
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=glipv6, expected=False
+ )
+ assert (
+ bgp_rib is not True
+ ), "Testcase {} : Failed \n Error: Routes still" " present in BGP rib".format(
+ tc_name
+ )
+ result = verify_rib(
+ tgen,
+ "ipv4",
+ dut,
+ verify_nh_for_static_rtes,
+ next_hop=glipv6,
+ protocol=protocol,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: Routes " "still present in RIB".format(tc_name)
+
+ step(
+ "After removing IPv4 routes from redistribute static those routes"
+ " are removed from R2, after re-advertising routes which are "
+ " advertised using network are still present in the on R2 with "
+ " IPv6 global nexthop, verify using show ip bgp and show ip routes"
+ )
+
+ verify_nh_for_nw_cmd_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK_CMD_IP,
+ "no_of_ip": 1,
+ "next_hop": glipv6,
+ }
+ ]
+ }
+ }
+
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_cmd_rtes, next_hop=glipv6, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "advertise_networks": [
+ {
+ "network": NETWORK_CMD_IP,
+ "no_of_network": 1,
+ "delete": True,
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(
+ tgen,
+ "ipv4",
+ dut,
+ verify_nh_for_nw_cmd_rtes,
+ next_hop=glipv6,
+ protocol=protocol,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n " "Error: Routes still present in BGP rib".format(
+ tc_name
+ )
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""RFC5549 Automation."""
+import os
+import sys
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
+
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ write_test_header,
+ start_topology,
+ write_test_footer,
+ start_router,
+ stop_router,
+ verify_rib,
+ create_static_routes,
+ check_address_types,
+ reset_config_on_routers,
+ step,
+ get_frr_ipv6_linklocal,
+)
+from lib.topolog import logger
+from lib.bgp import create_router_bgp, verify_bgp_convergence, verify_bgp_rib
+
+from lib.topojson import build_config_from_json
+
+# Global variables
+topo = None
+
+# Global variables
+NO_OF_RTES = 2
+NETWORK_CMD_IP = "1.0.1.17/32"
+NETWORK = {
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ],
+ "ipv6": ["1::1/128", "1::2/128", "1::3/128", "1::4/128", "1::5/128"],
+}
+MASK = {"ipv4": "32", "ipv6": "128"}
+NEXT_HOP = {
+ "ipv4": ["10.0.0.1", "10.0.1.1", "10.0.2.1", "10.0.3.1", "10.0.4.1"],
+ "ipv6": ["Null0", "Null0", "Null0", "Null0", "Null0"],
+}
+INTF_LIST = [
+ "r2-link0",
+ "r2-link1",
+ "r2-link2",
+ "r2-link3",
+ "r2-link4",
+ "r2-link5",
+ "r2-link6",
+ "r2-link7",
+]
+ADDR_TYPES = check_address_types()
+TOPOOLOGY = """
+ Please view in a fixed-width font such as Courier.
+
+ +----+
+ | R4 |
+ | |
+ +--+-+
+ | ipv4 nbr
+ no bgp ebgp/ibgp |
+ | ebgp/ibgp
+ +----+ 5links +----+ 8links +--+-+ +----+
+ |R0 +----------+ R1 +------------+ R2 | ipv6 nbr |R3 |
+ | +----------+ +------------+ +-------------+ |
+ +----+ +----+ ipv6 nbr +----+ +----+
+"""
+
+TESTCASES = """
+1. Verify IPv4 routes are advertised when IPv6 EBGP loopback session
+ established using Unnumbered interface
+2. Verify IPv4 routes are installed with correct nexthop after
+shut / no shut of nexthop and BGP peer interfaces
+3. Verify IPv4 routes are intact after stop and start the FRR services
+ """
+
+
+def setup_module(mod):
+ """Set up the pytest environment."""
+ global topo, ADDR_TYPES
+
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/rfc5549_ebgp_unnumbered_nbr.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment."""
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+
+def get_llip(onrouter, intf):
+ """
+ API to get the link local ipv6 address of a perticular interface
+
+ Parameters
+ ----------
+ * `fromnode`: Source node
+ * `tonode` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_llip('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) link local ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ tgen = get_topogen()
+ intf = topo["routers"][onrouter]["links"][intf]["interface"]
+ llip = get_frr_ipv6_linklocal(tgen, onrouter, intf)
+ if llip:
+ logger.info("llip ipv6 address to be set as NH is %s", llip)
+ return llip
+ return None
+
+
+def get_glipv6(onrouter, intf):
+ """
+ API to get the global ipv6 address of a perticular interface
+
+ Parameters
+ ----------
+ * `onrouter`: Source node
+ * `intf` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_glipv6('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) global ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ glipv6 = (topo["routers"][onrouter]["links"][intf]["ipv6"]).split("/")[0]
+ if glipv6:
+ logger.info("Global ipv6 address to be set as NH is %s", glipv6)
+ return glipv6
+ return None
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+
+def test_unnumbered_loopback_ebgp_nbr_p0(request):
+ """
+
+ Test extended capability nexthop with un numbered ebgp.
+
+ Verify IPv4 routes are advertised when IPv6 EBGP loopback
+ session established using Unnumbered interface
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ reset_config_on_routers(tgen)
+
+ step("Configure IPv6 EBGP Unnumbered session between R1 and R2")
+ step("Enable capability extended-nexthop on both the IPv6 BGP peers")
+ step("Activate same IPv6 nbr from IPv4 unicast family")
+ step("Enable cap ext nh on r1 and r2 and activate in ipv4 addr family")
+ step("Verify bgp convergence as ipv6 nbr is enabled on ipv4 addr family.")
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step(" Configure 5 IPv4 static" " routes on R1, Nexthop as different links of R0")
+ for rte in range(0, NO_OF_RTES):
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Advertise static routes from IPv4 unicast family and IPv6 "
+ "unicast family respectively from R1 using red static cmd "
+ "Advertise loopback from IPv4 unicast family using network command "
+ "from R1"
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {"network": NETWORK_CMD_IP, "no_of_network": 1}
+ ],
+ }
+ },
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ "IPv4 routes advertised using static and network command are "
+ " received on R2 BGP and routing table , "
+ "verify using show ip bgp, show ip route for IPv4 routes ."
+ )
+
+ llip = get_llip("r1", "r2-link0")
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, llip)
+
+ dut = "r2"
+ protocol = "bgp"
+ for rte in range(0, NO_OF_RTES):
+ # verify the routes with nh as ext_nh
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK["ipv4"][rte], "no_of_ip": 1, "next_hop": llip}
+ ]
+ }
+ }
+ """ interface_list = ['r1-link0','r1-link1']
+ nh_list =[]
+ for i in range(NO_OF_RTES):
+ nh_list.append(topo['routers']['r2']['links'][i][
+ 'interface']) """
+ bgp_rib = verify_rib(
+ tgen,
+ "ipv4",
+ dut,
+ # verify_nh_for_static_rtes, next_hop='r2-r1-eth0')
+ verify_nh_for_static_rtes,
+ next_hop=llip,
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_rib
+ )
+ result = verify_rib(
+ tgen,
+ "ipv4",
+ dut,
+ verify_nh_for_static_rtes,
+ next_hop=llip,
+ protocol=protocol,
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ # verify the routes with nh as ext_nh
+ verify_nh_for_nw_rtes = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK_CMD_IP, "no_of_ip": 1, "next_hop": llip}
+ ]
+ }
+ }
+
+ bgp_rib = verify_rib(
+ tgen,
+ "ipv4",
+ dut,
+ # verify_nh_for_nw_rtes, next_hop='r2-r1-eth0')
+ verify_nh_for_nw_rtes,
+ next_hop=llip,
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=llip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ # stop/start -> restart FRR router and verify
+ stop_router(tgen, "r1")
+ stop_router(tgen, "r2")
+ start_router(tgen, "r1")
+ start_router(tgen, "r2")
+ step(
+ "After stop/start of FRR services , verify session up and routes "
+ "came up fine ,nh is proper using show bgp & show ipv6 route on R2 "
+ )
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ llip = get_llip("r1", "r2-link0")
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # verify the routes with nh as ext_nh
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": llip,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen,
+ "ipv4",
+ dut,
+ # verify_nh_for_static_rtes, next_hop='r2-r1-eth0')
+ verify_nh_for_static_rtes,
+ next_hop=llip,
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=llip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ verify_nh_for_nw_rtes = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK_CMD_IP, "no_of_ip": 1, "next_hop": llip}
+ ]
+ }
+ }
+ bgp_rib = verify_rib(
+ tgen,
+ "ipv4",
+ dut,
+ # verify_nh_for_nw_rtes, next_hop='r2-r1-eth0')
+ verify_nh_for_nw_rtes,
+ next_hop=llip,
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=llip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ write_test_footer(tc_name)
+
+
+def test_restart_frr_p2(request):
+ """
+
+ Test extended capability nexthop , restart frr.
+
+ Verify IPv4 routes are intact after stop and start the FRR services
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ reset_config_on_routers(tgen)
+ step("Configure IPv6 EBGP Unnumbered session between R1 and R2")
+ step("Enable capability extended-nexthop on both the IPv6 BGP peers")
+ step("Activate same IPv6 nbr from IPv4 unicast family")
+ step("Enable cap ext nh on r1 and r2 and activate in ipv4 addr family")
+ step("Verify bgp convergence as ipv6 nbr is enabled on ipv4 addr family.")
+ reset_config_on_routers(tgen)
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step(" Configure 5 IPv4 static" " routes on R1, Nexthop as different links of R0")
+ for rte in range(0, NO_OF_RTES):
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Advertise static routes from IPv4 unicast family and IPv6 "
+ "unicast family respectively from R1 using red static cmd "
+ "Advertise loopback from IPv4 unicast family using network command "
+ "from R1"
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {"network": NETWORK_CMD_IP, "no_of_network": 1}
+ ],
+ }
+ },
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ "IPv4 routes advertised using static and network command are "
+ " received on R2 BGP and routing table , "
+ "verify using show ip bgp, show ip route for IPv4 routes ."
+ )
+
+ llip = get_llip("r1", "r2-link0")
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r2"
+ protocol = "bgp"
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": llip,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_rib(tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=llip)
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=llip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ verify_nh_for_nw_rtes = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK_CMD_IP, "no_of_ip": 1, "next_hop": llip}
+ ]
+ }
+ }
+
+ bgp_rib = verify_rib(tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=llip)
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=llip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # stop/start -> restart FRR router and verify
+ stop_router(tgen, "r1")
+ stop_router(tgen, "r2")
+ start_router(tgen, "r1")
+ start_router(tgen, "r2")
+
+ step(
+ "After stop/start of FRR services , verify session up and routes "
+ "came up fine ,nh is proper using show bgp & show ipv6 route on R2 "
+ )
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase {} :Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ llip = get_llip("r1", "r2-link0")
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # verify the routes with nh as ext_nh
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK["ipv4"][0], "no_of_ip": 1, "next_hop": llip}
+ ]
+ }
+ }
+ bgp_rib = verify_rib(tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=llip)
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=llip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # verify the routes with nh as ext_nh
+ verify_nh_for_nw_rtes = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK_CMD_IP, "no_of_ip": 1, "next_hop": llip}
+ ]
+ }
+ }
+ bgp_rib = verify_rib(tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=llip)
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=llip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""RFC5549 Automation."""
+import os
+import sys
+import time
+import pytest
+from copy import deepcopy
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ addKernelRoute,
+ write_test_footer,
+ create_prefix_lists,
+ verify_rib,
+ create_static_routes,
+ reset_config_on_routers,
+ step,
+ create_route_maps,
+ get_frr_ipv6_linklocal,
+)
+from lib.topolog import logger
+from lib.bgp import (
+ verify_bgp_convergence,
+ create_router_bgp,
+ verify_bgp_rib,
+)
+from lib.topojson import build_config_from_json
+
+# Global variables
+topo = None
+
+# Global variables
+NETWORK = {
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ],
+ "ipv6": ["1::1/128", "1::2/128", "1::3/128", "1::4/128", "1::5/128"],
+}
+MASK = {"ipv4": "32", "ipv6": "128"}
+NEXT_HOP = {
+ "ipv4": ["10.0.0.1", "10.0.1.1", "10.0.2.1", "10.0.3.1", "10.0.4.1"],
+ "ipv6": ["Null0", "Null0", "Null0", "Null0", "Null0"],
+}
+NETWORK_CMD_IP = "1.0.1.17/32"
+NO_OF_RTES = 2
+TOPOOLOGY = """
+ Please view in a fixed-width font such as Courier.
+
+ +----+
+ | R4 |
+ | |
+ +--+-+
+ | ipv4 nbr
+ no bgp ebgp/ibgp |
+ | ebgp/ibgp
+ +----+ 5links +----+ +--+-+ +----+
+ |R0 +----------+ R1 | | R2 | ipv6 nbr |R3 |
+ | +----------+ +------------+ +-------------+ |
+ +----+ +----+ ipv6 nbr +----+ +----+
+"""
+
+TESTCASES = """
+1. Verify IPv4 and IPv6 routes advertise using "redistribute static"
+ and "network command" are received on IBGP peer with IPv6 nexthop
+2. Verify IPv4 routes are advertised and withdrawn when IPv6 IBGP session
+ established using loopback interface
+3. Verify IPv4 routes are advertised to peer when static routes are
+ configured with ADMIN distance and tag option
+4. Verify IPv4 routes advertised to peer when BGP session established
+ using link-local address
+ """
+
+
+def setup_module(mod):
+ """Set up the pytest environment."""
+
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/rfc5549_ibgp_nbr.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment."""
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+
+def get_llip(onrouter, intf):
+ """
+ API to get the link local ipv6 address of a perticular interface
+
+ Parameters
+ ----------
+ * `fromnode`: Source node
+ * `tonode` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_llip('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) link local ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ tgen = get_topogen()
+ intf = topo["routers"][onrouter]["links"][intf]["interface"]
+ llip = get_frr_ipv6_linklocal(tgen, onrouter, intf)
+
+ if llip:
+ logger.info("llip ipv6 address to be set as NH is %s", llip)
+ return llip
+ return None
+
+
+def get_glipv6(onrouter, intf):
+ """
+ API to get the global ipv6 address of a perticular interface
+
+ Parameters
+ ----------
+ * `onrouter`: Source node
+ * `intf` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_glipv6('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) global ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ glipv6 = (topo["routers"][onrouter]["links"][intf]["ipv6"]).split("/")[0]
+ if glipv6:
+ logger.info("Global ipv6 address to be set as NH is %s", glipv6)
+ return glipv6
+ return None
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+
+def test_ext_nh_cap_red_static_network_ibgp_peer_p1(request):
+ """
+
+ Test extended capability nexthop with ibgp peer.
+
+ Verify IPv4 and IPv6 routes advertise using "redistribute static"
+ and "network command" are received on IBGP peer with IPv6 nexthop
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ reset_config_on_routers(tgen)
+ step(
+ "Configure IPv6 EBGP session between R1 and R2 with global IPv6"
+ " address Enable capability extended-nexthop on the nbr from both"
+ " the routers"
+ )
+ step(
+ "Change ebgp to ibgp nbrs between r1 and r2 , Activate same IPv6"
+ " nbr from IPv4 unicast family "
+ )
+
+ step(
+ " Configure 5 IPv4 static routes"
+ " on R1 nexthop for static route exists on different link of R0"
+ )
+
+ for rte in range(0, NO_OF_RTES):
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "Advertise static routes from IPv4 unicast family and IPv6 unicast"
+ " family respectively from R1.Configure loopback on R1 with IPv4 addr"
+ " & Advertise loopback from IPv4 unicast family using network cmd "
+ " from R1"
+ )
+ # this test case needs ipv6 routes to be configured
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {"network": NETWORK_CMD_IP, "no_of_network": 1}
+ ],
+ }
+ },
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ glip = get_llip("r1", "r2-link0")
+ assert glip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "IPv4 and IPv6 routes advertised using static & network command are"
+ "received on R2 BGP and routing table , verify using show ip bgp"
+ "show ip route for IPv4 routes and show bgp, show ipv6 routes"
+ "for IPv6 routes ."
+ )
+
+ dut = "r2"
+ protocol = "bgp"
+ # verify the routes with nh as ext_nh
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": glip,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=glip
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=glip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify IPv4 routes are installed with IPv6 global nexthop of R1"
+ "R1 to R2 connected link"
+ )
+ verify_nh_for_nw_cmd_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK_CMD_IP,
+ "no_of_ip": 1,
+ "next_hop": glip,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_cmd_rtes, next_hop=glip
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_cmd_rtes, next_hop=glip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+def test_ext_nh_cap_admin_dist_tag_ibgp_peer_p1(request):
+ """
+
+ Test extended capability nexthop with admin distance and route tag.
+
+ Verify IPv4 routes are advertised to peer when static routes
+ are configured with ADMIN distance and tag option
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ reset_config_on_routers(tgen)
+ step(
+ "Configure IPv6 EBGP session between R1 and R2 with global IPv6"
+ " address Enable capability extended-nexthop on the nbr from both"
+ " the routers"
+ )
+ step(
+ "Change ebgp to ibgp nbrs between r1 and r2 , Activate same IPv6"
+ " nbr from IPv4 unicast family "
+ )
+ step(
+ " Configure 5 IPv4 static routes"
+ " on R1 nexthop for static route exists on different link of R0"
+ )
+ count = 0
+ for rte in range(0, NO_OF_RTES):
+ count += 1
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ "admin_distance": 100 + count,
+ "tag": 4001 + count,
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "Advertise static routes from IPv4 unicast family & IPv6 unicast"
+ " family respectively from R1.Configure loopback on R1 with IPv4 "
+ "address & Advertise loopback from IPv4 unicast family "
+ "using network cmd from R1"
+ )
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}}
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ glip = get_llip("r1", "r2-link0")
+ assert glip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "IPv4 and IPv6 routes advertised using static & network cmd are"
+ "received on R2 BGP and routing table , verify using show ip bgp"
+ "show ip route for IPv4 routes and show bgp, show ipv6 routes"
+ "for IPv6 routes ."
+ )
+
+ dut = "r2"
+ protocol = "bgp"
+ count = 0
+ # verify the routes with nh as ext_nh
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": glip,
+ "admin_distance": 100 + count,
+ "tag": 4001 + count,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=glip
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=glip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ count = 0
+ for rte in range(0, NO_OF_RTES):
+ count += 10
+ input_dict_2 = {
+ "r3": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {
+ "seqid": 0 + count,
+ "action": "permit",
+ "network": NETWORK["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n " "Error: {}".format(
+ tc_name, result
+ )
+
+ # Create route map
+ input_dict_6 = {
+ "r3": {
+ "route_maps": {
+ "rmap_match_tag_1_{}".format("ipv4"): [
+ {
+ "action": "deny",
+ "match": {
+ "ipv4": {"prefix_lists": "pf_list_1_{}".format("ipv4")}
+ },
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_6)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ # Configure neighbor for route map
+ input_dict_7 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1-link0": {
+ "route_maps": [
+ {
+ "name": "rmap_match_tag_1_ipv4",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ result = create_router_bgp(tgen, topo, input_dict_7)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_ibgp_loopback_nbr_p1(request):
+ """
+ Verify Extended capability nexthop with loopback interface.
+
+ Verify IPv4 routes are advertised and withdrawn when IPv6 IBGP
+ session established using loopback interface
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ global topo
+ topo1 = deepcopy(topo)
+ reset_config_on_routers(tgen)
+ step("Configure IPv6 global address between R1 and R2")
+ step(
+ "Configure loopback on R1 and R2 and establish EBGP session "
+ "between R1 and R2 over loopback global ip"
+ )
+ step("Configure static route on R1 and R2 for loopback reachability")
+ step("Enable cap ext nh on r1 and r2 and activate in ipv4 addr family")
+
+ for routerN in ["r1", "r2"]:
+ for addr_type in ["ipv6"]:
+ for bgp_neighbor in topo1["routers"][routerN]["bgp"]["address_family"][
+ addr_type
+ ]["unicast"]["neighbor"].keys():
+ # Adding ['source_link'] = 'lo' key:value pair
+ if bgp_neighbor == "r1" or bgp_neighbor == "r2":
+ topo1["routers"][routerN]["bgp"]["address_family"][addr_type][
+ "unicast"
+ ]["neighbor"][bgp_neighbor]["dest_link"] = {
+ "lo": {
+ "source_link": "lo",
+ "ebgp_multihop": 2,
+ "capability": "extended-nexthop",
+ "activate": "ipv4",
+ }
+ }
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo1, save_bkup=False)
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {"r1-link0": {"deactivate": "ipv6"}}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {"r2-link0": {"deactivate": "ipv6"}}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_r2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {"r1-link0": {"deactivate": "ipv4"}}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r2 = {
+ "r2": {
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {"r2-link0": {"deactivate": "ipv4"}}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_r2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ r2_lo_v4 = topo["routers"]["r2"]["links"]["lo"]["ipv4"]
+ r2_lo_v6 = topo["routers"]["r2"]["links"]["lo"]["ipv6"]
+ r1_lo_v4 = topo["routers"]["r1"]["links"]["lo"]["ipv4"]
+ r1_lo_v6 = topo["routers"]["r1"]["links"]["lo"]["ipv6"]
+ r1_r2_intf = topo["routers"]["r1"]["links"]["r2-link0"]["interface"]
+ r2_r1_intf = topo["routers"]["r2"]["links"]["r1-link0"]["interface"]
+
+ r1_r2_v6_nh = topo["routers"]["r1"]["links"]["r2-link0"]["ipv6"].split("/")[0]
+ r2_r1_v6_nh = topo["routers"]["r2"]["links"]["r1-link0"]["ipv6"].split("/")[0]
+
+ ipv4_list = [("r1", r1_r2_intf, [r2_lo_v4]), ("r2", r2_r1_intf, [r1_lo_v4])]
+
+ ipv6_list = [
+ ("r1", r1_r2_intf, [r2_lo_v6], r2_r1_v6_nh),
+ ("r2", r2_r1_intf, [r1_lo_v6], r1_r2_v6_nh),
+ ]
+
+ for dut, intf, loop_addr in ipv4_list:
+ result = addKernelRoute(tgen, dut, intf, loop_addr)
+ # assert result is True, "Testcase {}:Failed \n Error: {}". \
+ # format(tc_name, result)
+
+ for dut, intf, loop_addr, next_hop in ipv6_list:
+ result = addKernelRoute(tgen, dut, intf, loop_addr, next_hop)
+ # assert result is True, "Testcase {}:Failed \n Error: {}". \
+ # format(tc_name, result)
+
+ r2_lo_v4 = topo["routers"]["r2"]["links"]["lo"]["ipv4"]
+ r2_lo_v6 = topo["routers"]["r2"]["links"]["lo"]["ipv6"]
+ r1_lo_v4 = topo["routers"]["r1"]["links"]["lo"]["ipv4"]
+ r1_lo_v6 = topo["routers"]["r1"]["links"]["lo"]["ipv6"]
+ r1_r2_intf = topo["routers"]["r1"]["links"]["r2-link0"]["interface"]
+ r2_r1_intf = topo["routers"]["r2"]["links"]["r1-link0"]["interface"]
+
+ r1_r2_v6_nh = topo["routers"]["r1"]["links"]["r2-link0"]["ipv6"].split("/")[0]
+ r2_r1_v6_nh = topo["routers"]["r2"]["links"]["r1-link0"]["ipv6"].split("/")[0]
+
+ r1_r2_v4_nh = topo["routers"]["r1"]["links"]["r2-link0"]["ipv4"].split("/")[0]
+ r2_r1_v4_nh = topo["routers"]["r2"]["links"]["r1-link0"]["ipv4"].split("/")[0]
+
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {"network": r2_lo_v4, "next_hop": r2_r1_v4_nh},
+ {"network": r2_lo_v6, "next_hop": r2_r1_v6_nh},
+ ]
+ },
+ "r2": {
+ "static_routes": [
+ {"network": r1_lo_v4, "next_hop": r1_r2_v4_nh},
+ {"network": r1_lo_v6, "next_hop": r1_r2_v6_nh},
+ ]
+ },
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ # Api call verify whether BGP is converged
+ result = verify_bgp_convergence(tgen, topo1)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Enable cap ext nh on r1 and r2 and activate in ipv4 addr family")
+ configure_bgp_on_r1 = {
+ "r1": {
+ "default_ipv4_unicast": False,
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "lo": {
+ "activate": "ipv4",
+ "capability": "extended-nexthop",
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r2 = {
+ "r2": {
+ "default_ipv4_unicast": False,
+ "bgp": {
+ "address_family": {
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "lo": {
+ "activate": "ipv4",
+ "capability": "extended-nexthop",
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_r2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify bgp convergence.")
+ bgp_convergence = verify_bgp_convergence(tgen, topo1)
+ assert bgp_convergence is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, bgp_convergence
+ )
+
+ step("Configure 2 IPv4 static" " routes on R1, Nexthop as different links of R0")
+
+ for rte in range(0, NO_OF_RTES):
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ step(
+ "Advertise static routes from IPv4 unicast family and IPv6 "
+ "unicast family respectively from R1 using red static cmd "
+ "Advertise loopback from IPv4 unicast family using network command "
+ "from R1"
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {"network": NETWORK_CMD_IP, "no_of_network": 1}
+ ],
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "IPv4 routes advertised using static and network command are "
+ " received on R2 BGP and routing table , "
+ "verify using show ip bgp, show ip route for IPv4 routes ."
+ )
+
+ gllip = (topo1["routers"]["r1"]["links"]["lo"]["ipv6"].split("/")[0]).lower()
+ assert gllip is not None, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ dut = "r2"
+ protocol = "bgp"
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": gllip,
+ }
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=gllip
+ )
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=gllip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ verify_nh_for_nw_rtes = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK_CMD_IP, "no_of_ip": 1, "next_hop": gllip}
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=gllip)
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=gllip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Remove IPv4 routes advertised using network command"
+ " from R1 and advertise again"
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {
+ "network": NETWORK_CMD_IP,
+ "no_of_network": 1,
+ "delete": True,
+ }
+ ],
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {
+ "network": NETWORK_CMD_IP,
+ "no_of_network": 1,
+ }
+ ],
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ "After removing IPv4 routes from network command , routes which are "
+ "advertised using redistribute static are still present in the on "
+ "R2 , verify using show ip bgp and show ip route"
+ )
+
+ verify_nh_for_nw_rtes = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK_CMD_IP, "no_of_ip": 1, "next_hop": gllip}
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=gllip)
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=gllip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Remove IPv4 routes advertised using redistribute static"
+ " command from R1 and advertise again"
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static", "delete": True}]
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}}
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo1, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ "After removing IPv4 routes from redistribute static , routes which"
+ " are advertised using network are still present in the on R2 , "
+ "verify using show ip bgp and show ip route"
+ )
+
+ verify_nh_for_nw_rtes = {
+ "r1": {
+ "static_routes": [
+ {"network": NETWORK_CMD_IP, "no_of_ip": 1, "next_hop": gllip}
+ ]
+ }
+ }
+ bgp_rib = verify_bgp_rib(tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=gllip)
+ assert bgp_rib is True, "Testcase {} : Failed \n Error: {}".format(tc_name, bgp_rib)
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_rtes, next_hop=gllip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+#!/usr/bin/env python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""RFC5549 Automation."""
+import os
+import sys
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ verify_rib,
+ create_static_routes,
+ check_address_types,
+ step,
+ reset_config_on_routers,
+ get_frr_ipv6_linklocal,
+)
+from lib.topolog import logger
+from lib.bgp import create_router_bgp, verify_bgp_convergence
+from lib.topojson import build_config_from_json
+
+# Global variables
+topo = None
+
+
+# Global variables
+NETWORK_CMD_IP = "1.0.1.17/32"
+NETWORK = {
+ "ipv4": [
+ "11.0.20.1/32",
+ "11.0.20.2/32",
+ "11.0.20.3/32",
+ "11.0.20.4/32",
+ "11.0.20.5/32",
+ ],
+ "ipv6": ["1::1/128", "1::2/128", "1::3/128", "1::4/128", "1::5/128"],
+}
+MASK = {"ipv4": "32", "ipv6": "128"}
+NEXT_HOP = {
+ "ipv4": ["10.0.0.1", "10.0.1.1", "10.0.2.1", "10.0.3.1", "10.0.4.1"],
+ "ipv6": ["Null0", "Null0", "Null0", "Null0", "Null0"],
+}
+ADDR_TYPES = check_address_types()
+NO_OF_RTES = 2
+TOPOOLOGY = """
+ Please view in a fixed-width font such as Courier.
+ +----+
+ | R4 |
+ | |
+ +--+-+
+ | ipv4 nbr
+ no bgp ebgp/ibgp |
+ | ebgp/ibgp
+ +----+ 2links +----+ 8links +--+-+ +----+
+ |R0 +----------+ R1 + + R2 | ipv6 nbr |R3 |
+ | +----------+ +------------+ +-------------+ |
+ +----+ +----+ ipv6 nbr +----+ +----+
+"""
+
+TESTCASES = """
+1. Verify IPv4 routes are deleted after un-configuring "network command
+" and "redistribute static knob" with Unnumbered IPv6 IBGP session
+ """
+
+
+def setup_module(mod):
+ """Set up the pytest environment."""
+
+ global topo
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/rfc5549_ibgp_unnumbered_nbr.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment."""
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology()
+
+
+def get_llip(onrouter, intf):
+ """
+ API to get the link local ipv6 address of a perticular interface
+
+ Parameters
+ ----------
+ * `fromnode`: Source node
+ * `tonode` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_llip('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) link local ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ tgen = get_topogen()
+ intf = topo["routers"][onrouter]["links"][intf]["interface"]
+ llip = get_frr_ipv6_linklocal(tgen, onrouter, intf)
+
+ if llip:
+ logger.info("llip ipv6 address to be set as NH is %s", llip)
+ return llip
+ return None
+
+
+def get_glipv6(onrouter, intf):
+ """
+ API to get the global ipv6 address of a perticular interface
+
+ Parameters
+ ----------
+ * `onrouter`: Source node
+ * `intf` : interface for which link local ip needs to be returned.
+
+ Usage
+ -----
+ result = get_glipv6('r1', 'r2-link0')
+
+ Returns
+ -------
+ 1) global ipv6 address from the interface.
+ 2) errormsg - when link local ip not found.
+ """
+ glipv6 = (topo["routers"][onrouter]["links"][intf]["ipv6"]).split("/")[0]
+ if glipv6:
+ logger.info("Global ipv6 address to be set as NH is %s", glipv6)
+ return glipv6
+ return None
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+
+def test_ext_nh_cap_red_static_network_ebgp_peer_unnumbered_nbr_p1(request):
+ """
+
+ Test extended capability nexthop.
+
+ Verify IPv4 routes advertise using "redistribute static" and
+ "network command" are received on EBGP peer with IPv6 nexthop
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+ reset_config_on_routers(tgen)
+ step(
+ "Configure IPv6 IBGP Unnumbered session between R1 and R2 and enable "
+ "ipv6 nd ra-interval 10 in the interface"
+ )
+
+ step(
+ "Enable capability extended-nexthop"
+ "on the neighbor from both the routers and "
+ "ipv6 nd ra-interval 10 on link connected between R1 and R2"
+ )
+
+ bgp_convergence = verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence is True, "Testcase :Failed \n Error:" " {}".format(
+ bgp_convergence
+ )
+
+ for rte in range(0, NO_OF_RTES):
+ # Create Static routes
+ input_dict = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][rte],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP["ipv4"][rte],
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+ step(
+ "Advertise static routes from IPv4 unicast family and IPv6 unicast "
+ "family respectively from R1 "
+ "Configure loopback on R1 with IPv4 address Advertise loopback "
+ "from IPv4 unicast family using network cmd from R1 "
+ )
+
+ configure_bgp_on_r1 = {
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "redistribute": [{"redist_type": "static"}],
+ "advertise_networks": [
+ {"network": NETWORK_CMD_IP, "no_of_network": 1}
+ ],
+ }
+ }
+ }
+ }
+ }
+ }
+ result = create_router_bgp(tgen, topo, configure_bgp_on_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ llip = get_llip("r1", "r2-link0")
+ assert llip is not None, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step(
+ " IPv4 and IPv6 routes advertised using static and network command are"
+ " received on R2 BGP and routing table , verify using show ip bgp"
+ " show ip route for IPv4 routes and show bgp show ipv6 routes"
+ " for IPv6 routes ."
+ )
+
+ dut = "r2"
+ protocol = "bgp"
+ verify_nh_for_static_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": NO_OF_RTES,
+ "next_hop": llip,
+ }
+ ]
+ }
+ }
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_static_rtes, next_hop=llip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ verify_nh_for_nw_cmd_rtes = {
+ "r1": {
+ "static_routes": [
+ {
+ "network": NETWORK_CMD_IP,
+ "no_of_ip": 1,
+ "next_hop": llip,
+ }
+ ]
+ }
+ }
+
+ result = verify_rib(
+ tgen, "ipv4", dut, verify_nh_for_nw_cmd_rtes, next_hop=llip, protocol=protocol
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class BGPIPV6RTADVTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ # Create 2 routers.
+ tgen.add_router("r1")
+ tgen.add_router("r2")
- # Create 2 routers.
- tgen.add_router("r1")
- tgen.add_router("r2")
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(BGPIPV6RTADVTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
"""
import os
-import re
-import pytest
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topogen import get_topogen
from lib.topolog import logger
from lib.ltemplate import ltemplateRtrCmd
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-import shutil
CWD = os.path.dirname(os.path.realpath(__file__))
# test name based on directory
TEST = os.path.basename(CWD)
-class ThisTestTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # This function only purpose is to define allocation and relationship
- # between routers, switches and hosts.
- #
- # Create P/PE routers
- tgen.add_router("r1")
- # check for mpls
- if tgen.hasmpls != True:
- logger.info("MPLS not available, tests will be skipped")
- return
- for routern in range(2, 5):
- tgen.add_router("r{}".format(routern))
- # Create CE routers
- for routern in range(1, 4):
- tgen.add_router("ce{}".format(routern))
-
- # CE/PE links
- tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "ce1-eth0", "r1-eth4")
- tgen.add_link(tgen.gears["ce2"], tgen.gears["r3"], "ce2-eth0", "r3-eth4")
- tgen.add_link(tgen.gears["ce3"], tgen.gears["r4"], "ce3-eth0", "r4-eth4")
-
- # Create a switch with just one router connected to it to simulate a
- # empty network.
- switch = {}
- switch[0] = tgen.add_switch("sw0")
- switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0")
- switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0")
-
- switch[1] = tgen.add_switch("sw1")
- switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1")
- switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0")
- switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0")
-
- switch[1] = tgen.add_switch("sw2")
- switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth2")
- switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth1")
+def build_topo(tgen):
+ "Build function"
+
+ # This function only purpose is to define allocation and relationship
+ # between routers, switches and hosts.
+ #
+ # Create P/PE routers
+ tgen.add_router("r1")
+ # check for mpls
+ if tgen.hasmpls != True:
+ logger.info("MPLS not available, tests will be skipped")
+ return
+ for routern in range(2, 5):
+ tgen.add_router("r{}".format(routern))
+ # Create CE routers
+ for routern in range(1, 4):
+ tgen.add_router("ce{}".format(routern))
+
+ # CE/PE links
+ tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "ce1-eth0", "r1-eth4")
+ tgen.add_link(tgen.gears["ce2"], tgen.gears["r3"], "ce2-eth0", "r3-eth4")
+ tgen.add_link(tgen.gears["ce3"], tgen.gears["r4"], "ce3-eth0", "r4-eth4")
+
+ # Create a switch with just one router connected to it to simulate a
+ # empty network.
+ switch = {}
+ switch[0] = tgen.add_switch("sw0")
+ switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0")
+ switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0")
+
+ switch[1] = tgen.add_switch("sw1")
+ switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1")
+ switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0")
+ switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0")
+
+ switch[1] = tgen.add_switch("sw2")
+ switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth2")
+ switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth1")
def ltemplatePreRouterStartHook():
if tgen.hasmpls != True:
logger.info("MPLS not available, skipping setup")
return False
- # check for normal init
- if len(tgen.net) == 1:
- logger.info("Topology not configured, skipping setup")
- return False
# configure r2 mpls interfaces
intfs = ["lo", "r2-eth0", "r2-eth1", "r2-eth2"]
for intf in intfs:
"""
import os
-import re
-import pytest
import platform
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topogen import get_topogen
from lib.topolog import logger
from lib.ltemplate import ltemplateRtrCmd
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-import shutil
CWD = os.path.dirname(os.path.realpath(__file__))
# test name based on directory
TEST = os.path.basename(CWD)
-class ThisTestTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # This function only purpose is to define allocation and relationship
- # between routers, switches and hosts.
- #
- # Create P/PE routers
- # check for mpls
- tgen.add_router("r1")
- if tgen.hasmpls != True:
- logger.info("MPLS not available, tests will be skipped")
- return
- mach = platform.machine()
- krel = platform.release()
- if mach[:1] == "a" and topotest.version_cmp(krel, "4.11") < 0:
- logger.info("Need Kernel version 4.11 to run on arm processor")
- return
- for routern in range(2, 5):
- tgen.add_router("r{}".format(routern))
- # Create CE routers
- for routern in range(1, 5):
- tgen.add_router("ce{}".format(routern))
-
- # CE/PE links
- tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "ce1-eth0", "r1-eth4")
- tgen.add_link(tgen.gears["ce2"], tgen.gears["r3"], "ce2-eth0", "r3-eth4")
- tgen.add_link(tgen.gears["ce3"], tgen.gears["r4"], "ce3-eth0", "r4-eth4")
- tgen.add_link(tgen.gears["ce4"], tgen.gears["r4"], "ce4-eth0", "r4-eth5")
-
- # Create a switch with just one router connected to it to simulate a
- # empty network.
- switch = {}
- switch[0] = tgen.add_switch("sw0")
- switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0")
- switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0")
-
- switch[1] = tgen.add_switch("sw1")
- switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1")
- switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0")
- switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0")
-
- switch[1] = tgen.add_switch("sw2")
- switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth2")
- switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth1")
+def build_topo(tgen):
+ "Build function"
+
+ # This function only purpose is to define allocation and relationship
+ # between routers, switches and hosts.
+ #
+ # Create P/PE routers
+ # check for mpls
+ tgen.add_router("r1")
+ if tgen.hasmpls != True:
+ logger.info("MPLS not available, tests will be skipped")
+ return
+ mach = platform.machine()
+ krel = platform.release()
+ if mach[:1] == "a" and topotest.version_cmp(krel, "4.11") < 0:
+ logger.info("Need Kernel version 4.11 to run on arm processor")
+ return
+ for routern in range(2, 5):
+ tgen.add_router("r{}".format(routern))
+ # Create CE routers
+ for routern in range(1, 5):
+ tgen.add_router("ce{}".format(routern))
+
+ # CE/PE links
+ tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "ce1-eth0", "r1-eth4")
+ tgen.add_link(tgen.gears["ce2"], tgen.gears["r3"], "ce2-eth0", "r3-eth4")
+ tgen.add_link(tgen.gears["ce3"], tgen.gears["r4"], "ce3-eth0", "r4-eth4")
+ tgen.add_link(tgen.gears["ce4"], tgen.gears["r4"], "ce4-eth0", "r4-eth5")
+
+ # Create a switch with just one router connected to it to simulate a
+ # empty network.
+ switch = {}
+ switch[0] = tgen.add_switch("sw0")
+ switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0")
+ switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0")
+
+ switch[1] = tgen.add_switch("sw1")
+ switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1")
+ switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0")
+ switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0")
+
+ switch[1] = tgen.add_switch("sw2")
+ switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth2")
+ switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth1")
def ltemplatePreRouterStartHook():
if tgen.hasmpls != True:
logger.info("MPLS not available, skipping setup")
return False
- # check for normal init
- if len(tgen.net) == 1:
- logger.info("Topology not configured, skipping setup")
- return False
# trace errors/unexpected output
cc.resetCounts()
# configure r2 mpls interfaces
-from lib.lutil import luCommand
+from lib.lutil import luCommand, luLast
rtrs = ["ce1", "ce2", "ce3", "r1", "r2", "r3", "r4"]
for rtr in rtrs:
-from lib.lutil import luCommand
+from lib.lutil import luCommand, luLast
ret = luCommand(
"ce1",
-from lib.lutil import luCommand
+from lib.lutil import luCommand, luLast
num = 50000
b = int(num / (256 * 256))
ltemplateTest("scripts/check_linux_mpls.py", False, CliOnFail, CheckFunc)
-def test_notification_check():
- CliOnFail = None
- # For debugging, uncomment the next line
- # CliOnFail = 'tgen.mininet_cli'
- CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
- # uncomment next line to start cli *before* script is run
- # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc)
-
-
def test_check_scale_up():
CliOnFail = None
# For debugging, uncomment the next line
ltemplateTest("scripts/scale_up.py", False, CliOnFail, CheckFunc)
-def test_notification_check():
- CliOnFail = None
- # For debugging, uncomment the next line
- # CliOnFail = 'tgen.mininet_cli'
- CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
- # uncomment next line to start cli *before* script is run
- # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc)
-
-
def test_check_scale_down():
CliOnFail = None
# For debugging, uncomment the next line
ltemplateTest("scripts/scale_down.py", False, CliOnFail, CheckFunc)
-def test_notification_check():
- CliOnFail = None
- # For debugging, uncomment the next line
- # CliOnFail = 'tgen.mininet_cli'
- CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
- # uncomment next line to start cli *before* script is run
- # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc)
-
-
def SKIP_test_cleanup_all():
CliOnFail = None
# For debugging, uncomment the next line
"lo_prefix": {
"ipv4": "1.0.",
"v4mask": 32,
- "ipv6": "2001:DB8:F::",
+ "ipv6": "2001:db8:f::",
"v6mask": 128
},
"routers": {
import time
from os import path as os_path
import sys
-from json import load as json_load
# Required to instantiate the topology builder class.
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd]
sys.path.append(os_path.join(CWD, "../"))
sys.path.append(os_path.join(CWD, "../lib/"))
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/bgp_large_community_topo_1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json_load(topoJson)
-except IOError:
- logger.info("Could not read file:", jsonFile)
# Global variables
bgp_convergence = False
}
-class CreateTopo(Topo):
- """
- Test topology builder
-
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/bgp_large_community_topo_1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
import os
import sys
-import json
import pytest
import time
# Import topogen and topotest helpers
# Import topoJson from lib, to create topology and initial configuration
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
reset_config_on_routers,
create_route_maps,
create_bgp_community_lists,
- create_prefix_lists,
verify_bgp_community,
step,
verify_create_community_list,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.bgpd]
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/bgp_large_community_topo_2.json".format(CWD)
+pytestmark = [pytest.mark.bgpd]
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
# Global variables
bgp_convergence = False
NETWORKS = {"ipv4": ["200.50.2.0/32"], "ipv6": ["1::1/128"]}
-class GenerateTopo(Topo):
- """
- Test topology builder
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(GenerateTopo, mod.__name__)
+ json_file = "{}/bgp_large_community_topo_2.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
neighbor 11.1.1.2 timers 3 10
neighbor 11.1.1.6 remote-as external
neighbor 11.1.1.6 timers 3 10
+ neighbor 11.1.1.6 disable-link-bw-encoding-ieee
!
--- /dev/null
+{
+ "prefix":"198.10.1.1/32",
+ "paths":[
+ {
+ "aspath":{
+ "string":"65303 65354",
+ "segments":[
+ {
+ "type":"as-sequence",
+ "list":[
+ 65303,
+ 65354
+ ]
+ }
+ ],
+ "length":2
+ },
+ "valid":true,
+ "extendedCommunity":{
+ "string":"LB:65303:125000 (1.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.3.2"
+ }
+ ]
+ }
+ ]
+}
neighbor 11.1.1.5 timers 3 10
neighbor 11.1.3.2 remote-as external
neighbor 11.1.3.2 timers 3 10
+ neighbor 11.1.3.2 disable-link-bw-encoding-ieee
!
no bgp ebgp-requires-policy
neighbor 11.1.3.1 remote-as external
neighbor 11.1.3.1 timers 3 10
+ neighbor 11.1.3.1 disable-link-bw-encoding-ieee
neighbor 11.1.6.2 remote-as external
neighbor 11.1.6.2 timers 3 10
!
"""
import os
-import re
import sys
from functools import partial
import pytest
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
"""
-class BgpLinkBwTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # Create 10 routers - 1 super-spine, 2 spines, 3 leafs
- # and 4 servers
- routers = {}
- for i in range(1, 11):
- routers[i] = tgen.add_router("r{}".format(i))
-
- # Create 13 "switches" - to interconnect the above routers
- switches = {}
- for i in range(1, 14):
- switches[i] = tgen.add_switch("s{}".format(i))
-
- # Interconnect R1 (super-spine) to R2 and R3 (the two spines)
- switches[1].add_link(tgen.gears["r1"])
- switches[1].add_link(tgen.gears["r2"])
- switches[2].add_link(tgen.gears["r1"])
- switches[2].add_link(tgen.gears["r3"])
-
- # Interconnect R2 (spine in pod-1) to R4 and R5 (the associated
- # leaf switches)
- switches[3].add_link(tgen.gears["r2"])
- switches[3].add_link(tgen.gears["r4"])
- switches[4].add_link(tgen.gears["r2"])
- switches[4].add_link(tgen.gears["r5"])
-
- # Interconnect R3 (spine in pod-2) to R6 (associated leaf)
- switches[5].add_link(tgen.gears["r3"])
- switches[5].add_link(tgen.gears["r6"])
-
- # Interconnect leaf switches to servers
- switches[6].add_link(tgen.gears["r4"])
- switches[6].add_link(tgen.gears["r7"])
- switches[7].add_link(tgen.gears["r4"])
- switches[7].add_link(tgen.gears["r8"])
- switches[8].add_link(tgen.gears["r5"])
- switches[8].add_link(tgen.gears["r9"])
- switches[9].add_link(tgen.gears["r6"])
- switches[9].add_link(tgen.gears["r10"])
-
- # Create empty networks for the servers
- switches[10].add_link(tgen.gears["r7"])
- switches[11].add_link(tgen.gears["r8"])
- switches[12].add_link(tgen.gears["r9"])
- switches[13].add_link(tgen.gears["r10"])
+def build_topo(tgen):
+ "Build function"
+
+ # Create 10 routers - 1 super-spine, 2 spines, 3 leafs
+ # and 4 servers
+ routers = {}
+ for i in range(1, 11):
+ routers[i] = tgen.add_router("r{}".format(i))
+
+ # Create 13 "switches" - to interconnect the above routers
+ switches = {}
+ for i in range(1, 14):
+ switches[i] = tgen.add_switch("s{}".format(i))
+
+ # Interconnect R1 (super-spine) to R2 and R3 (the two spines)
+ switches[1].add_link(tgen.gears["r1"])
+ switches[1].add_link(tgen.gears["r2"])
+ switches[2].add_link(tgen.gears["r1"])
+ switches[2].add_link(tgen.gears["r3"])
+
+ # Interconnect R2 (spine in pod-1) to R4 and R5 (the associated
+ # leaf switches)
+ switches[3].add_link(tgen.gears["r2"])
+ switches[3].add_link(tgen.gears["r4"])
+ switches[4].add_link(tgen.gears["r2"])
+ switches[4].add_link(tgen.gears["r5"])
+
+ # Interconnect R3 (spine in pod-2) to R6 (associated leaf)
+ switches[5].add_link(tgen.gears["r3"])
+ switches[5].add_link(tgen.gears["r6"])
+
+ # Interconnect leaf switches to servers
+ switches[6].add_link(tgen.gears["r4"])
+ switches[6].add_link(tgen.gears["r7"])
+ switches[7].add_link(tgen.gears["r4"])
+ switches[7].add_link(tgen.gears["r8"])
+ switches[8].add_link(tgen.gears["r5"])
+ switches[8].add_link(tgen.gears["r9"])
+ switches[9].add_link(tgen.gears["r6"])
+ switches[9].add_link(tgen.gears["r10"])
+
+ # Create empty networks for the servers
+ switches[10].add_link(tgen.gears["r7"])
+ switches[11].add_link(tgen.gears["r8"])
+ switches[12].add_link(tgen.gears["r9"])
+ switches[13].add_link(tgen.gears["r10"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(BgpLinkBwTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
r1 = tgen.gears["r1"]
r2 = tgen.gears["r2"]
+ r3 = tgen.gears["r3"]
# Configure anycast IP on additional server r9
logger.info("Configure anycast IP on server r9")
tgen.net["r10"].cmd("ip addr add 198.10.1.1/32 dev r10-eth1")
+ # Check if bandwidth is properly encoded with non IEEE floatig-point (uint32) format on r3
+ logger.info(
+ "Check if bandwidth is properly encoded with non IEEE floatig-point (uint32) format on r3"
+ )
+ json_file = "{}/r3/bgp-route-1.json".format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp, r3, "show bgp ipv4 uni 198.10.1.1/32 json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = "JSON output mismatch on r3"
+ assert result is None, assertmsg
+
# Check multipath on super-spine router r1
logger.info("Check multipath on super-spine router r1")
json_file = "{}/r1/bgp-route-2.json".format(CWD)
import os
import sys
-import json
import pytest
sys.path.append(os.path.join(CWD, "../"))
from lib.topogen import Topogen, get_topogen
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.topojson import linux_intf_config_from_json
from lib.common_config import start_topology
from lib.topotest import router_json_cmp, run_and_expect
-from mininet.topo import Topo
from functools import partial
pytestmark = [pytest.mark.bgpd]
}
-# Reads data from JSON File for topology and configuration creation.
-jsonFile = "{}/bgp_listen_on_multiple_addresses.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
-
-class TemplateTopo(Topo):
- "Topology builder."
-
- def build(self, *_args, **_opts):
- "Defines the allocation and relationship between routers and switches."
- tgen = get_topogen(self)
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"Sets up the test environment."
- tgen = Topogen(TemplateTopo, mod.__name__)
+ json_file = "{}/bgp_listen_on_multiple_addresses.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# Adds extra parameters to bgpd so they listen for connections on specific
# multiple addresses.
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
-from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r4"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import sys
import json
from functools import partial
-from time import sleep
import pytest
# Save the Current Working Directory to find configuration files.
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
# +-----+ +-----+ +-----+
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ # This function only purpose is to define allocation and relationship
+ # between routers, switches and hosts.
+ #
+ #
+ # Create routers
+ tgen.add_router("R1")
+ tgen.add_router("R2")
+ tgen.add_router("R3")
- # This function only purpose is to define allocation and relationship
- # between routers, switches and hosts.
- #
- #
- # Create routers
- tgen.add_router("R1")
- tgen.add_router("R2")
- tgen.add_router("R3")
+ # R1-R2
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["R1"])
+ switch.add_link(tgen.gears["R2"])
- # R1-R2
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["R1"])
- switch.add_link(tgen.gears["R2"])
-
- # R2-R3
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["R2"])
- switch.add_link(tgen.gears["R3"])
+ # R2-R3
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["R2"])
+ switch.add_link(tgen.gears["R3"])
def setup_module(mod):
"Sets up the pytest environment"
# This function initiates the topology build with Topogen...
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
tgen.start_topology()
import os
import sys
import json
-import time
import pytest
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
-from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
--- /dev/null
+router bgp 65000
+ bgp minimum-holdtime 20
+ neighbor 192.168.255.2 remote-as 65001
+ neighbor 192.168.255.2 timers 3 10
+ neighbor 192.168.255.2 timers connect 10
+!
--- /dev/null
+!
+interface r1-eth0
+ ip address 192.168.255.1/24
+!
+ip forwarding
+!
--- /dev/null
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.255.1 remote-as 65000
+ neighbor 192.168.255.1 timers 3 10
+!
--- /dev/null
+!
+interface r2-eth0
+ ip address 192.168.255.2/24
+!
+ip forwarding
+!
--- /dev/null
+#!/usr/bin/env python
+
+# Copyright (c) 2021 by
+# Takemasa Imada <takemasa.imada@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test if minimum-holdtime works.
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_minimum_holdtime():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ def _bgp_neighbor_check_if_notification_sent():
+ output = json.loads(
+ tgen.gears["r1"].vtysh_cmd("show ip bgp neighbor 192.168.255.2 json")
+ )
+ expected = {
+ "192.168.255.2": {
+ "connectionsEstablished": 0,
+ "lastNotificationReason": "OPEN Message Error/Unacceptable Hold Time",
+ "lastResetDueTo": "BGP Notification send",
+ }
+ }
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_neighbor_check_if_notification_sent)
+ success, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5)
+ assert result is None, "Failed to send notification message\n"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
import os
import sys
-import json
import time
import pytest
-from copy import deepcopy
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.topotest import iproute2_is_vrf_capable
from lib.common_config import (
step,
from lib.topolog import logger
from lib.bgp import (
- clear_bgp,
verify_bgp_rib,
create_router_bgp,
verify_bgp_community,
verify_bgp_convergence,
verify_best_path_as_per_bgp_attribute,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
+
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/bgp_multi_vrf_topo1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
# Global variables
NETWORK1_1 = {"ipv4": "1.1.1.1/32", "ipv6": "1::1/128"}
}
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/bgp_multi_vrf_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
result = verify_rib(tgen, addr_type, dut, input_dict_1, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Expected Behaviour: Routes are rejected because nexthop-self config is deleted \n Error {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Expected Behaviour: Routes are rejected because nexthop-self config is deleted \n Error {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Expected Behaviour: Routes are rejected because nexthop-self config is deleted \n Error {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Expected Behaviour: Routes are rejected because nexthop-self config is deleted \n Error {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
result = verify_rib(tgen, addr_type, dut, denied_routes, expected=False)
assert result is not True, "Testcase {} : Failed \n"
- "{}:Expected behaviour: Routes are denied by prefix-list \nError {}".format(tc_name, result)
+ "{}:Expected behaviour: Routes are denied by prefix-list \nError {}".format(
+ tc_name, result
+ )
step(
"On router R1, configure prefix-lists to permit 2 "
)
result = verify_rib(tgen, addr_type, dut, denied_routes, expected=False)
- assert result is not True, "Testcase {} : Failed \nExpected behaviour: Routes are denied by prefix-list \nError {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nExpected behaviour: Routes are denied by prefix-list \nError {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Expected Behavior: Routes are denied \nError {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Expected Behavior: Routes are denied \nError {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Expected Behavior: Routes are denied \nError {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Expected Behavior: Routes are denied \nError {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
import os
import sys
-import json
import time
import pytest
from copy import deepcopy
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.topotest import iproute2_is_vrf_capable
from lib.common_config import (
step,
from lib.topolog import logger
from lib.bgp import clear_bgp, verify_bgp_rib, create_router_bgp, verify_bgp_convergence
-from lib.topojson import build_config_from_json, build_topo_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/bgp_multi_vrf_topo2.json".format(CWD)
-
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global variables
NETWORK1_1 = {"ipv4": "1.1.1.1/32", "ipv6": "1::1/128"}
NETWORK1_2 = {"ipv4": "1.1.1.2/32", "ipv6": "1::2/128"}
PREFERRED_NEXT_HOP = "link_local"
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/bgp_multi_vrf_topo2.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
sleep(HOLDDOWNTIMER + 1)
result = verify_bgp_convergence(tgen, topo, expected=False)
- assert result is not True, "Testcase {} : Failed \nExpected Behaviour: BGP will not be converged \nError {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nExpected Behaviour: BGP will not be converged \nError {}".format(
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
dut = "r2"
}
result = verify_rib(tgen, addr_type, dut, input_dict_1, expected=False)
- assert result is not True, "Testcase {} : Failed \nExpected Behaviour: Routes are flushed out \nError {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nExpected Behaviour: Routes are flushed out \nError {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
- assert result is not True, "Testcase {} : Failed \nExpected Behaviour: Routes are flushed out \nError {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nExpected Behaviour: Routes are flushed out \nError {}".format(
+ tc_name, result
+ )
step("Bring up connecting interface between R1<<>>R2 on R1.")
for intf in interfaces:
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1, expected=False)
assert (
result is not True
- ), "Testcase {} : Failed \n Expected Behaviour: Routes are cleaned \n Error {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n Expected Behaviour: Routes are cleaned \n Error {}".format(
+ tc_name, result
+ )
step("Add/reconfigure the same VRF instance again")
result = verify_rib(tgen, addr_type, dut, input_dict_1, expected=False)
assert (
result is not True
- ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(tc_name, result)
+ ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(
+ tc_name, result
+ )
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_1, expected=False)
assert (
result is not True
- ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(tc_name, result)
+ ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
dut = "blue2"
}
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
- assert result is not True, (
- "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(
+ tc_name, result
)
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_2, expected=False)
- assert result is not True, (
- "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Expected Behaviour: Routes are not present \n Error {}".format(
+ tc_name, result
)
step("Create 2 new VRFs PINK_A and GREY_A IN R3")
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
+stdout.write("announce route 10.0.1.0/24 med %i next-hop 172.16.1.%i\n" % (peer, peer))
stdout.flush()
# Loop endlessly to allow ExaBGP to continue running
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
+stdout.write("announce route 10.0.1.0/24 med %i next-hop 172.16.1.%i\n" % (peer, peer))
stdout.flush()
# Loop endlessly to allow ExaBGP to continue running
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
+stdout.write("announce route 10.0.1.0/24 med %i next-hop 172.16.1.%i\n" % (peer, peer))
stdout.flush()
# Loop endlessly to allow ExaBGP to continue running
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
+stdout.write("announce route 10.0.1.0/24 med %i next-hop 172.16.1.%i\n" % (peer, peer))
stdout.flush()
# Loop endlessly to allow ExaBGP to continue running
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
+stdout.write("announce route 10.0.1.0/24 med %i next-hop 172.16.1.%i\n" % (peer, peer))
stdout.flush()
# Loop endlessly to allow ExaBGP to continue running
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
+stdout.write("announce route 10.0.1.0/24 med %i next-hop 172.16.1.%i\n" % (peer, peer))
stdout.flush()
# Loop endlessly to allow ExaBGP to continue running
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
+stdout.write("announce route 10.0.1.0/24 med %i next-hop 172.16.1.%i\n" % (peer, peer))
stdout.flush()
# Loop endlessly to allow ExaBGP to continue running
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
+stdout.write("announce route 10.0.1.0/24 med %i next-hop 172.16.1.%i\n" % (peer, peer))
stdout.flush()
# Loop endlessly to allow ExaBGP to continue running
!
router bgp 100 view 1
bgp router-id 172.30.1.1
+ bgp always-compare-med
no bgp ebgp-requires-policy
network 172.20.0.0/28 route-map local1
timers bgp 60 180
!
router bgp 100 view 2
bgp router-id 172.30.1.1
+ bgp always-compare-med
no bgp ebgp-requires-policy
network 172.20.0.0/28 route-map local2
timers bgp 60 180
!
router bgp 100 view 3
bgp router-id 172.30.1.1
+ bgp always-compare-med
no bgp ebgp-requires-policy
network 172.20.0.0/28
timers bgp 60 180
+++ /dev/null
-BGP table version is XXX, local router ID is 172.30.1.1, vrf id -
-Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
- i internal, r RIB-failure, S Stale, R Removed
-Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
-Origin codes: i - IGP, e - EGP, ? - incomplete
-RPKI validation codes: V valid, I invalid, N Not found
-
- Network Next Hop Metric LocPrf Weight Path
-* 10.0.1.0/24 172.16.1.5 0 65005 i
-* 172.16.1.2 0 65002 i
-*> 172.16.1.1 0 65001 i
-*> 10.101.0.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.1.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.2.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.3.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.4.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.5.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.6.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.7.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.8.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.9.0/24 172.16.1.1 100 0 65001 i
-*> 10.102.0.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.1.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.2.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.3.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.4.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.5.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.6.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.7.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.8.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.9.0/24 172.16.1.2 100 0 65002 i
-*> 10.105.0.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.1.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.2.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.3.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.4.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.5.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.6.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.7.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.8.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.9.0/24 172.16.1.5 100 0 65005 i
-*> 172.20.0.0/28 0.0.0.0 0 32768 i
+++ /dev/null
-BGP table version is XXX, local router ID is 172.30.1.1, vrf id -
-Default local pref 100, local AS 100
-Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
- i internal, r RIB-failure, S Stale, R Removed
-Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
-Origin codes: i - IGP, e - EGP, ? - incomplete
-RPKI validation codes: V valid, I invalid, N Not found
-
- Network Next Hop Metric LocPrf Weight Path
-* 10.0.1.0/24 172.16.1.5 0 65005 i
-* 172.16.1.2 0 65002 i
-*> 172.16.1.1 0 65001 i
-*> 10.101.0.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.1.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.2.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.3.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.4.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.5.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.6.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.7.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.8.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.9.0/24 172.16.1.1 100 0 65001 i
-*> 10.102.0.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.1.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.2.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.3.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.4.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.5.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.6.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.7.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.8.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.9.0/24 172.16.1.2 100 0 65002 i
-*> 10.105.0.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.1.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.2.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.3.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.4.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.5.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.6.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.7.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.8.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.9.0/24 172.16.1.5 100 0 65005 i
-*> 172.20.0.0/28 0.0.0.0 0 32768 i
+++ /dev/null
-BGP table version is XXX, local router ID is 172.30.1.1
-Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
- i internal, r RIB-failure, S Stale, R Removed
-Origin codes: i - IGP, e - EGP, ? - incomplete
-
- Network Next Hop Metric LocPrf Weight Path
-* 10.0.1.0/24 172.16.1.5 0 65005 i
-* 172.16.1.2 0 65002 i
-*> 172.16.1.1 0 65001 i
-*> 10.101.0.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.1.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.2.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.3.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.4.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.5.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.6.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.7.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.8.0/24 172.16.1.1 100 0 65001 i
-*> 10.101.9.0/24 172.16.1.1 100 0 65001 i
-*> 10.102.0.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.1.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.2.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.3.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.4.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.5.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.6.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.7.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.8.0/24 172.16.1.2 100 0 65002 i
-*> 10.102.9.0/24 172.16.1.2 100 0 65002 i
-*> 10.105.0.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.1.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.2.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.3.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.4.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.5.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.6.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.7.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.8.0/24 172.16.1.5 100 0 65005 i
-*> 10.105.9.0/24 172.16.1.5 100 0 65005 i
-*> 172.20.0.0/28 0.0.0.0 0 32768 i
+++ /dev/null
-BGP table version is XXX, local router ID is 172.30.1.1, vrf id -
-Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
- i internal, r RIB-failure, S Stale, R Removed
-Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
-Origin codes: i - IGP, e - EGP, ? - incomplete
-RPKI validation codes: V valid, I invalid, N Not found
-
- Network Next Hop Metric LocPrf Weight Path
-* 10.0.1.0/24 172.16.1.4 0 65004 i
-*> 172.16.1.3 0 65003 i
-*> 10.103.0.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.1.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.2.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.3.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.4.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.5.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.6.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.7.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.8.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.9.0/24 172.16.1.3 100 0 65003 i
-*> 10.104.0.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.1.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.2.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.3.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.4.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.5.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.6.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.7.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.8.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.9.0/24 172.16.1.4 100 0 65004 i
-*> 172.20.0.0/28 0.0.0.0 9999 32768 100 100 100 100 100 i
+++ /dev/null
-BGP table version is XXX, local router ID is 172.30.1.1, vrf id -
-Default local pref 100, local AS 100
-Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
- i internal, r RIB-failure, S Stale, R Removed
-Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
-Origin codes: i - IGP, e - EGP, ? - incomplete
-RPKI validation codes: V valid, I invalid, N Not found
-
- Network Next Hop Metric LocPrf Weight Path
-* 10.0.1.0/24 172.16.1.4 0 65004 i
-*> 172.16.1.3 0 65003 i
-*> 10.103.0.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.1.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.2.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.3.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.4.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.5.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.6.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.7.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.8.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.9.0/24 172.16.1.3 100 0 65003 i
-*> 10.104.0.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.1.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.2.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.3.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.4.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.5.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.6.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.7.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.8.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.9.0/24 172.16.1.4 100 0 65004 i
-*> 172.20.0.0/28 0.0.0.0 9999 32768 100 100 100 100 100 i
+++ /dev/null
-BGP table version is XXX, local router ID is 172.30.1.1
-Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
- i internal, r RIB-failure, S Stale, R Removed
-Origin codes: i - IGP, e - EGP, ? - incomplete
-
- Network Next Hop Metric LocPrf Weight Path
-* 10.0.1.0/24 172.16.1.4 0 65004 i
-*> 172.16.1.3 0 65003 i
-*> 10.103.0.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.1.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.2.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.3.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.4.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.5.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.6.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.7.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.8.0/24 172.16.1.3 100 0 65003 i
-*> 10.103.9.0/24 172.16.1.3 100 0 65003 i
-*> 10.104.0.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.1.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.2.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.3.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.4.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.5.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.6.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.7.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.8.0/24 172.16.1.4 100 0 65004 i
-*> 10.104.9.0/24 172.16.1.4 100 0 65004 i
-*> 172.20.0.0/28 0.0.0.0 9999 32768 100 100 100 100 100 i
+++ /dev/null
-BGP table version is XXX, local router ID is 172.30.1.1, vrf id -
-Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
- i internal, r RIB-failure, S Stale, R Removed
-Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
-Origin codes: i - IGP, e - EGP, ? - incomplete
-RPKI validation codes: V valid, I invalid, N Not found
-
- Network Next Hop Metric LocPrf Weight Path
-* 10.0.1.0/24 172.16.1.8 0 65008 i
-* 172.16.1.7 0 65007 i
-*> 172.16.1.6 0 65006 i
-*> 10.106.0.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.1.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.2.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.3.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.4.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.5.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.6.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.7.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.8.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.9.0/24 172.16.1.6 100 0 65006 i
-*> 10.107.0.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.1.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.2.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.3.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.4.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.5.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.6.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.7.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.8.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.9.0/24 172.16.1.7 100 0 65007 i
-*> 10.108.0.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.1.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.2.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.3.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.4.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.5.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.6.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.7.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.8.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.9.0/24 172.16.1.8 100 0 65008 i
-*> 172.20.0.0/28 0.0.0.0 0 32768 i
+++ /dev/null
-BGP table version is XXX, local router ID is 172.30.1.1, vrf id -
-Default local pref 100, local AS 100
-Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
- i internal, r RIB-failure, S Stale, R Removed
-Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
-Origin codes: i - IGP, e - EGP, ? - incomplete
-RPKI validation codes: V valid, I invalid, N Not found
-
- Network Next Hop Metric LocPrf Weight Path
-* 10.0.1.0/24 172.16.1.8 0 65008 i
-* 172.16.1.7 0 65007 i
-*> 172.16.1.6 0 65006 i
-*> 10.106.0.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.1.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.2.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.3.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.4.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.5.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.6.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.7.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.8.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.9.0/24 172.16.1.6 100 0 65006 i
-*> 10.107.0.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.1.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.2.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.3.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.4.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.5.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.6.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.7.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.8.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.9.0/24 172.16.1.7 100 0 65007 i
-*> 10.108.0.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.1.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.2.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.3.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.4.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.5.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.6.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.7.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.8.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.9.0/24 172.16.1.8 100 0 65008 i
-*> 172.20.0.0/28 0.0.0.0 0 32768 i
+++ /dev/null
-BGP table version is XXX, local router ID is 172.30.1.1
-Status codes: s suppressed, d damped, h history, * valid, > best, = multipath,
- i internal, r RIB-failure, S Stale, R Removed
-Origin codes: i - IGP, e - EGP, ? - incomplete
-
- Network Next Hop Metric LocPrf Weight Path
-* 10.0.1.0/24 172.16.1.8 0 65008 i
-* 172.16.1.7 0 65007 i
-*> 172.16.1.6 0 65006 i
-*> 10.106.0.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.1.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.2.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.3.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.4.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.5.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.6.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.7.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.8.0/24 172.16.1.6 100 0 65006 i
-*> 10.106.9.0/24 172.16.1.6 100 0 65006 i
-*> 10.107.0.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.1.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.2.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.3.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.4.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.5.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.6.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.7.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.8.0/24 172.16.1.7 100 0 65007 i
-*> 10.107.9.0/24 172.16.1.7 100 0 65007 i
-*> 10.108.0.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.1.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.2.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.3.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.4.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.5.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.6.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.7.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.8.0/24 172.16.1.8 100 0 65008 i
-*> 10.108.9.0/24 172.16.1.8 100 0 65008 i
-*> 172.20.0.0/28 0.0.0.0 0 32768 i
--- /dev/null
+{
+ "vrfName": "1",
+ "routerId": "172.30.1.1",
+ "defaultLocPrf": 100,
+ "localAS": 100,
+ "routes": {
+ "10.0.1.0/24": [
+ {
+ "valid": true,
+ "pathFrom": "external",
+ "prefix": "10.0.1.0",
+ "prefixLen": 24,
+ "network": "10.0.1.0/24",
+ "metric": 5,
+ "weight": 0,
+ "peerId": "172.16.1.5",
+ "path": "65005",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.5",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ },
+ {
+ "valid": true,
+ "pathFrom": "external",
+ "prefix": "10.0.1.0",
+ "prefixLen": 24,
+ "network": "10.0.1.0/24",
+ "metric": 2,
+ "weight": 0,
+ "peerId": "172.16.1.2",
+ "path": "65002",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ },
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.0.1.0",
+ "prefixLen": 24,
+ "network": "10.0.1.0/24",
+ "metric": 1,
+ "weight": 0,
+ "peerId": "172.16.1.1",
+ "path": "65001",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.101.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.101.0.0",
+ "prefixLen": 24,
+ "network": "10.101.0.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.1",
+ "path": "65001",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.101.1.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.101.1.0",
+ "prefixLen": 24,
+ "network": "10.101.1.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.1",
+ "path": "65001",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.101.2.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.101.2.0",
+ "prefixLen": 24,
+ "network": "10.101.2.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.1",
+ "path": "65001",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.101.3.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.101.3.0",
+ "prefixLen": 24,
+ "network": "10.101.3.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.1",
+ "path": "65001",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.101.4.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.101.4.0",
+ "prefixLen": 24,
+ "network": "10.101.4.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.1",
+ "path": "65001",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.101.5.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.101.5.0",
+ "prefixLen": 24,
+ "network": "10.101.5.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.1",
+ "path": "65001",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.101.6.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.101.6.0",
+ "prefixLen": 24,
+ "network": "10.101.6.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.1",
+ "path": "65001",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.101.7.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.101.7.0",
+ "prefixLen": 24,
+ "network": "10.101.7.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.1",
+ "path": "65001",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.101.8.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.101.8.0",
+ "prefixLen": 24,
+ "network": "10.101.8.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.1",
+ "path": "65001",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.101.9.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.101.9.0",
+ "prefixLen": 24,
+ "network": "10.101.9.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.1",
+ "path": "65001",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.1",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.102.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.102.0.0",
+ "prefixLen": 24,
+ "network": "10.102.0.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.2",
+ "path": "65002",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.102.1.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.102.1.0",
+ "prefixLen": 24,
+ "network": "10.102.1.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.2",
+ "path": "65002",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.102.2.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.102.2.0",
+ "prefixLen": 24,
+ "network": "10.102.2.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.2",
+ "path": "65002",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.102.3.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.102.3.0",
+ "prefixLen": 24,
+ "network": "10.102.3.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.2",
+ "path": "65002",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.102.4.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.102.4.0",
+ "prefixLen": 24,
+ "network": "10.102.4.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.2",
+ "path": "65002",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.102.5.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.102.5.0",
+ "prefixLen": 24,
+ "network": "10.102.5.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.2",
+ "path": "65002",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.102.6.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.102.6.0",
+ "prefixLen": 24,
+ "network": "10.102.6.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.2",
+ "path": "65002",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.102.7.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.102.7.0",
+ "prefixLen": 24,
+ "network": "10.102.7.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.2",
+ "path": "65002",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.102.8.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.102.8.0",
+ "prefixLen": 24,
+ "network": "10.102.8.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.2",
+ "path": "65002",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.102.9.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.102.9.0",
+ "prefixLen": 24,
+ "network": "10.102.9.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.2",
+ "path": "65002",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.2",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.105.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.105.0.0",
+ "prefixLen": 24,
+ "network": "10.105.0.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.5",
+ "path": "65005",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.5",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.105.1.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.105.1.0",
+ "prefixLen": 24,
+ "network": "10.105.1.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.5",
+ "path": "65005",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.5",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.105.2.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.105.2.0",
+ "prefixLen": 24,
+ "network": "10.105.2.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.5",
+ "path": "65005",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.5",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.105.3.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.105.3.0",
+ "prefixLen": 24,
+ "network": "10.105.3.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.5",
+ "path": "65005",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.5",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.105.4.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.105.4.0",
+ "prefixLen": 24,
+ "network": "10.105.4.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.5",
+ "path": "65005",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.5",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.105.5.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.105.5.0",
+ "prefixLen": 24,
+ "network": "10.105.5.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.5",
+ "path": "65005",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.5",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.105.6.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.105.6.0",
+ "prefixLen": 24,
+ "network": "10.105.6.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.5",
+ "path": "65005",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.5",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.105.7.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.105.7.0",
+ "prefixLen": 24,
+ "network": "10.105.7.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.5",
+ "path": "65005",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.5",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.105.8.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.105.8.0",
+ "prefixLen": 24,
+ "network": "10.105.8.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.5",
+ "path": "65005",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.5",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.105.9.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.105.9.0",
+ "prefixLen": 24,
+ "network": "10.105.9.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.5",
+ "path": "65005",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.5",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+}
--- /dev/null
+{
+ "vrfName": "2",
+ "routerId": "172.30.1.1",
+ "defaultLocPrf": 100,
+ "localAS": 100,
+ "routes": {
+ "10.0.1.0/24": [
+ {
+ "valid": true,
+ "pathFrom": "external",
+ "prefix": "10.0.1.0",
+ "prefixLen": 24,
+ "network": "10.0.1.0/24",
+ "metric": 4,
+ "weight": 0,
+ "peerId": "172.16.1.4",
+ "path": "65004",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.4",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ },
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.0.1.0",
+ "prefixLen": 24,
+ "network": "10.0.1.0/24",
+ "metric": 3,
+ "weight": 0,
+ "peerId": "172.16.1.3",
+ "path": "65003",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.103.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.103.0.0",
+ "prefixLen": 24,
+ "network": "10.103.0.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.3",
+ "path": "65003",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.103.1.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.103.1.0",
+ "prefixLen": 24,
+ "network": "10.103.1.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.3",
+ "path": "65003",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.103.2.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.103.2.0",
+ "prefixLen": 24,
+ "network": "10.103.2.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.3",
+ "path": "65003",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.103.3.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.103.3.0",
+ "prefixLen": 24,
+ "network": "10.103.3.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.3",
+ "path": "65003",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.103.4.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.103.4.0",
+ "prefixLen": 24,
+ "network": "10.103.4.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.3",
+ "path": "65003",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.103.5.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.103.5.0",
+ "prefixLen": 24,
+ "network": "10.103.5.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.3",
+ "path": "65003",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.103.6.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.103.6.0",
+ "prefixLen": 24,
+ "network": "10.103.6.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.3",
+ "path": "65003",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.103.7.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.103.7.0",
+ "prefixLen": 24,
+ "network": "10.103.7.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.3",
+ "path": "65003",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.103.8.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.103.8.0",
+ "prefixLen": 24,
+ "network": "10.103.8.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.3",
+ "path": "65003",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.103.9.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.103.9.0",
+ "prefixLen": 24,
+ "network": "10.103.9.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.3",
+ "path": "65003",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.3",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.104.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.104.0.0",
+ "prefixLen": 24,
+ "network": "10.104.0.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.4",
+ "path": "65004",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.4",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.104.1.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.104.1.0",
+ "prefixLen": 24,
+ "network": "10.104.1.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.4",
+ "path": "65004",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.4",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.104.2.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.104.2.0",
+ "prefixLen": 24,
+ "network": "10.104.2.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.4",
+ "path": "65004",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.4",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.104.3.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.104.3.0",
+ "prefixLen": 24,
+ "network": "10.104.3.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.4",
+ "path": "65004",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.4",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.104.4.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.104.4.0",
+ "prefixLen": 24,
+ "network": "10.104.4.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.4",
+ "path": "65004",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.4",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.104.5.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.104.5.0",
+ "prefixLen": 24,
+ "network": "10.104.5.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.4",
+ "path": "65004",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.4",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.104.6.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.104.6.0",
+ "prefixLen": 24,
+ "network": "10.104.6.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.4",
+ "path": "65004",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.4",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.104.7.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.104.7.0",
+ "prefixLen": 24,
+ "network": "10.104.7.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.4",
+ "path": "65004",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.4",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.104.8.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.104.8.0",
+ "prefixLen": 24,
+ "network": "10.104.8.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.4",
+ "path": "65004",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.4",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.104.9.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.104.9.0",
+ "prefixLen": 24,
+ "network": "10.104.9.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.4",
+ "path": "65004",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.4",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+}
--- /dev/null
+{
+ "vrfName": "3",
+ "routerId": "172.30.1.1",
+ "defaultLocPrf": 100,
+ "localAS": 100,
+ "routes": {
+ "10.0.1.0/24": [
+ {
+ "valid": true,
+ "pathFrom": "external",
+ "prefix": "10.0.1.0",
+ "prefixLen": 24,
+ "network": "10.0.1.0/24",
+ "metric": 8,
+ "weight": 0,
+ "peerId": "172.16.1.8",
+ "path": "65008",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.8",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ },
+ {
+ "valid": true,
+ "pathFrom": "external",
+ "prefix": "10.0.1.0",
+ "prefixLen": 24,
+ "network": "10.0.1.0/24",
+ "metric": 7,
+ "weight": 0,
+ "peerId": "172.16.1.7",
+ "path": "65007",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.7",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ },
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.0.1.0",
+ "prefixLen": 24,
+ "network": "10.0.1.0/24",
+ "metric": 6,
+ "weight": 0,
+ "peerId": "172.16.1.6",
+ "path": "65006",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.6",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.106.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.106.0.0",
+ "prefixLen": 24,
+ "network": "10.106.0.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.6",
+ "path": "65006",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.6",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.106.1.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.106.1.0",
+ "prefixLen": 24,
+ "network": "10.106.1.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.6",
+ "path": "65006",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.6",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.106.2.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.106.2.0",
+ "prefixLen": 24,
+ "network": "10.106.2.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.6",
+ "path": "65006",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.6",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.106.3.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.106.3.0",
+ "prefixLen": 24,
+ "network": "10.106.3.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.6",
+ "path": "65006",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.6",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.106.4.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.106.4.0",
+ "prefixLen": 24,
+ "network": "10.106.4.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.6",
+ "path": "65006",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.6",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.106.5.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.106.5.0",
+ "prefixLen": 24,
+ "network": "10.106.5.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.6",
+ "path": "65006",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.6",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.106.6.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.106.6.0",
+ "prefixLen": 24,
+ "network": "10.106.6.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.6",
+ "path": "65006",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.6",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.106.7.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.106.7.0",
+ "prefixLen": 24,
+ "network": "10.106.7.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.6",
+ "path": "65006",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.6",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.106.8.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.106.8.0",
+ "prefixLen": 24,
+ "network": "10.106.8.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.6",
+ "path": "65006",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.6",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.106.9.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.106.9.0",
+ "prefixLen": 24,
+ "network": "10.106.9.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.6",
+ "path": "65006",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.6",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.107.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.107.0.0",
+ "prefixLen": 24,
+ "network": "10.107.0.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.7",
+ "path": "65007",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.7",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.107.1.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.107.1.0",
+ "prefixLen": 24,
+ "network": "10.107.1.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.7",
+ "path": "65007",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.7",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.107.2.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.107.2.0",
+ "prefixLen": 24,
+ "network": "10.107.2.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.7",
+ "path": "65007",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.7",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.107.3.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.107.3.0",
+ "prefixLen": 24,
+ "network": "10.107.3.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.7",
+ "path": "65007",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.7",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.107.4.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.107.4.0",
+ "prefixLen": 24,
+ "network": "10.107.4.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.7",
+ "path": "65007",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.7",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.107.5.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.107.5.0",
+ "prefixLen": 24,
+ "network": "10.107.5.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.7",
+ "path": "65007",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.7",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.107.6.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.107.6.0",
+ "prefixLen": 24,
+ "network": "10.107.6.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.7",
+ "path": "65007",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.7",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.107.7.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.107.7.0",
+ "prefixLen": 24,
+ "network": "10.107.7.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.7",
+ "path": "65007",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.7",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.107.8.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.107.8.0",
+ "prefixLen": 24,
+ "network": "10.107.8.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.7",
+ "path": "65007",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.7",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.107.9.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.107.9.0",
+ "prefixLen": 24,
+ "network": "10.107.9.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.7",
+ "path": "65007",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.7",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.108.0.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.108.0.0",
+ "prefixLen": 24,
+ "network": "10.108.0.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.8",
+ "path": "65008",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.8",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.108.1.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.108.1.0",
+ "prefixLen": 24,
+ "network": "10.108.1.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.8",
+ "path": "65008",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.8",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.108.2.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.108.2.0",
+ "prefixLen": 24,
+ "network": "10.108.2.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.8",
+ "path": "65008",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.8",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.108.3.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.108.3.0",
+ "prefixLen": 24,
+ "network": "10.108.3.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.8",
+ "path": "65008",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.8",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.108.4.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.108.4.0",
+ "prefixLen": 24,
+ "network": "10.108.4.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.8",
+ "path": "65008",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.8",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.108.5.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.108.5.0",
+ "prefixLen": 24,
+ "network": "10.108.5.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.8",
+ "path": "65008",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.8",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.108.6.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.108.6.0",
+ "prefixLen": 24,
+ "network": "10.108.6.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.8",
+ "path": "65008",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.8",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.108.7.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.108.7.0",
+ "prefixLen": 24,
+ "network": "10.108.7.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.8",
+ "path": "65008",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.8",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.108.8.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.108.8.0",
+ "prefixLen": 24,
+ "network": "10.108.8.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.8",
+ "path": "65008",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.8",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ],
+ "10.108.9.0/24": [
+ {
+ "valid": true,
+ "bestpath": true,
+ "pathFrom": "external",
+ "prefix": "10.108.9.0",
+ "prefixLen": 24,
+ "network": "10.108.9.0/24",
+ "metric": 100,
+ "weight": 0,
+ "peerId": "172.16.1.8",
+ "path": "65008",
+ "origin": "IGP",
+ "nexthops": [
+ {
+ "ip": "172.16.1.8",
+ "afi": "ipv4",
+ "used": true
+ }
+ ]
+ }
+ ]
+ }
+}
~~~~~~~~~~~~~
"""
+import json
import os
-import re
import sys
import pytest
-import glob
+import json
from time import sleep
-from mininet.topo import Topo
-from mininet.net import Mininet
-from mininet.node import Node, OVSSwitch, Host
-from mininet.log import setLogLevel, info
-from mininet.cli import CLI
-from mininet.link import Intf
-
from functools import partial
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lib import topotest
+from lib.topogen import get_topogen, Topogen
+from lib.common_config import step
pytestmark = [pytest.mark.bgpd]
#####################################################
-class NetworkTopo(Topo):
- "BGP Multiview Topology 1"
+def build_topo(tgen):
+ # Setup Routers
+ router = tgen.add_router("r1")
- def build(self, **_opts):
+ # Setup Provider BGP peers
+ peer = {}
+ for i in range(1, 9):
+ peer[i] = tgen.add_exabgp_peer(
+ "peer%s" % i, ip="172.16.1.%s/24" % i, defaultRoute="via 172.16.1.254"
+ )
- exabgpPrivateDirs = ["/etc/exabgp", "/var/run/exabgp", "/var/log"]
+ # First switch is for a dummy interface (for local network)
+ switch = tgen.add_switch("sw0")
+ switch.add_link(router, nodeif="r1-stub")
- # Setup Routers
- router = {}
- for i in range(1, 2):
- router[i] = topotest.addRouter(self, "r%s" % i)
-
- # Setup Provider BGP peers
- peer = {}
- for i in range(1, 9):
- peer[i] = self.addHost(
- "peer%s" % i,
- ip="172.16.1.%s/24" % i,
- defaultRoute="via 172.16.1.254",
- privateDirs=exabgpPrivateDirs,
- )
-
- # Setup Switches
- switch = {}
- # First switch is for a dummy interface (for local network)
- switch[0] = self.addSwitch("sw0", cls=topotest.LegacySwitch)
- self.addLink(switch[0], router[1], intfName2="r1-stub")
- # Second switch is for connection to all peering routers
- switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch)
- self.addLink(switch[1], router[1], intfName2="r1-eth0")
- for j in range(1, 9):
- self.addLink(switch[1], peer[j], intfName2="peer%s-eth0" % j)
+ # Second switch is for connection to all peering routers
+ switch = tgen.add_switch("sw1")
+ switch.add_link(router, nodeif="r1-eth0")
+ for j in range(1, 9):
+ switch.add_link(peer[j], nodeif="peer%s-eth0" % j)
#####################################################
def setup_module(module):
- global topo, net
-
- print("\n\n** %s: Setup Topology" % module.__name__)
- print("******************************************\n")
-
- print("Cleanup old Mininet runs")
- os.system("sudo mn -c > /dev/null 2>&1")
-
thisDir = os.path.dirname(os.path.realpath(__file__))
- topo = NetworkTopo()
-
- net = Mininet(controller=None, topo=topo)
- net.start()
+ tgen = Topogen(build_topo, module.__name__)
+ tgen.start_topology()
# Starting Routers
- for i in range(1, 2):
- net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i))
- net["r%s" % i].loadConf("bgpd", "%s/r%s/bgpd.conf" % (thisDir, i))
- net["r%s" % i].startRouter()
+ router = tgen.net["r1"]
+ router.loadConf("zebra", "%s/r1/zebra.conf" % thisDir)
+ router.loadConf("bgpd", "%s/r1/bgpd.conf" % thisDir)
+ tgen.gears["r1"].start()
# Starting PE Hosts and init ExaBGP on each of them
- print("*** Starting BGP on all 8 Peers")
- for i in range(1, 9):
- net["peer%s" % i].cmd("cp %s/exabgp.env /etc/exabgp/exabgp.env" % thisDir)
- net["peer%s" % i].cmd("cp %s/peer%s/* /etc/exabgp/" % (thisDir, i))
- net["peer%s" % i].cmd("chmod 644 /etc/exabgp/*")
- net["peer%s" % i].cmd("chmod 755 /etc/exabgp/*.py")
- net["peer%s" % i].cmd("chown -R exabgp:exabgp /etc/exabgp")
- net["peer%s" % i].cmd("exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg")
- print("peer%s" % i),
- print("")
-
- # For debugging after starting FRR daemons, uncomment the next line
- # CLI(net)
+ peer_list = tgen.exabgp_peers()
+ for pname, peer in peer_list.items():
+ peer_dir = os.path.join(thisDir, pname)
+ env_file = os.path.join(thisDir, "exabgp.env")
+ peer.start(peer_dir, env_file)
def teardown_module(module):
- global net
-
- print("\n\n** %s: Shutdown Topology" % module.__name__)
- print("******************************************\n")
-
- # Shutdown - clean up everything
- print("*** Killing BGP on Peer routers")
- # Killing ExaBGP
- for i in range(1, 9):
- net["peer%s" % i].cmd("kill `cat /var/run/exabgp/exabgp.pid`")
-
- # End - Shutdown network
- net.stop()
+ tgen = get_topogen()
+ tgen.stop_topology()
def test_router_running():
- global fatal_error
- global net
-
- # Skip if previous fatal error condition is raised
- if fatal_error != "":
- pytest.skip(fatal_error)
-
- print("\n\n** Check if FRR is running on each Router node")
- print("******************************************\n")
-
- # Starting Routers
- for i in range(1, 2):
- fatal_error = net["r%s" % i].checkRouterRunning()
- assert fatal_error == "", fatal_error
+ tgen = get_topogen()
- # For debugging after starting FRR daemons, uncomment the next line
- # CLI(net)
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
def test_bgp_converge():
"Check for BGP converged on all peers and BGP views"
- global fatal_error
- global net
+ tgen = get_topogen()
- # Skip if previous fatal error condition is raised
- if fatal_error != "":
- pytest.skip(fatal_error)
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
# Wait for BGP to converge (All Neighbors in either Full or TwoWay State)
- print("\n\n** Verify for BGP to converge")
- print("******************************************\n")
+ step("Verify for BGP to converge")
+
timeout = 125
while timeout > 0:
print("Timeout in %s: " % timeout),
# Look for any node not yet converged
for i in range(1, 2):
for view in range(1, 4):
- notConverged = net["r%s" % i].cmd(
+ notConverged = tgen.net["r%s" % i].cmd(
'vtysh -c "show ip bgp view %s summary" 2> /dev/null | grep ^[0-9] | grep -vP " 11\s+(\d+)"'
% view
)
break
else:
# Bail out with error if a router fails to converge
- bgpStatus = net["r%s" % i].cmd('vtysh -c "show ip bgp view %s summary"' % view)
+ bgpStatus = tgen.net["r%s" % i].cmd(
+ 'vtysh -c "show ip bgp view %s summary"' % view
+ )
assert False, "BGP did not converge:\n%s" % bgpStatus
- # Wait for an extra 5s to announce all routes
- print("Waiting 5s for routes to be announced")
- sleep(5)
-
- print("BGP converged.")
-
- # if timeout < 60:
- # # Only wait if we actually went through a convergence
- # print("\nwaiting 15s for routes to populate")
- # sleep(15)
-
- # Make sure that all daemons are running
- for i in range(1, 2):
- fatal_error = net["r%s" % i].checkRouterRunning()
- assert fatal_error == "", fatal_error
-
- # For debugging after starting FRR daemons, uncomment the next line
- # CLI(net)
+ tgen.routers_have_failure()
def test_bgp_routingTable():
- global fatal_error
- global net
+ tgen = get_topogen()
- # Skip if previous fatal error condition is raised
- if fatal_error != "":
- pytest.skip(fatal_error)
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
thisDir = os.path.dirname(os.path.realpath(__file__))
- print("\n\n** Verifying BGP Routing Tables")
- print("******************************************\n")
- diffresult = {}
- for i in range(1, 2):
- for view in range(1, 4):
- success = 0
- # This glob pattern should work as long as number of views < 10
- for refTableFile in glob.glob(
- "%s/r%s/show_ip_bgp_view_%s*.ref" % (thisDir, i, view)
- ):
-
- if os.path.isfile(refTableFile):
- # Read expected result from file
- expected = open(refTableFile).read().rstrip()
- # Fix newlines (make them all the same)
- expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
-
- # Actual output from router
- actual = (
- net["r%s" % i]
- .cmd('vtysh -c "show ip bgp view %s" 2> /dev/null' % view)
- .rstrip()
- )
-
- # Fix inconsitent spaces between 0.99.24 and newer versions
- actual = re.sub("0 0", "0 0", actual)
- actual = re.sub(
- r"([0-9]) 32768", r"\1 32768", actual
- )
- # Remove summary line (changed recently)
- actual = re.sub(r"Total number.*", "", actual)
- actual = re.sub(r"Displayed.*", "", actual)
- actual = actual.rstrip()
- # Fix table version (ignore it)
- actual = re.sub(r"(BGP table version is )[0-9]+", r"\1XXX", actual)
-
- # Fix newlines (make them all the same)
- actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
-
- # Generate Diff
- diff = topotest.get_textdiff(
- actual,
- expected,
- title1="actual BGP routing table",
- title2="expected BGP routing table",
- )
-
- if diff:
- diffresult[refTableFile] = diff
- else:
- success = 1
- print("template %s matched: r%s ok" % (refTableFile, i))
- break
-
- if not success:
- resultstr = "No template matched.\n"
- for f in diffresult.keys():
- resultstr += (
- "template %s: r%s failed Routing Table Check for view %s:\n%s\n"
- % (f, i, view, diffresult[f])
- )
- raise AssertionError(
- "Routing Table verification failed for router r%s, view %s:\n%s"
- % (i, view, resultstr)
- )
-
- # Make sure that all daemons are running
- for i in range(1, 2):
- fatal_error = net["r%s" % i].checkRouterRunning()
- assert fatal_error == "", fatal_error
-
- # For debugging after starting FRR daemons, uncomment the next line
- # CLI(net)
-
-
-def test_shutdown_check_stderr():
- global fatal_error
- global net
-
- # Skip if previous fatal error condition is raised
- if fatal_error != "":
- pytest.skip(fatal_error)
-
- if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
- print(
- "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n"
+ step("Verifying BGP Routing Tables")
+
+ router = tgen.gears["r1"]
+ for view in range(1, 4):
+ json_file = "{}/{}/view_{}.json".format(thisDir, router.name, view)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show ip bgp view {} json".format(view),
+ expected,
)
- pytest.skip("Skipping test for Stderr output")
-
- thisDir = os.path.dirname(os.path.realpath(__file__))
-
- print("\n\n** Verifying unexpected STDERR output from daemons")
- print("******************************************\n")
-
- net["r1"].stopRouter()
+ _, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
+ assertmsg = "Routing Table verification failed for router {}, view {}".format(
+ router.name, view
+ )
+ assert result is None, assertmsg
- log = net["r1"].getStdErr("bgpd")
- if log:
- print("\nBGPd StdErr Log:\n" + log)
- log = net["r1"].getStdErr("zebra")
- if log:
- print("\nZebra StdErr Log:\n" + log)
+ tgen.routers_have_failure()
def test_shutdown_check_memleak():
- global fatal_error
- global net
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
- # Skip if previous fatal error condition is raised
- if fatal_error != "":
- pytest.skip(fatal_error)
-
- if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None:
- print(
- "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n"
- )
- pytest.skip("Skipping test for memory leaks")
-
- thisDir = os.path.dirname(os.path.realpath(__file__))
-
- net["r1"].stopRouter()
- net["r1"].report_memory_leaks(
- os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__)
- )
+ tgen.report_memory_leaks()
if __name__ == "__main__":
-
- setLogLevel("info")
# To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli
# retval = pytest.main(["-s", "--tb=no"])
retval = pytest.main(["-s"])
import os
import sys
-import pdb
-import json
import time
-import inspect
-from time import sleep
import pytest
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
-from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topogen import Topogen, get_topogen
# Required to instantiate the topology builder class.
from lib.common_config import (
verify_rib,
create_static_routes,
create_prefix_lists,
- verify_prefix_lists,
create_route_maps,
check_address_types,
)
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
- clear_bgp_and_verify,
verify_best_path_as_per_bgp_attribute,
verify_best_path_as_per_admin_distance,
- modify_as_number,
- verify_as_numbers,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/bgp_path_attributes.json".format(CWD)
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
# Address read from env variables
ADDR_TYPES = check_address_types()
####
-class CreateTopo(Topo):
- """
- Test CreateTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # Building topology and configuration from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/bgp_path_attributes.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
def test_aspath_attribute(request):
- " Verifying AS_PATH attribute functionality"
+ "Verifying AS_PATH attribute functionality"
tgen = get_topogen()
def test_localpref_attribute(request):
- " Verifying LOCAL PREFERENCE attribute functionality"
+ "Verifying LOCAL PREFERENCE attribute functionality"
tgen = get_topogen()
def test_admin_distance(request):
- " Verifying admin distance functionality"
+ "Verifying admin distance functionality"
tgen = get_topogen()
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-class PeerTypeRelaxTopo(Topo):
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+def build_topo(tgen):
+ "Build function"
- # Set up routers
- tgen.add_router("r1") # DUT
- tgen.add_router("r2")
+ # Set up routers
+ tgen.add_router("r1") # DUT
+ tgen.add_router("r2")
- # Set up peers
- for peern in range(1, 5):
- peer = tgen.add_exabgp_peer(
- "peer{}".format(peern),
- ip="10.0.{}.2/24".format(peern),
- defaultRoute="via 10.0.{}.1".format(peern),
- )
- if peern == 2:
- tgen.add_link(tgen.gears["r2"], peer)
- else:
- tgen.add_link(tgen.gears["r1"], peer)
- tgen.add_link(tgen.gears["r1"], tgen.gears["r2"])
+ # Set up peers
+ for peern in range(1, 5):
+ peer = tgen.add_exabgp_peer(
+ "peer{}".format(peern),
+ ip="10.0.{}.2/24".format(peern),
+ defaultRoute="via 10.0.{}.1".format(peern),
+ )
+ if peern == 2:
+ tgen.add_link(tgen.gears["r2"], peer)
+ else:
+ tgen.add_link(tgen.gears["r1"], peer)
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r2"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(PeerTypeRelaxTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
# For all registered routers, load the zebra configuration file
"""
import sys
-import json
import time
import os
import pytest
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.bgpd]
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/prefix_lists.json".format(CWD)
+pytestmark = [pytest.mark.bgpd]
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
# Global variables
bgp_convergence = False
-class BGPPrefixListTopo(Topo):
- """
- Test BGPPrefixListTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(BGPPrefixListTopo, mod.__name__)
+ json_file = "{}/prefix_lists.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- routesavefile.write(line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
group controller {
process receive-routes {
- run "/etc/exabgp/exa-receive.py 2";
+ run "/etc/exabgp/exa-receive.py --no-timestamp 2";
receive-routes;
encoder json;
}
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, **_opts):
- tgen = get_topogen(self)
- router = tgen.add_router("r1")
- switch = tgen.add_switch("s1")
- switch.add_link(router)
+def build_topo(tgen):
+ router = tgen.add_router("r1")
+ switch = tgen.add_switch("s1")
+ switch.add_link(router)
- switch = tgen.gears["s1"]
- peer1 = tgen.add_exabgp_peer(
- "peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1"
- )
- peer2 = tgen.add_exabgp_peer(
- "peer2", ip="10.0.0.102", defaultRoute="via 10.0.0.1"
- )
- switch.add_link(peer1)
- switch.add_link(peer2)
+ switch = tgen.gears["s1"]
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1")
+ peer2 = tgen.add_exabgp_peer("peer2", ip="10.0.0.102", defaultRoute="via 10.0.0.1")
+ switch.add_link(peer1)
+ switch.add_link(peer2)
def setup_module(module):
- tgen = Topogen(TemplateTopo, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
router = tgen.gears["r1"]
def exabgp_get_update_prefix(filename, afi, nexthop, prefix):
- with open("/tmp/peer2-received.log") as f:
+ with open(filename) as f:
for line in f.readlines():
output = json.loads(line)
ret = output.get("neighbor")
def test_peer2_receive_prefix_sid_type1():
tgen = get_topogen()
peer2 = tgen.gears["peer2"]
+ logfile = "{}/{}-received.log".format(peer2.gearlogdir, peer2.name)
def _check_type1_peer2(prefix, labelindex):
output = exabgp_get_update_prefix(
- "/tmp/peer2-received.log", "ipv4 nlri-mpls", "10.0.0.101", prefix
+ logfile, "ipv4 nlri-mpls", "10.0.0.101", prefix
)
expected = {
"type": "update",
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, **_opts):
- tgen = get_topogen(self)
- router = tgen.add_router("r1")
- switch = tgen.add_switch("s1")
- switch.add_link(router)
+def build_topo(tgen):
+ router = tgen.add_router("r1")
+ switch = tgen.add_switch("s1")
+ switch.add_link(router)
- switch = tgen.gears["s1"]
- peer1 = tgen.add_exabgp_peer(
- "peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1"
- )
- switch.add_link(peer1)
+ switch = tgen.gears["s1"]
+ peer1 = tgen.add_exabgp_peer("peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1")
+ switch.add_link(peer1)
def setup_module(module):
- tgen = Topogen(TemplateTopo, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
router = tgen.gears["r1"]
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, "{}/zebra.conf".format("r1"))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format("r1"))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, "{}/bgpd.conf".format("r1"))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format("r1"))
)
router.start()
return topotest.json_cmp(output, expected)
def check(name, cmd, expected_file):
- logger.info("[+] check {} \"{}\" {}".format(name, cmd, expected_file))
+ logger.info('[+] check {} "{}" {}'.format(name, cmd, expected_file))
tgen = get_topogen()
func = functools.partial(_check, name, cmd, expected_file)
success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
check("r1", "show bgp ipv6 vpn 2001:1::/64 json", "r1/vpnv6_rib_entry1.json")
check("r1", "show bgp ipv6 vpn 2001:2::/64 json", "r1/vpnv6_rib_entry2.json")
import os
import sys
import time
-import json
import pytest
from time import sleep
-from copy import deepcopy
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib import topotest
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
create_route_maps,
create_interface_in_kernel,
shutdown_bringup_interface,
- addKernelRoute,
- delete_route_maps,
)
from lib.topolog import logger
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
- clear_bgp_and_verify,
verify_bgp_rib,
verify_bgp_convergence_from_running_config,
modify_as_number,
verify_bgp_attributes,
clear_bgp,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/bgp_recursive_route_ebgp_multi_hop.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- logger.info("Could not read file:", jsonFile)
-
# Global variables
BGP_CONVERGENCE = False
KEEP_ALIVE_TIMER = 2
}
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/bgp_recursive_route_ebgp_multi_hop.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
"""
import os
-import re
-import pytest
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topogen import get_topogen
from lib.topolog import logger
from lib.ltemplate import ltemplateRtrCmd
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-import shutil
CWD = os.path.dirname(os.path.realpath(__file__))
# test name based on directory
TEST = os.path.basename(CWD)
-class ThisTestTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ # This function only purpose is to define allocation and relationship
+ # between routers, switches and hosts.
+ #
+ # Create P/PE routers
+ tgen.add_router("r1")
+ for routern in range(2, 5):
+ tgen.add_router("r{}".format(routern))
+ # Create a switch with just one router connected to it to simulate a
+ # empty network.
+ switch = {}
+ switch[0] = tgen.add_switch("sw0")
+ switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0")
+ switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0")
- # This function only purpose is to define allocation and relationship
- # between routers, switches and hosts.
- #
- # Create P/PE routers
- tgen.add_router("r1")
- for routern in range(2, 5):
- tgen.add_router("r{}".format(routern))
- # Create a switch with just one router connected to it to simulate a
- # empty network.
- switch = {}
- switch[0] = tgen.add_switch("sw0")
- switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0")
- switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0")
+ switch[1] = tgen.add_switch("sw1")
+ switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1")
+ switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0")
+ switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0")
- switch[1] = tgen.add_switch("sw1")
- switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1")
- switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0")
- switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0")
-
- switch[2] = tgen.add_switch("sw2")
- switch[2].add_link(tgen.gears["r2"], nodeif="r2-eth2")
- switch[2].add_link(tgen.gears["r3"], nodeif="r3-eth1")
+ switch[2] = tgen.add_switch("sw2")
+ switch[2].add_link(tgen.gears["r2"], nodeif="r2-eth2")
+ switch[2].add_link(tgen.gears["r3"], nodeif="r3-eth1")
def ltemplatePreRouterStartHook():
cc = ltemplateRtrCmd()
tgen = get_topogen()
logger.info("pre router-start hook")
- # check for normal init
- if len(tgen.net) == 1:
- logger.info("Topology not configured, skipping setup")
- return False
return True
+from lib.lutil import luCommand
+
luCommand(
"r1", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60
)
--- /dev/null
+!
+router bgp 65001
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.2 remote-as external
+!
+route-map r2 permit 10
+ set extcommunity none
+!
--- /dev/null
+!
+int r1-eth0
+ ip address 192.168.1.1/24
+!
--- /dev/null
+router bgp 65002
+ no bgp ebgp-requires-policy
+ neighbor 192.168.1.1 remote-as external
+ address-family ipv4 unicast
+ redistribute connected
+ neighbor 192.168.1.1 route-map r1 out
+ exit-address-family
+!
+route-map r1 permit 10
+ set community 123:123
+ set extcommunity bandwidth 200
+!
--- /dev/null
+!
+int lo
+ ip address 172.16.16.1/32
+!
+int r2-eth0
+ ip address 192.168.1.2/24
+!
--- /dev/null
+#!/usr/bin/env python
+
+# Copyright (c) 2021 by
+# Donatas Abraitis <donatas.abraitis@gmail.com>
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+Test if route-map extcommunity none works:
+
+route-map <name> permit 10
+ set extcommunity none
+"""
+
+import os
+import sys
+import json
+import pytest
+import functools
+
+pytestmark = pytest.mark.bgpd
+
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+
+pytestmark = [pytest.mark.bgpd]
+
+
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+
+def setup_module(mod):
+ tgen = Topogen(build_topo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+
+ for i, (rname, router) in enumerate(router_list.items(), 1):
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+
+
+def teardown_module(mod):
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+
+def test_bgp_extcommunity_none():
+ tgen = get_topogen()
+
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ router = tgen.gears["r1"]
+
+ def _bgp_converge(router):
+ output = json.loads(
+ router.vtysh_cmd("show bgp ipv4 unicast 172.16.16.1/32 json")
+ )
+ expected = {
+ "prefix": "172.16.16.1/32",
+ "paths": [
+ {
+ "community": {
+ "string": "123:123",
+ },
+ "extendedCommunity": {"string": "LB:65002:25000000 (200.000 Mbps)"},
+ }
+ ],
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_converge, router)
+ success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "BGP Converge failed"
+
+ def _bgp_extcommunity_strip(router):
+ router.vtysh_cmd(
+ "conf t\nrouter bgp 65001\naddress-family ipv4\nneighbor 192.168.1.2 route-map r2 in"
+ )
+ output = json.loads(
+ router.vtysh_cmd("show bgp ipv4 unicast 172.16.16.1/32 json")
+ )
+ expected = {
+ "prefix": "172.16.16.1/32",
+ "paths": [
+ {
+ "community": {
+ "string": "123:123",
+ },
+ "extendedCommunity": None,
+ }
+ ],
+ }
+
+ return topotest.json_cmp(output, expected)
+
+ test_func = functools.partial(_bgp_extcommunity_strip, router)
+ success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
+ assert result is None, "Failed to strip incoming extended communities from r2"
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
import os
import sys
import time
-import json
import pytest
-from time import sleep
-from copy import deepcopy
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib import topotest
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
start_topology,
write_test_header,
- apply_raw_config,
write_test_footer,
reset_config_on_routers,
verify_rib,
create_router_bgp,
verify_bgp_rib,
verify_bgp_community,
- verify_bgp_timers_and_functionality,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/bgp_aggregation.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- logger.info("Could not read file:", jsonFile)
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Global variables
BGP_CONVERGENCE = False
]
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/bgp_aggregation.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
step("Configuring {} static routes on router R1 ".format(addr_type))
result = create_static_routes(tgen, input_static)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step(
"Configuring redistribute static for {} address-family on router R1 ".format(
}
result = verify_rib(tgen, addr_type, "r3", input_static)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step("Advertise some prefixes using network command")
step(
}
result = verify_rib(tgen, addr_type, "r3", input_advertise)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step("Configure aggregate-address to summarise all the advertised routes.")
}
result = verify_rib(tgen, addr_type, "r3", input_static_agg, protocol="bgp")
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
result = verify_rib(
tgen, addr_type, "r3", input_static, protocol="bgp", expected=False
)
assert (
result is not True
- ), "Testcase : Failed \n " "Routes are still present \n Error: {}".format(
+ ), "Testcase {} : Failed \n " "Routes are still present \n Error: {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, "r1", input_static_agg, protocol="bgp")
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
result = verify_rib(tgen, addr_type, "r1", input_static)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
for action, value in zip(["removed", "add"], [True, False]):
}
result = create_static_routes(tgen, input_static)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
result = verify_rib(
tgen, addr_type, "r1", input_static_1, expected=False
)
- assert result is not True, (
- "Testcase : Failed \n "
- "Routes are still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Routes are still present \n Error: {}".format(
+ tc_name, result
)
else:
result = verify_rib(tgen, addr_type, "r1", input_static_1)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, "r3", input_static_2, protocol="bgp")
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
tgen, addr_type, "r1", input_advertise_1, expected=False
)
assert result is not True, (
- "Testcase : Failed \n "
+ "Testcase {} : Failed \n "
"Routes are still present \n Error: {}".format(tc_name, result)
)
else:
result = verify_bgp_rib(tgen, addr_type, "r1", input_advertise_1)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, "r3", input_advertise_2)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
}
result = create_static_routes(tgen, input_static)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
input_advertise = {
input_static = {"r1": {"static_routes": [{"network": AGGREGATE_NW[addr_type]}]}}
result = verify_rib(tgen, addr_type, "r3", input_static, protocol="bgp")
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
input_advertise_2 = {
"r1": {
}
result = verify_rib(tgen, addr_type, "r3", input_advertise_2, protocol="bgp")
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
for action, value in zip(["Delete", "Re-add"], [True, False]):
step("{} aggregation command from R1.".format(action))
result = verify_rib(
tgen, addr_type, "r1", input_static_agg, expected=False
)
- assert result is not True, (
- "Testcase : Failed \n "
- "Aggregated route is still present \n Error: {}".format(
- tc_name, result
- )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Aggregated route is still present \n Error: {}".format(
+ tc_name, result
)
result = verify_rib(
tgen, addr_type, "r3", input_static_agg, expected=False
)
- assert result is not True, (
- "Testcase : Failed \n "
- "Aggregated route is still present \n Error: {}".format(
- tc_name, result
- )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Aggregated route is still present \n Error: {}".format(
+ tc_name, result
)
else:
result = verify_rib(tgen, addr_type, "r1", input_static_agg)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, "r3", input_static_agg)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Configuring {} static routes on router R1 ".format(addr_type))
result = create_static_routes(tgen, input_static)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step(
"Configuring redistribute static for {} address-family on router R1 ".format(
}
result = verify_rib(tgen, addr_type, "r3", input_static)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step(
"Configure a route-map to attach a unique community attribute value "
}
result = create_static_routes(tgen, input_static)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step(
"Verify on R3 that whenever we remove the static routes, we still"
}
result = create_static_routes(tgen, input_static)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
for (
result = verify_rib(
tgen, addr_type, "r1", input_static_agg, expected=False
)
- assert result is not True, (
- "Testcase : Failed \n "
- "Aggregated route is still present \n Error: {}".format(
- tc_name, result
- )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Aggregated route is still present \n Error: {}".format(
+ tc_name, result
)
result = verify_rib(
tgen, addr_type, "r3", input_static_agg, expected=False
)
- assert result is not True, (
- "Testcase : Failed \n "
- "Aggregated route is still present \n Error: {}".format(
- tc_name, result
- )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Aggregated route is still present \n Error: {}".format(
+ tc_name, result
)
else:
result = verify_rib(tgen, addr_type, "r1", input_static_agg)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, "r3", input_static_agg)
- assert result is True, "Testcase : Failed \n Error: {}".format(
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
#
import sys
-import json
import time
import pytest
-import inspect
import os
-from time import sleep
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib import topotest
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
# Required to instantiate the topology builder class.
-from lib.topojson import *
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
- verify_bgp_community,
verify_rib,
- delete_route_maps,
- create_bgp_community_lists,
- interface_status,
create_route_maps,
create_static_routes,
create_prefix_lists,
- verify_route_maps,
check_address_types,
- shutdown_bringup_interface,
- verify_prefix_lists,
reset_config_on_routers,
)
from lib.topolog import logger
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
- clear_bgp_and_verify,
verify_bgp_attributes,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
bgp_convergence = False
BGP_CONVERGENCE = False
ADDR_TYPES = check_address_types()
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/bgp_route_map_topo1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
# Global variables
bgp_convergence = False
ADDR_TYPES = check_address_types()
-class CreateTopo(Topo):
- """
- Test topology builder
-
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/bgp_route_map_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
result = verify_rib(
tgen, adt, dut, input_dict_2, protocol=protocol, expected=False
)
- assert result is not True, ("Testcase {} : Failed \n"
- "routes are not present in rib \n Error: {}".format(tc_name, result))
+ assert result is not True, (
+ "Testcase {} : Failed \n"
+ "routes are not present in rib \n Error: {}".format(tc_name, result)
+ )
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
result = verify_rib(
tgen, adt, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, ("Testcase {} : Failed \n "
- "routes are not present in rib \n Error: {}".format(tc_name, result))
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "routes are not present in rib \n Error: {}".format(tc_name, result)
+ )
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
result = verify_rib(
tgen, adt, dut, input_dict_2, protocol=protocol, expected=False
)
- assert result is not True, ("Testcase {} : Failed \n "
- "Routes are still present \n Error: {}".format(tc_name, result))
+ assert result is not True, (
+ "Testcase {} : Failed \n "
+ "Routes are still present \n Error: {}".format(tc_name, result)
+ )
logger.info("Expected behaviour: {}".format(result))
else:
- result = verify_rib(
- tgen, adt, dut, input_dict_2, protocol=protocol
- )
+ result = verify_rib(tgen, adt, dut, input_dict_2, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
TC_60
Create route map to deny outbound prefixes with filter match tag,
and set criteria
-"""
#################################
# TOPOLOGY
#################################
-"""
+-------+
+--------- | R2 |
"""
import sys
-import json
import time
import pytest
import inspect
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib import topotest
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
# Required to instantiate the topology builder class.
from lib.common_config import (
verify_rib,
delete_route_maps,
create_bgp_community_lists,
- interface_status,
create_route_maps,
create_prefix_lists,
verify_route_maps,
clear_bgp_and_verify,
verify_bgp_attributes,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/bgp_route_map_topo2.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Global variables
# Global variables
ADDR_TYPES = check_address_types()
-class BGPRmapTopo(Topo):
- """BGPRmapTopo.
-
- BGPRmap topology 1
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology and configuration from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""setup_module.
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(BGPRmapTopo, mod.__name__)
+ json_file = "{}/bgp_route_map_topo2.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
result = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nroutes are not present \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
result = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n"
- "Expected behaviour: routes are not present \n "
- "Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nExpected behaviour: routes are not present \n Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
result = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nroutes are not present \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
result = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nroutes are not present \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
result = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n Error"
- "Routes are still present: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error Routes are still present: {}".format(
+ tc_name, result
+ )
logger.info("Expected behaviour: {}".format(result))
# Remove applied rmap from neighbor
result = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nroutes are not present \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
input_dict_3_addr_type[addr_type],
expected=False,
)
- assert result is not True, "Testcase {} : Failed \n"
- "Attributes are not set \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nAttributes are not set \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
input_dict_3_addr_type[addr_type],
expected=False,
)
- assert result is not True, "Testcase {} : Failed \n"
- "Attributes are not set \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nAttributes are not set \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
result = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nroutes are not present \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
result = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nroutes are not present \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
result = verify_rib(
tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
)
- assert result is not True, "Testcase {} : Failed \n"
- "routes are denied \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \nroutes are denied \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
"""
import os
-import re
import sys
import pytest
import json
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-
-pytestmark = [pytest.mark.bgpd]
-#####################################################
-##
-## Network Topology Definition
-##
-#####################################################
-
-
-class NetworkTopo(Topo):
- "BGP_RR_IBGP Topology 1"
+pytestmark = [pytest.mark.bgpd]
- def build(self, **_opts):
- "Build function"
- tgen = get_topogen(self)
+def build_topo(tgen):
+ "Build function"
- tgen.add_router("tor1")
- tgen.add_router("tor2")
- tgen.add_router("spine1")
+ tgen.add_router("tor1")
+ tgen.add_router("tor2")
+ tgen.add_router("spine1")
- # First switch is for a dummy interface (for local network)
- # on tor1
- # 192.168.1.0/24
- switch = tgen.add_switch("sw1")
- switch.add_link(tgen.gears["tor1"])
+ # First switch is for a dummy interface (for local network)
+ # on tor1
+ # 192.168.1.0/24
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["tor1"])
- # 192.168.2.0/24 - tor1 <-> spine1 connection
- switch = tgen.add_switch("sw2")
- switch.add_link(tgen.gears["tor1"])
- switch.add_link(tgen.gears["spine1"])
+ # 192.168.2.0/24 - tor1 <-> spine1 connection
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["tor1"])
+ switch.add_link(tgen.gears["spine1"])
- # 3rd switch is for a dummy interface (for local netwokr)
- # 192.168.3.0/24 - tor2
- switch = tgen.add_switch("sw3")
- switch.add_link(tgen.gears["tor2"])
+ # 3rd switch is for a dummy interface (for local netwokr)
+ # 192.168.3.0/24 - tor2
+ switch = tgen.add_switch("sw3")
+ switch.add_link(tgen.gears["tor2"])
- # 192.168.4.0/24 - tor2 <-> spine1 connection
- switch = tgen.add_switch("sw4")
- switch.add_link(tgen.gears["tor2"])
- switch.add_link(tgen.gears["spine1"])
+ # 192.168.4.0/24 - tor2 <-> spine1 connection
+ switch = tgen.add_switch("sw4")
+ switch.add_link(tgen.gears["tor2"])
+ switch.add_link(tgen.gears["spine1"])
#####################################################
def setup_module(module):
"Setup topology"
- tgen = Topogen(NetworkTopo, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
# This is a sample of configuration loading.
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
master agentx
-noRangeCheck yes
\ No newline at end of file
+noRangeCheck yes
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
import os
import sys
-import json
-from functools import partial
from time import sleep
import pytest
-import re
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
from lib.snmptest import SnmpTester
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd, pytest.mark.isisd, pytest.mark.snmp]
-class TemplateTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # This function only purpose is to define allocation and relationship
- # between routers, switches and hosts.
- #
- #
- # Create routers
- tgen.add_router("r1")
- tgen.add_router("r2")
- tgen.add_router("r3")
- tgen.add_router("r4")
- tgen.add_router("ce1")
- tgen.add_router("ce2")
- tgen.add_router("ce3")
- tgen.add_router("ce4")
-
- # r1-r2
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- # r1-r3
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r3"])
-
- # r1-r4
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r4"])
-
- # r1-ce1
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["ce1"])
-
- # r1-ce3
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["ce3"])
-
- # r1-ce4
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["ce4"])
-
- # r1-dangling
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["r1"])
-
- # r2-r3
- switch = tgen.add_switch("s8")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
-
- # r3-r4
- switch = tgen.add_switch("s9")
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r4"])
-
- # r4-ce2
- switch = tgen.add_switch("s10")
- switch.add_link(tgen.gears["r4"])
- switch.add_link(tgen.gears["ce2"])
+def build_topo(tgen):
+ "Build function"
+
+ # This function only purpose is to define allocation and relationship
+ # between routers, switches and hosts.
+ #
+ #
+ # Create routers
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("r3")
+ tgen.add_router("r4")
+ tgen.add_router("ce1")
+ tgen.add_router("ce2")
+ tgen.add_router("ce3")
+ tgen.add_router("ce4")
+
+ # r1-r2
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ # r1-r3
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+
+ # r1-r4
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r4"])
+
+ # r1-ce1
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["ce1"])
+
+ # r1-ce3
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["ce3"])
+
+ # r1-ce4
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["ce4"])
+
+ # r1-dangling
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["r1"])
+
+ # r2-r3
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+
+ # r3-r4
+ switch = tgen.add_switch("s9")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
+
+ # r4-ce2
+ switch = tgen.add_switch("s10")
+ switch.add_link(tgen.gears["r4"])
+ switch.add_link(tgen.gears["ce2"])
def setup_module(mod):
pytest.skip(error_msg)
# This function initiates the topology build with Topogen...
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
tgen.start_topology()
r1.run("sysctl -w net.mpls.conf.r1-eth0.input=1")
r1.run("sysctl -w net.mpls.conf.r1-eth1.input=1")
r1.run("sysctl -w net.mpls.conf.r1-eth2.input=1")
- r2.run("sysctl -w net.mpls.conf.r1-eth0.input=1")
- r2.run("sysctl -w net.mpls.conf.r1-eth1.input=1")
- r3.run("sysctl -w net.mpls.conf.r1-eth0.input=1")
- r3.run("sysctl -w net.mpls.conf.r1-eth1.input=1")
- r3.run("sysctl -w net.mpls.conf.r1-eth2.input=1")
- r4.run("sysctl -w net.mpls.conf.r1-eth0.input=1")
- r4.run("sysctl -w net.mpls.conf.r1-eth1.input=1")
router_list = tgen.routers()
"Wait for protocol convergence"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
assertmsg = "BGP SNMP does not seem to be running"
def test_r1_mplsvpn_scalars():
"check scalar values"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
for item in interfaces_up_test.keys():
def test_r1_mplsvpn_scalars_interface():
"check scalar interface changing values"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
- r1_cmd = tgen.gears["r1"]
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
- r1_cmd.vtysh_cmd("conf t\ninterface r1-eth3\nshutdown")
- r1_cmd.vtysh_cmd("conf t\ninterface r1-eth4\nshutdown")
+ r1.vtysh_cmd("conf t\ninterface r1-eth3\nshutdown")
+ r1.vtysh_cmd("conf t\ninterface r1-eth4\nshutdown")
for item in interfaces_up_test.keys():
assertmsg = "{} should be {}: value {}".format(
)
assert r1_snmp.test_oid(item, interfaces_down_test[item]), assertmsg
- r1_cmd.vtysh_cmd("conf t\ninterface r1-eth3\nno shutdown")
- r1_cmd.vtysh_cmd("conf t\ninterface r1-eth4\nno shutdown")
+ r1.vtysh_cmd("conf t\ninterface r1-eth3\nno shutdown")
+ r1.vtysh_cmd("conf t\ninterface r1-eth4\nno shutdown")
for item in interfaces_up_test.keys():
assertmsg = "{} should be {}: value {}".format(
"mplsL3VpnIf table values"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
- r1r = tgen.gears["r1"]
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
# tgen.mininet_cli()
- eth3_ifindex = router_interface_get_ifindex(r1r, "eth3")
- eth4_ifindex = router_interface_get_ifindex(r1r, "eth4")
- eth5_ifindex = router_interface_get_ifindex(r1r, "eth5")
+ eth3_ifindex = router_interface_get_ifindex(r1, "eth3")
+ eth4_ifindex = router_interface_get_ifindex(r1, "eth4")
+ eth5_ifindex = router_interface_get_ifindex(r1, "eth5")
# get ifindex and make sure the oid is correct
def test_r1_mplsvpn_VrfTable():
tgen = get_topogen()
- r1 = tgen.net.get("r1")
- r1r = tgen.gears["r1"]
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
"mplsL3VpnVrfConfLastChanged.{}".format(snmp_str_to_oid("VRF-a"))
)
ts_val_last_1 = get_timetick_val(ts_last)
- r1r.vtysh_cmd("conf t\ninterface r1-eth3\nshutdown")
+ r1.vtysh_cmd("conf t\ninterface r1-eth3\nshutdown")
active_int = r1_snmp.get(
"mplsL3VpnVrfActiveInterfaces.{}".format(snmp_str_to_oid("VRF-a"))
)
ts_val_last_2 = get_timetick_val(ts_last)
assertmsg = "mplsL3VpnVrfConfLastChanged does not update on interface change"
assert ts_val_last_2 > ts_val_last_1, assertmsg
- r1r.vtysh_cmd("conf t\ninterface r1-eth3\nno shutdown")
+ r1.vtysh_cmd("conf t\ninterface r1-eth3\nno shutdown")
# take Last changed time, fiddle with associated interfaces, ensure
# time changes and active interfaces change
def test_r1_mplsvpn_VrfRT_table():
tgen = get_topogen()
- r1 = tgen.net.get("r1")
- r1r = tgen.gears["r1"]
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
def test_r1_mplsvpn_perf_table():
tgen = get_topogen()
- r1 = tgen.net.get("r1")
- r1r = tgen.gears["r1"]
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
def test_r1_mplsvpn_rte_table():
tgen = get_topogen()
- r1 = tgen.net.get("r1")
- r1r = tgen.gears["r1"]
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "10.1.1.1", "public", "2c")
# generate ifindex row grabbing ifindices from vtysh
if passed:
ifindex_row = [
- router_interface_get_ifindex(r1r, "eth3"),
- router_interface_get_ifindex(r1r, "eth4"),
- router_interface_get_ifindex(r1r, "eth2"),
- router_interface_get_ifindex(r1r, "eth3"),
+ router_interface_get_ifindex(r1, "eth3"),
+ router_interface_get_ifindex(r1, "eth4"),
+ router_interface_get_ifindex(r1, "eth2"),
+ router_interface_get_ifindex(r1, "eth3"),
"0",
- router_interface_get_ifindex(r1r, "eth4"),
+ router_interface_get_ifindex(r1, "eth4"),
"0",
]
if not r1_snmp.test_oid_walk(
#
import os
-import re
import sys
import json
import functools
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from lib.common_config import required_linux_kernel_version
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class Topology(Topo):
+def build_topo(tgen):
"""
CE1 CE3 CE5
(eth0) (eth0) (eth0)
(eth0) (eth0) (eth0)
CE2 CE4 CE6
"""
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
- tgen.add_router("r1")
- tgen.add_router("r2")
- tgen.add_router("ce1")
- tgen.add_router("ce2")
- tgen.add_router("ce3")
- tgen.add_router("ce4")
- tgen.add_router("ce5")
- tgen.add_router("ce6")
-
- tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "eth0", "eth0")
- tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "eth0", "eth1")
- tgen.add_link(tgen.gears["ce2"], tgen.gears["r2"], "eth0", "eth1")
- tgen.add_link(tgen.gears["ce3"], tgen.gears["r1"], "eth0", "eth2")
- tgen.add_link(tgen.gears["ce4"], tgen.gears["r2"], "eth0", "eth2")
- tgen.add_link(tgen.gears["ce5"], tgen.gears["r1"], "eth0", "eth3")
- tgen.add_link(tgen.gears["ce6"], tgen.gears["r2"], "eth0", "eth3")
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+ tgen.add_router("ce1")
+ tgen.add_router("ce2")
+ tgen.add_router("ce3")
+ tgen.add_router("ce4")
+ tgen.add_router("ce5")
+ tgen.add_router("ce6")
+
+ tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "eth0", "eth0")
+ tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "eth0", "eth1")
+ tgen.add_link(tgen.gears["ce2"], tgen.gears["r2"], "eth0", "eth1")
+ tgen.add_link(tgen.gears["ce3"], tgen.gears["r1"], "eth0", "eth2")
+ tgen.add_link(tgen.gears["ce4"], tgen.gears["r2"], "eth0", "eth2")
+ tgen.add_link(tgen.gears["ce5"], tgen.gears["r1"], "eth0", "eth3")
+ tgen.add_link(tgen.gears["ce6"], tgen.gears["r2"], "eth0", "eth3")
def setup_module(mod):
if result is not True:
pytest.skip("Kernel requirements are not met")
- tgen = Topogen(Topology, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
for rname, router in tgen.routers().items():
router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
- router.load_config(TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname)))
- router.load_config(TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
tgen.gears["r1"].run("ip link add vrf10 type vrf table 10")
tgen.gears["r1"].run("ip link set vrf10 up")
return topotest.json_cmp(output, expected)
def check(name, cmd, expected_file):
- logger.info("[+] check {} \"{}\" {}".format(name, cmd, expected_file))
+ logger.info('[+] check {} "{}" {}'.format(name, cmd, expected_file))
tgen = get_topogen()
func = functools.partial(_check, name, cmd, expected_file)
success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
check("r1", "show bgp ipv6 vpn json", "r1/vpnv6_rib.json")
check("r2", "show bgp ipv6 vpn json", "r2/vpnv6_rib.json")
tgen = get_topogen()
func = functools.partial(_check, name, dest_addr, match)
success, result = topotest.run_and_expect(func, None, count=10, wait=0.5)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
check("ce1", "2001:2::2", " 0% packet loss")
check("ce1", "2001:3::2", " 0% packet loss")
import os
import sys
import json
-import time
import pytest
from functools import partial
from time import sleep
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+def build_topo(tgen):
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
+CWD = os.path.dirname(os.path.realpath(__file__))
+
- for routern in range(1, 6):
- tgen.add_router("r{}".format(routern))
+def build_topo(tgen):
+ for routern in range(1, 6):
+ tgen.add_router("r{}".format(routern))
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r4"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r5"])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r5"])
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import os
import sys
-import json
import time
import pytest
import platform
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
from lib.topotest import version_cmp
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
write_test_header,
check_address_types,
write_test_footer,
- reset_config_on_routers,
- verify_rib,
step,
create_route_maps,
- shutdown_bringup_interface,
create_static_routes,
create_prefix_lists,
create_bgp_community_lists,
create_interface_in_kernel,
check_router_status,
verify_cli_json,
- get_frr_ipv6_linklocal,
verify_fib_routes,
)
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
- clear_bgp,
verify_bgp_community,
verify_bgp_rib,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/bgp_vrf_dynamic_route_leak_topo1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Global variables
NETWORK1_1 = {"ipv4": "11.11.11.1/32", "ipv6": "11:11::1/128"}
LOOPBACK_1 = {
"ipv4": "10.0.0.7/24",
"ipv6": "fd00:0:0:1::7/64",
- "ipv4_mask": "255.255.255.0",
- "ipv6_mask": None,
}
LOOPBACK_2 = {
"ipv4": "10.0.0.16/24",
PREFERRED_NEXT_HOP = "global"
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/bgp_vrf_dynamic_route_leak_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
"""
- tc_name = request.node.name
logger.info("Remove prefer-global rmap applied on neighbors")
input_dict = {
"r1": {
}
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase :Failed \n Error: {}".format(result)
return True
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
from lib.topotest import version_cmp
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
write_test_header,
check_address_types,
write_test_footer,
- verify_rib,
step,
create_route_maps,
- create_static_routes,
- stop_router,
- start_router,
create_prefix_lists,
create_bgp_community_lists,
check_router_status,
PREFERRED_NEXT_HOP = "global"
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
+def build_topo(tgen):
+ """Build function"""
- # Building topology from json file
- build_topo_from_json(tgen, topo)
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
rmap_name="rmap_IMP_{}".format(addr_type),
input_dict=input_rmap,
)
- assert result is True, "Testcase : Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step("Change community-list to match a different value then " "100:100.")
import json
from functools import partial
import pytest
-import platform
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
from lib.common_config import required_linux_kernel_version
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class BGPIPV6RTADVVRFTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ # Create 2 routers.
+ tgen.add_router("r1")
+ tgen.add_router("r2")
- # Create 2 routers.
- tgen.add_router("r1")
- tgen.add_router("r2")
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
if result is not True:
pytest.skip("Kernel requirements are not met")
- tgen = Topogen(BGPIPV6RTADVVRFTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
+++ /dev/null
-#!/usr/bin/env python
-
-"""
-exa-receive.py: Save received routes form ExaBGP into file
-"""
-
-from sys import stdin, argv
-from datetime import datetime
-
-# 1st arg is peer number
-peer = int(argv[1])
-
-# When the parent dies we are seeing continual newlines, so we only access so many before stopping
-counter = 0
-
-routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
-
-while True:
- try:
- line = stdin.readline()
- timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
- routesavefile.write(timestamp + line)
- routesavefile.flush()
-
- if line == "":
- counter += 1
- if counter > 100:
- break
- continue
-
- counter = 0
- except KeyboardInterrupt:
- pass
- except IOError:
- # most likely a signal during readline
- pass
-
-routesavefile.close()
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
#####################################################
-class BGPVRFNETNSTopo1(Topo):
- "BGP EBGP VRF NETNS Topology 1"
+def build_topo(tgen):
+ tgen.add_router("r1")
- def build(self, **_opts):
- tgen = get_topogen(self)
+ # Setup Switches
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
- # Setup Routers
- tgen.add_router("r1")
-
- # Setup Switches
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
-
- # Add eBGP ExaBGP neighbors
- peer_ip = "10.0.1.101"
- peer_route = "via 10.0.1.1"
- peer = tgen.add_exabgp_peer("peer1", ip=peer_ip, defaultRoute=peer_route)
- switch = tgen.gears["s1"]
- switch.add_link(peer)
+ # Add eBGP ExaBGP neighbors
+ peer_ip = "10.0.1.101"
+ peer_route = "via 10.0.1.1"
+ peer = tgen.add_exabgp_peer("peer1", ip=peer_ip, defaultRoute=peer_route)
+ switch = tgen.gears["s1"]
+ switch.add_link(peer)
#####################################################
def setup_module(module):
- tgen = Topogen(BGPVRFNETNSTopo1, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
# Get r1 reference
# create VRF r1-bgp-cust1
# move r1-eth0 to VRF r1-bgp-cust1
- cmds = [
- "if [ -e /var/run/netns/{0}-bgp-cust1 ] ; then ip netns del {0}-bgp-cust1 ; fi",
- "ip netns add {0}-bgp-cust1",
- "ip link set {0}-eth0 netns {0}-bgp-cust1 up",
- ]
- for cmd in cmds:
- cmd = cmd.format("r1")
- logger.info("cmd: " + cmd)
- output = router.run(cmd.format("r1"))
- if output != None and len(output) > 0:
- logger.info(
- 'Aborting due to unexpected output: cmd="{}" output=\n{}'.format(
- cmd, output
- )
- )
- return pytest.skip(
- "Skipping BGP VRF NETNS Test. Unexpected output to command: " + cmd
- )
+
+ ns = "{}-bgp-cust1".format("r1")
+ router.net.add_netns(ns)
+ router.net.set_intf_netns("r1-eth0", ns, up=True)
+
# run daemons
router.load_config(
TopoRouter.RD_ZEBRA,
def teardown_module(module):
tgen = get_topogen()
- # move back r1-eth0 to default VRF
- # delete VRF r1-bgp-cust1
- cmds = [
- "ip netns exec {0}-bgp-cust1 ip link set {0}-eth0 netns 1",
- "ip netns delete {0}-bgp-cust1",
- ]
- for cmd in cmds:
- tgen.net["r1"].cmd(cmd.format("r1"))
+
+ # Move interfaces out of vrf namespace and delete the namespace
+ tgen.net["r1"].reset_intf_netns("r1-eth0")
+ tgen.net["r1"].delete_netns("r1-bgp-cust1")
tgen.stop_topology()
expected = json.loads(open(reffile).read())
test_func = functools.partial(
- topotest.router_json_cmp, router, "show bgp vrf r1-bgp-cust1 summary json", expected
+ topotest.router_json_cmp,
+ router,
+ "show bgp vrf r1-bgp-cust1 summary json",
+ expected,
)
_, res = topotest.run_and_expect(test_func, None, count=90, wait=0.5)
assertmsg = "BGP router network did not converge"
test_bgp-vrf-route-leak-basic.py.py: Test basic vrf route leaking
"""
-import json
import os
import sys
from functools import partial
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class BGPVRFTopo(Topo):
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+def build_topo(tgen):
+ "Build function"
- for routern in range(1, 2):
- tgen.add_router("r{}".format(routern))
+ for routern in range(1, 2):
+ tgen.add_router("r{}".format(routern))
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(BGPVRFTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
# For all registered routers, load the zebra configuration file
# pylint: disable=C0413
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.staticd]
-class TimingTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
- tgen.add_router("r1")
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
+
+def build_topo(tgen):
+ tgen.add_router("r1")
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
def setup_module(mod):
- tgen = Topogen(TimingTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
for rname, router in router_list.items():
router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)),
+ TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, "{}/zebra.conf".format(rname)),
)
router.load_config(
TopoRouter.RD_STATIC, os.path.join(CWD, "{}/staticd.conf".format(rname))
tgen = get_topogen()
tgen.stop_topology()
+
def get_ip_networks(super_prefix, count):
count_log2 = math.log(count, 2)
if count_log2 != int(count_log2):
network = ipaddress.ip_network(super_prefix)
return tuple(network.subnets(count_log2))[0:count]
+
def test_static_timing():
tgen = get_topogen()
pytest.skip(tgen.errors)
def do_config(
- count, bad_indices, base_delta, d_multiplier, add=True, do_ipv6=False, super_prefix=None, en_dbg=False
+ count,
+ bad_indices,
+ base_delta,
+ d_multiplier,
+ add=True,
+ do_ipv6=False,
+ super_prefix=None,
+ en_dbg=False,
):
router_list = tgen.routers()
tot_delta = float(0)
optyped = "added" if add else "removed"
for rname, router in router_list.items():
- router.logger.info("{} {} static {} routes".format(
- optype, count, iptype)
- )
+ router.logger.info("{} {} static {} routes".format(optype, count, iptype))
# Generate config file.
config_file = os.path.join(
- router.logdir, rname, "{}-routes-{}.conf".format(
- iptype.lower(), optype
- )
+ router.logdir, rname, "{}-routes-{}.conf".format(iptype.lower(), optype)
)
with open(config_file, "w") as f:
for i, net in enumerate(get_ip_networks(super_prefix, count)):
# Number of static routes
prefix_count = 10000
- prefix_base = [[u"10.0.0.0/8", u"11.0.0.0/8"],
- [u"2100:1111:2220::/44", u"2100:3333:4440::/44"]]
+ prefix_base = [
+ [u"10.0.0.0/8", u"11.0.0.0/8"],
+ [u"2100:1111:2220::/44", u"2100:3333:4440::/44"],
+ ]
bad_indices = []
for ipv6 in [False, True]:
- base_delta = do_config(prefix_count, bad_indices, 0, 0, True, ipv6, prefix_base[ipv6][0])
+ base_delta = do_config(
+ prefix_count, bad_indices, 0, 0, True, ipv6, prefix_base[ipv6][0]
+ )
# Another set of same number of prefixes
- do_config(prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][1])
+ do_config(
+ prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][1]
+ )
# Duplicate config
- do_config(prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][0])
+ do_config(
+ prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][0]
+ )
# Remove 1/2 of duplicate
- do_config(prefix_count / 2, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][0])
+ do_config(
+ prefix_count // 2,
+ bad_indices,
+ base_delta,
+ 2,
+ False,
+ ipv6,
+ prefix_base[ipv6][0],
+ )
# Add all back in so 1/2 replicate 1/2 new
- do_config(prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][0])
+ do_config(
+ prefix_count, bad_indices, base_delta, 2, True, ipv6, prefix_base[ipv6][0]
+ )
# remove all
- delta = do_config(prefix_count, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][0])
- delta += do_config(prefix_count, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][1])
+ delta = do_config(
+ prefix_count, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][0]
+ )
+ delta += do_config(
+ prefix_count, bad_indices, base_delta, 2, False, ipv6, prefix_base[ipv6][1]
+ )
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
import os
import pdb
import re
-import pytest
+import subprocess
+import sys
+import time
-from lib.topogen import get_topogen, diagnose_env
-from lib.topotest import json_cmp_result
-from lib.topotest import g_extra_config as topotest_extra_config
+import pytest
+import lib.fixtures
+from lib import topolog
+from lib.micronet import Commander, proc_error
+from lib.micronet_cli import cli
+from lib.micronet_compat import Mininet, cleanup_current, cleanup_previous
+from lib.topogen import diagnose_env, get_topogen
from lib.topolog import logger
+from lib.topotest import g_extra_config as topotest_extra_config
+from lib.topotest import json_cmp_result
try:
from _pytest._code.code import ExceptionInfo
+
leak_check_ok = True
except ImportError:
leak_check_ok = False
help="Configure address sanitizer to abort process on error",
)
+ parser.addoption(
+ "--cli-on-error",
+ action="store_true",
+ help="Mininet cli on test failure",
+ )
+
parser.addoption(
"--gdb-breakpoints",
metavar="SYMBOL[,SYMBOL...]",
)
parser.addoption(
- "--mininet-on-error",
+ "--pause",
action="store_true",
- help="Mininet cli on test failure",
+ help="Pause after each test",
)
parser.addoption(
- "--pause-after",
+ "--pause-on-error",
action="store_true",
- help="Pause after each test",
+ help="Do not pause after (disables default when --shell or -vtysh given)",
)
+ parser.addoption(
+ "--no-pause-on-error",
+ dest="pause_on_error",
+ action="store_false",
+ help="Do not pause after (disables default when --shell or -vtysh given)",
+ )
+
+ rundir_help = "directory for running in and log files"
+ parser.addini("rundir", rundir_help, default="/tmp/topotests")
+ parser.addoption("--rundir", metavar="DIR", help=rundir_help)
+
parser.addoption(
"--shell",
metavar="ROUTER[,ROUTER...]",
latest = []
existing = []
if tgen is not None:
- logdir = "/tmp/topotests/{}".format(tgen.modname)
+ logdir = tgen.logdir
if hasattr(tgen, "valgrind_existing_files"):
existing = tgen.valgrind_existing_files
latest = glob.glob(os.path.join(logdir, "*.valgrind.*"))
vfcontent = vf.read()
match = re.search(r"ERROR SUMMARY: (\d+) errors", vfcontent)
if match and match.group(1) != "0":
- emsg = '{} in {}'.format(match.group(1), vfile)
+ emsg = "{} in {}".format(match.group(1), vfile)
leaks.append(emsg)
if leaks:
logger.error("Memleaks found:\n\t" + "\n\t".join(leaks))
+def pytest_runtest_logstart(nodeid, location):
+ # location is (filename, lineno, testname)
+ topolog.logstart(nodeid, location, topotest_extra_config["rundir"])
+
+
+def pytest_runtest_logfinish(nodeid, location):
+ # location is (filename, lineno, testname)
+ topolog.logfinish(nodeid, location)
+
+
def pytest_runtest_call():
"""
This function must be run after setup_module(), it does standarized post
tgen = get_topogen()
if tgen is not None:
# Allow user to play with the setup.
- tgen.mininet_cli()
+ tgen.cli()
pytest.exit("the topology executed successfully")
Assert that the environment is correctly configured, and get extra config.
"""
- if not diagnose_env():
- pytest.exit("environment has errors, please read the logs")
+ if "PYTEST_XDIST_WORKER" not in os.environ:
+ os.environ["PYTEST_XDIST_MODE"] = config.getoption("dist", "no")
+ os.environ["PYTEST_TOPOTEST_WORKER"] = ""
+ is_xdist = os.environ["PYTEST_XDIST_MODE"] != "no"
+ is_worker = False
+ else:
+ os.environ["PYTEST_TOPOTEST_WORKER"] = os.environ["PYTEST_XDIST_WORKER"]
+ is_xdist = True
+ is_worker = True
+
+ # -----------------------------------------------------
+ # Set some defaults for the pytest.ini [pytest] section
+ # ---------------------------------------------------
+
+ rundir = config.getoption("--rundir")
+ if not rundir:
+ rundir = config.getini("rundir")
+ if not rundir:
+ rundir = "/tmp/topotests"
+ if not config.getoption("--junitxml"):
+ config.option.xmlpath = os.path.join(rundir, "topotests.xml")
+ xmlpath = config.option.xmlpath
+
+ # Save an existing topotest.xml
+ if os.path.exists(xmlpath):
+ fmtime = time.localtime(os.path.getmtime(xmlpath))
+ suffix = "-" + time.strftime("%Y%m%d%H%M%S", fmtime)
+ commander = Commander("pytest")
+ mv_path = commander.get_exec_path("mv")
+ commander.cmd_status([mv_path, xmlpath, xmlpath + suffix])
+
+ topotest_extra_config["rundir"] = rundir
+
+ # Set the log_file (exec) to inside the rundir if not specified
+ if not config.getoption("--log-file") and not config.getini("log_file"):
+ config.option.log_file = os.path.join(rundir, "exec.log")
+
+ # Turn on live logging if user specified verbose and the config has a CLI level set
+ if config.getoption("--verbose") and not is_xdist and not config.getini("log_cli"):
+ if config.getoption("--log-cli-level", None) is None:
+ # By setting the CLI option to the ini value it enables log_cli=1
+ cli_level = config.getini("log_cli_level")
+ if cli_level is not None:
+ config.option.log_cli_level = cli_level
+
+ have_tmux = bool(os.getenv("TMUX", ""))
+ have_screen = not have_tmux and bool(os.getenv("STY", ""))
+ have_xterm = not have_tmux and not have_screen and bool(os.getenv("DISPLAY", ""))
+ have_windows = have_tmux or have_screen or have_xterm
+ have_windows_pause = have_tmux or have_xterm
+ xdist_no_windows = is_xdist and not is_worker and not have_windows_pause
+
+ def assert_feature_windows(b, feature):
+ if b and xdist_no_windows:
+ pytest.exit(
+ "{} use requires byobu/TMUX/XTerm under dist {}".format(
+ feature, os.environ["PYTEST_XDIST_MODE"]
+ )
+ )
+ elif b and not is_xdist and not have_windows:
+ pytest.exit("{} use requires byobu/TMUX/SCREEN/XTerm".format(feature))
+
+ # ---------------------------------------
+ # Record our options in global dictionary
+ # ---------------------------------------
+
+ topotest_extra_config["rundir"] = rundir
asan_abort = config.getoption("--asan-abort")
topotest_extra_config["asan_abort"] = asan_abort
gdb_daemons = config.getoption("--gdb-daemons")
gdb_daemons = gdb_daemons.split(",") if gdb_daemons else []
topotest_extra_config["gdb_daemons"] = gdb_daemons
+ assert_feature_windows(gdb_routers or gdb_daemons, "GDB")
gdb_breakpoints = config.getoption("--gdb-breakpoints")
gdb_breakpoints = gdb_breakpoints.split(",") if gdb_breakpoints else []
topotest_extra_config["gdb_breakpoints"] = gdb_breakpoints
- mincli_on_error = config.getoption("--mininet-on-error")
- topotest_extra_config["mininet_on_error"] = mincli_on_error
+ cli_on_error = config.getoption("--cli-on-error")
+ topotest_extra_config["cli_on_error"] = cli_on_error
+ assert_feature_windows(cli_on_error, "--cli-on-error")
shell = config.getoption("--shell")
topotest_extra_config["shell"] = shell.split(",") if shell else []
+ assert_feature_windows(shell, "--shell")
strace = config.getoption("--strace-daemons")
topotest_extra_config["strace_daemons"] = strace.split(",") if strace else []
- pause_after = config.getoption("--pause-after")
-
shell_on_error = config.getoption("--shell-on-error")
topotest_extra_config["shell_on_error"] = shell_on_error
+ assert_feature_windows(shell_on_error, "--shell-on-error")
topotest_extra_config["valgrind_extra"] = config.getoption("--valgrind-extra")
topotest_extra_config["valgrind_memleaks"] = config.getoption("--valgrind-memleaks")
vtysh = config.getoption("--vtysh")
topotest_extra_config["vtysh"] = vtysh.split(",") if vtysh else []
+ assert_feature_windows(vtysh, "--vtysh")
vtysh_on_error = config.getoption("--vtysh-on-error")
topotest_extra_config["vtysh_on_error"] = vtysh_on_error
+ assert_feature_windows(vtysh_on_error, "--vtysh-on-error")
- topotest_extra_config["pause_after"] = pause_after or shell or vtysh
+ pause_on_error = vtysh or shell or config.getoption("--pause-on-error")
+ if config.getoption("--no-pause-on-error"):
+ pause_on_error = False
+
+ topotest_extra_config["pause_on_error"] = pause_on_error
+ assert_feature_windows(pause_on_error, "--pause-on-error")
+
+ pause = config.getoption("--pause")
+ topotest_extra_config["pause"] = pause
+ assert_feature_windows(pause, "--pause")
topotest_extra_config["topology_only"] = config.getoption("--topology-only")
+ # Check environment now that we have config
+ if not diagnose_env(rundir):
+ pytest.exit("environment has errors, please read the logs")
+
+
+@pytest.fixture(autouse=True, scope="session")
+def setup_session_auto():
+ if "PYTEST_TOPOTEST_WORKER" not in os.environ:
+ is_worker = False
+ elif not os.environ["PYTEST_TOPOTEST_WORKER"]:
+ is_worker = False
+ else:
+ is_worker = True
+
+ logger.debug("Before the run (is_worker: %s)", is_worker)
+ if not is_worker:
+ cleanup_previous()
+ yield
+ if not is_worker:
+ cleanup_current()
+ logger.debug("After the run (is_worker: %s)", is_worker)
+
+
+def pytest_runtest_setup(item):
+ module = item.parent.module
+ script_dir = os.path.abspath(os.path.dirname(module.__file__))
+ os.environ["PYTEST_TOPOTEST_SCRIPTDIR"] = script_dir
+
def pytest_runtest_makereport(item, call):
"Log all assert messages to default logger with error level"
# Nothing happened
if call.when == "call":
- pause = topotest_extra_config["pause_after"]
+ pause = topotest_extra_config["pause"]
else:
pause = False
except:
call.excinfo = ExceptionInfo()
+ title = "unset"
+
if call.excinfo is None:
error = False
else:
modname = parent.module.__name__
# Treat skips as non errors, don't pause after
- if call.excinfo.typename != "AssertionError":
+ if call.excinfo.typename == "Skipped":
pause = False
error = False
logger.info(
- 'assert skipped at "{}/{}": {}'.format(
+ 'test skipped at "{}/{}": {}'.format(
modname, item.name, call.excinfo.value
)
)
# Handle assert failures
parent._previousfailed = item # pylint: disable=W0212
logger.error(
- 'assert failed at "{}/{}": {}'.format(
+ 'test failed at "{}/{}": {}'.format(
modname, item.name, call.excinfo.value
)
)
+ title = "{}/{}".format(modname, item.name)
# We want to pause, if requested, on any error not just test cases
# (e.g., call.when == "setup")
if not pause:
- pause = topotest_extra_config["pause_after"]
+ pause = (
+ topotest_extra_config["pause_on_error"]
+ or topotest_extra_config["pause"]
+ )
# (topogen) Set topology error to avoid advancing in the test.
tgen = get_topogen()
# This will cause topogen to report error on `routers_have_failure`.
tgen.set_error("{}/{}".format(modname, item.name))
- if error and topotest_extra_config["shell_on_error"]:
- for router in tgen.routers():
- pause = True
- tgen.net[router].runInWindow(os.getenv("SHELL", "bash"))
+ commander = Commander("pytest")
+ isatty = sys.stdout.isatty()
+ error_cmd = None
if error and topotest_extra_config["vtysh_on_error"]:
- for router in tgen.routers():
+ error_cmd = commander.get_exec_path(["vtysh"])
+ elif error and topotest_extra_config["shell_on_error"]:
+ error_cmd = os.getenv("SHELL", commander.get_exec_path(["bash"]))
+
+ if error_cmd:
+ is_tmux = bool(os.getenv("TMUX", ""))
+ is_screen = not is_tmux and bool(os.getenv("STY", ""))
+ is_xterm = not is_tmux and not is_screen and bool(os.getenv("DISPLAY", ""))
+
+ channel = None
+ win_info = None
+ wait_for_channels = []
+ wait_for_procs = []
+ # Really would like something better than using this global here.
+ # Not all tests use topogen though so get_topogen() won't work.
+ for node in Mininet.g_mnet_inst.hosts.values():
pause = True
- tgen.net[router].runInWindow("vtysh")
- if error and topotest_extra_config["mininet_on_error"]:
- tgen.mininet_cli()
+ if is_tmux:
+ channel = (
+ "{}-{}".format(os.getpid(), Commander.tmux_wait_gen)
+ if not isatty
+ else None
+ )
+ Commander.tmux_wait_gen += 1
+ wait_for_channels.append(channel)
+
+ pane_info = node.run_in_window(
+ error_cmd,
+ new_window=win_info is None,
+ background=True,
+ title="{} ({})".format(title, node.name),
+ name=title,
+ tmux_target=win_info,
+ wait_for=channel,
+ )
+ if is_tmux:
+ if win_info is None:
+ win_info = pane_info
+ elif is_xterm:
+ assert isinstance(pane_info, subprocess.Popen)
+ wait_for_procs.append(pane_info)
+
+ # Now wait on any channels
+ for channel in wait_for_channels:
+ logger.debug("Waiting on TMUX channel %s", channel)
+ commander.cmd_raises([commander.get_exec_path("tmux"), "wait", channel])
+ for p in wait_for_procs:
+ logger.debug("Waiting on TMUX xterm process %s", p)
+ o, e = p.communicate()
+ if p.wait():
+ logger.warning("xterm proc failed: %s:", proc_error(p, o, e))
+
+ if error and topotest_extra_config["cli_on_error"]:
+ # Really would like something better than using this global here.
+ # Not all tests use topogen though so get_topogen() won't work.
+ if Mininet.g_mnet_inst:
+ cli(Mininet.g_mnet_inst, title=title, background=False)
+ else:
+ logger.error("Could not launch CLI b/c no mininet exists yet")
- if pause:
+ while pause and isatty:
try:
- user = raw_input('Testing paused, "pdb" to debug, "Enter" to continue: ')
+ user = raw_input(
+ 'PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: '
+ )
except NameError:
- user = input('Testing paused, "pdb" to debug, "Enter" to continue: ')
- if user.strip() == "pdb":
+ user = input('PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: ')
+ user = user.strip()
+
+ if user == "cli":
+ cli(Mininet.g_mnet_inst)
+ elif user == "pdb":
pdb.set_trace()
+ elif user:
+ print('Unrecognized input: "%s"' % user)
+ else:
+ break
+
+
+#
+# Add common fixtures available to all tests as parameters
+#
+tgen = pytest.fixture(lib.fixtures.tgen)
+topo = pytest.fixture(lib.fixtures.topo)
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
#####################################################
##
#####################################################
-class NetworkTopo(Topo):
- "EIGRP Topology 1"
+def build_topo(tgen):
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
- def build(self, **_opts):
- "Build function"
+ # On main router
+ # First switch is for a dummy interface (for local network)
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["r1"])
- tgen = get_topogen(self)
+ # Switches for EIGRP
+ # switch 2 switch is for connection to EIGRP router
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
+ # switch 4 is stub on remote EIGRP router
+ switch = tgen.add_switch("sw4")
+ switch.add_link(tgen.gears["r3"])
- # On main router
- # First switch is for a dummy interface (for local network)
- switch = tgen.add_switch("sw1")
- switch.add_link(tgen.gears["r1"])
-
- # Switches for EIGRP
- # switch 2 switch is for connection to EIGRP router
- switch = tgen.add_switch("sw2")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- # switch 4 is stub on remote EIGRP router
- switch = tgen.add_switch("sw4")
- switch.add_link(tgen.gears["r3"])
-
- # switch 3 is between EIGRP routers
- switch = tgen.add_switch("sw3")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ # switch 3 is between EIGRP routers
+ switch = tgen.add_switch("sw3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
#####################################################
def setup_module(module):
"Setup topology"
- tgen = Topogen(NetworkTopo, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
# This is a sample of configuration loading.
"""
import os
-import re
import sys
import pytest
import json
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd, pytest.mark.bgpd]
#####################################################
-class NetworkTopo(Topo):
- "evpn-pim Topology 1"
+def build_topo(tgen):
+ tgen.add_router("spine")
+ tgen.add_router("leaf1")
+ tgen.add_router("leaf2")
+ tgen.add_router("host1")
+ tgen.add_router("host2")
- def build(self, **_opts):
- "Build function"
+ # On main router
+ # First switch is for a dummy interface (for local network)
+ # spine-eth0 is connected to leaf1-eth0
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["spine"])
+ switch.add_link(tgen.gears["leaf1"])
- tgen = get_topogen(self)
+ # spine-eth1 is connected to leaf2-eth0
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["spine"])
+ switch.add_link(tgen.gears["leaf2"])
- tgen.add_router("spine")
- tgen.add_router("leaf1")
- tgen.add_router("leaf2")
- tgen.add_router("host1")
- tgen.add_router("host2")
+ # leaf1-eth1 is connected to host1-eth0
+ switch = tgen.add_switch("sw3")
+ switch.add_link(tgen.gears["leaf1"])
+ switch.add_link(tgen.gears["host1"])
- # On main router
- # First switch is for a dummy interface (for local network)
- # spine-eth0 is connected to leaf1-eth0
- switch = tgen.add_switch("sw1")
- switch.add_link(tgen.gears["spine"])
- switch.add_link(tgen.gears["leaf1"])
-
- # spine-eth1 is connected to leaf2-eth0
- switch = tgen.add_switch("sw2")
- switch.add_link(tgen.gears["spine"])
- switch.add_link(tgen.gears["leaf2"])
-
- # leaf1-eth1 is connected to host1-eth0
- switch = tgen.add_switch("sw3")
- switch.add_link(tgen.gears["leaf1"])
- switch.add_link(tgen.gears["host1"])
-
- # leaf2-eth1 is connected to host2-eth0
- switch = tgen.add_switch("sw4")
- switch.add_link(tgen.gears["leaf2"])
- switch.add_link(tgen.gears["host2"])
+ # leaf2-eth1 is connected to host2-eth0
+ switch = tgen.add_switch("sw4")
+ switch.add_link(tgen.gears["leaf2"])
+ switch.add_link(tgen.gears["host2"])
#####################################################
def setup_module(module):
"Setup topology"
- tgen = Topogen(NetworkTopo, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
leaf1 = tgen.gears["leaf1"]
"""
import os
-import re
import sys
-import json
import time
import pytest
import platform
from copy import deepcopy
-from time import sleep
# Save the Current Working Directory to find configuration files.
# Import topogen and topotest helpers
from lib.topotest import version_cmp
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
reset_config_on_routers,
verify_rib,
step,
- start_router_daemons,
create_static_routes,
create_vrf_cfg,
- create_route_maps,
- create_interface_in_kernel,
check_router_status,
configure_vxlan,
configure_brctl,
- apply_raw_config,
verify_vrf_vni,
verify_cli_json,
)
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
- clear_bgp,
- verify_best_path_as_per_bgp_attribute,
verify_attributes_for_evpn_routes,
- verify_evpn_routes,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/evpn_type5_chaos_topo1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Reading the data from JSON File for topology creation
# Global variables
TCPDUMP_FILE = "evpn_log.txt"
-LOGDIR = "/tmp/topotests/"
NETWORK1_1 = {"ipv4": "10.1.1.1/32", "ipv6": "10::1/128"}
NETWORK1_2 = {"ipv4": "40.1.1.1/32", "ipv6": "40::1/128"}
NETWORK1_3 = {"ipv4": "40.1.1.2/32", "ipv6": "40::2/128"}
}
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/evpn_type5_chaos_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
}
result = configure_vxlan(tgen, vxlan_input)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(
- tc_name, result
- )
+ assert result is True, "Testcase :Failed \n Error: {}".format(result)
step("Configure bridge interface")
brctl_input = {
}
}
result = configure_brctl(tgen, topo, brctl_input)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(
- tc_name, result
- )
+ assert result is True, "Testcase :Failed \n Error: {}".format(result)
step("Configure default routes")
add_default_routes(tgen)
}
result = create_static_routes(tgen, default_routes)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase :Failed \n Error: {}".format(result)
def test_verify_overlay_index_p1(request):
}
result = verify_rib(tgen, addr_type, "d2", input_routes, expected=False)
- assert result is not True, "Testcase {} :Failed \n "
- "Routes are still present: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Routes are still present: {}".format(tc_name, result)
logger.info("Expected Behavior: {}".format(result))
step(
result = verify_attributes_for_evpn_routes(
tgen, topo, "d2", input_routes_1, rt="auto", rt_peer="e1", expected=False
)
- assert result is not True, "Testcase {} :Failed \n "
- "Malfaromed Auto-RT value accepted: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Malfaromed Auto-RT value accepted: {}".format(
+ tc_name, result
+ )
logger.info("Expected Behavior: {}".format(result))
step("Configure VNI number more than boundary limit (16777215)")
result = verify_attributes_for_evpn_routes(
tgen, topo, "d2", input_routes_1, rt="auto", rt_peer="e1", expected=False
)
- assert result is not True, "Testcase {} :Failed \n "
- "Malfaromed Auto-RT value accepted: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Malfaromed Auto-RT value accepted: {}".format(
+ tc_name, result
+ )
logger.info("Expected Behavior: {}".format(result))
step("Un-configure VNI number more than boundary limit (16777215)")
"""
import os
-import re
import sys
import json
import time
import pytest
import platform
from copy import deepcopy
-from time import sleep
# Save the Current Working Directory to find configuration files.
# Import topogen and topotest helpers
from lib.topotest import version_cmp
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
verify_rib,
step,
create_route_maps,
- verify_cli_json,
- start_router_daemons,
create_static_routes,
- stop_router,
- start_router,
create_vrf_cfg,
check_router_status,
apply_raw_config,
configure_vxlan,
configure_brctl,
- verify_vrf_vni,
create_interface_in_kernel,
)
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
- clear_bgp,
verify_best_path_as_per_bgp_attribute,
verify_attributes_for_evpn_routes,
verify_evpn_routes,
}
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
+def build_topo(tgen):
+ build_topo_from_json(tgen, topo)
def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
}
result = configure_vxlan(tgen, vxlan_input)
- assert result is True, "Testcase {} on {} :Failed \n Error: {}".format(
- tc_name, dut, result
- )
+ assert result is True, "Testcase :Failed \n Error: {}".format(result)
step("Configure bridge interface")
brctl_input = {
}
}
result = configure_brctl(tgen, topo, brctl_input)
- assert result is True, "Testcase {} on {} :Failed \n Error: {}".format(
- tc_name, dut, result
- )
+ assert result is True, "Testcase :Failed \n Error: {}".format(result)
step("Configure default routes")
add_default_routes(tgen)
}
result = create_static_routes(tgen, default_routes)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+ assert result is True, "Testcase :Failed \n Error: {}".format(result)
def test_RD_verification_manual_and_auto_p0(request):
for addr_type in ADDR_TYPES:
input_routes = {key: topo["routers"][key] for key in ["r1"]}
result = verify_rib(tgen, addr_type, "d2", input_routes, expected=False)
- assert result is not True, "Testcase {} :Failed \n "
- "Routes are still present: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase :Failed \n Routes are still present: {}".format(result)
logger.info("Expected Behavior: {}".format(result))
for addr_type in ADDR_TYPES:
input_routes = {key: topo["routers"][key] for key in ["r1"]}
result = verify_rib(tgen, addr_type, "r3", input_routes, expected=False)
- assert result is not True, "Testcase {} :Failed \n "
- "Routes are still present: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Routes are still present: {}".format(tc_name, result)
logger.info("Expected Behavior: {}".format(result))
step("Re-advertise IP prefixes from VFN(R1).")
}
result = verify_rib(tgen, addr_type, "d2", input_routes, expected=False)
- assert result is not True, "Testcase {} :Failed \n "
- "Routes are still present: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Routes are still present: {}".format(tc_name, result)
logger.info("Expected Behavior: {}".format(result))
result = verify_rib(tgen, addr_type, "r4", input_routes, expected=False)
- assert result is not True, "Testcase {} :Failed \n "
- "Routes are still present: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Routes are still present: {}".format(tc_name, result)
logger.info("Expected Behavior: {}".format(result))
step("Add vrf BLUE on router Edge-1 again.")
}
result = verify_rib(tgen, addr_type, "d2", input_routes, expected=False)
- assert result is not True, "Testcase {} :Failed \n "
- "Routes are still present: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Routes are still present: {}".format(tc_name, result)
logger.info("Expected Behavior: {}".format(result))
result = verify_rib(tgen, addr_type, "r4", input_routes, expected=False)
- assert result is not True, "Testcase {} :Failed \n "
- "Routes are still present: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} :Failed \n Routes are still present: {}".format(tc_name, result)
logger.info("Expected Behavior: {}".format(result))
step("Advertise IPv6 address-family in EVPN advertisements " "for VRF GREEN.")
input_dict_1 = {
"e1": {
"route_maps": {
- "rmap_d1".format(addr_type): [
+ "rmap_d1": [
{
"action": "permit",
"set": {
},
}
],
- "rmap_d2".format(addr_type): [
+ "rmap_d2": [
{
"action": "permit",
"set": {
input_dict_1 = {
"e1": {
"route_maps": {
- "rmap_d1".format(addr_type): [
- {"action": "permit", "set": {attribute: 120}}
- ],
- "rmap_d2".format(addr_type): [
- {"action": "permit", "set": {attribute: 150}}
- ],
+ "rmap_d1": [{"action": "permit", "set": {attribute: 120}}],
+ "rmap_d2": [{"action": "permit", "set": {attribute: 150}}],
}
}
}
--- /dev/null
+interface r1-eth0
+ ip address 192.168.1.1/24
+
+interface r1-eth1
+ ip address 192.168.2.1/24
+
+interface r1-eth2
+ ip address 192.168.3.1/24
\ No newline at end of file
--- /dev/null
+interface r2-eth0
+ ip address 192.168.1.2/24
+interface r2-eth1
+ ip address 192.168.3.2/24
assert True, "Some Text with explaination in case of failure"
+@pytest.mark.xfail
def test_ls_exits_zero():
"Tests for ls command on invalid file"
#!/usr/bin/env python
-
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
#
# <template>.py
# Part of NetDEF Topology Tests
<template>.py: Test <template>.
"""
-import os
import sys
import pytest
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topogen import Topogen, TopoRouter
from lib.topolog import logger
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-
-
# TODO: select markers based on daemons used during test
# pytest module level markers
-"""
-pytestmark = pytest.mark.bfdd # single marker
pytestmark = [
- pytest.mark.bgpd,
- pytest.mark.ospfd,
- pytest.mark.ospf6d
-] # multiple markers
-"""
-
+ # pytest.mark.babeld,
+ # pytest.mark.bfdd,
+ # pytest.mark.bgpd,
+ # pytest.mark.eigrpd,
+ # pytest.mark.isisd,
+ # pytest.mark.ldpd,
+ # pytest.mark.nhrpd,
+ # pytest.mark.ospf6d,
+ pytest.mark.ospfd,
+ # pytest.mark.pathd,
+ # pytest.mark.pbrd,
+ # pytest.mark.pimd,
+ # pytest.mark.ripd,
+ # pytest.mark.ripngd,
+ # pytest.mark.sharpd,
+ # pytest.mark.staticd,
+ # pytest.mark.vrrpd,
+]
+
+# Function we pass to Topogen to create the topology
+def build_topo(tgen):
+ "Build function"
+
+ # Create 2 routers
+ r1 = tgen.add_router("r1")
+ r2 = tgen.add_router("r2")
+
+ # Create a p2p connection between r1 and r2
+ tgen.add_link(r1, r2)
+
+ # Create a switch with one router connected to it to simulate a empty network.
+ switch = tgen.add_switch("s1")
+ switch.add_link(r1)
+
+ # Create a p2p connection between r1 and r2
+ switch = tgen.add_switch("s2")
+ switch.add_link(r1)
+ switch.add_link(r2)
+
+
+# New form of setup/teardown using pytest fixture
+@pytest.fixture(scope="module")
+def tgen(request):
+ "Setup/Teardown the environment and provide tgen argument to tests"
-class TemplateTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # This function only purpose is to define allocation and relationship
- # between routers, switches and hosts.
- #
- # Example
- #
- # Create 2 routers
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- # Create a switch with just one router connected to it to simulate a
- # empty network.
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
-
- # Create a connection between r1 and r2
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
-
-def setup_module(mod):
- "Sets up the pytest environment"
# This function initiates the topology build with Topogen...
- tgen = Topogen(TemplateTopo, mod.__name__)
- # ... and here it calls Mininet initialization functions.
+ tgen = Topogen(build_topo, request.module.__name__)
+
+ # A basic topology similar to the above could also have be more easily specified
+ # using a # dictionary, remove the build_topo function and use the following
+ # instead:
+ #
+ # topodef = {
+ # "s1": "r1"
+ # "s2": ("r1", "r2")
+ # }
+ # tgen = Topogen(topodef, request.module.__name__)
+
+ # ... and here it calls initialization functions.
tgen.start_topology()
# This is a sample of configuration loading.
router_list = tgen.routers()
- # For all registred routers, load the zebra configuration file
+ # For all routers arrange for:
+ # - starting zebra using config file from <rtrname>/zebra.conf
+ # - starting ospfd using an empty config file.
for rname, router in router_list.items():
- router.load_config(
- TopoRouter.RD_ZEBRA,
- # Uncomment next line to load configuration from ./router/zebra.conf
- # os.path.join(CWD, '{}/zebra.conf'.format(rname))
- )
+ router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ router.load_config(TopoRouter.RD_OSPF)
- # After loading the configurations, this function loads configured daemons.
+ # Start and configure the router daemons
tgen.start_router()
+ # Provide tgen as argument to each test function
+ yield tgen
-def teardown_module(mod):
- "Teardown the pytest environment"
- tgen = get_topogen()
-
- # This function tears down the whole topology.
+ # Teardown after last test runs
tgen.stop_topology()
-def test_call_mininet_cli():
- "Dummy test that just calls mininet CLI so we can interact with the build."
- tgen = get_topogen()
- # Don't run this test if we have any failure.
+# Fixture that executes before each test
+@pytest.fixture(autouse=True)
+def skip_on_failure(tgen):
if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
+ pytest.skip("skipped because of previous test failure")
- logger.info("calling mininet CLI")
- tgen.mininet_cli()
+
+# ===================
+# The tests functions
+# ===================
+
+
+def test_get_version(tgen):
+ "Test the logs the FRR version"
+
+ r1 = tgen.gears["r1"]
+ version = r1.vtysh_cmd("show version")
+ logger.info("FRR version is: " + version)
+
+
+def test_connectivity(tgen):
+ "Test the logs the FRR version"
+
+ r1 = tgen.gears["r1"]
+ r2 = tgen.gears["r2"]
+ output = r1.cmd_raises("ping -c1 192.168.1.2")
+ output = r2.cmd_raises("ping -c1 192.168.3.1")
+
+
+@pytest.mark.xfail
+def test_expect_failure(tgen):
+ "A test that is current expected to fail but should be fixed"
+
+ assert False, "Example of temporary expected failure that will eventually be fixed"
+
+
+@pytest.mark.skip
+def test_will_be_skipped(tgen):
+ "A test that will be skipped"
+ assert False
# Memory leak test template
-def test_memory_leak():
+def test_memory_leak(tgen):
"Run the memory leak test and report results."
- tgen = get_topogen()
+
if not tgen.is_memleak_enabled():
pytest.skip("Memory leak test/report is disabled")
--- /dev/null
+
+{
+ "address_types": ["ipv4","ipv6"],
+ "ipv4base":"10.0.0.0",
+ "ipv4mask":30,
+ "ipv6base":"fd00::",
+ "ipv6mask":64,
+ "link_ip_start":{"ipv4":"10.0.0.0", "v4mask":30, "ipv6":"fd00::", "v6mask":64},
+ "lo_prefix":{"ipv4":"1.0.", "v4mask":32, "ipv6":"2001:DB8:F::", "v6mask":128},
+ "routers":{
+ "r1":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r2":{"ipv4":"auto", "ipv6":"auto"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r2": {
+ "dest_link": {
+ "r1": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r1": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r2":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1":{"ipv4":"auto", "ipv6":"auto"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r2": {}
+ }
+ },
+ "r3": {
+ "dest_link": {
+ "r2": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r3":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r1":{"ipv4":"auto", "ipv6":"auto"},
+ "r2":{"ipv4":"auto", "ipv6":"auto"},
+ "r4":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"100",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r2": {
+ "dest_link": {
+ "r3": {}
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "r4":{
+ "links":{
+ "lo": {"ipv4": "auto", "ipv6": "auto", "type": "loopback"},
+ "r3":{"ipv4":"auto", "ipv6":"auto"}
+ },
+ "bgp":{
+ "local_as":"200",
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r4": {}
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
--- /dev/null
+#!/usr/bin/env python3
+#
+# September 5 2021, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+# Copyright (c) 2017 by
+# Network Device Education Foundation, Inc. ("NetDEF")
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+<template>.py: Test <template>.
+"""
+
+import pytest
+
+# Import topogen and topotest helpers
+from lib import bgp
+from lib import fixtures
+
+
+# TODO: select markers based on daemons used during test
+pytestmark = [
+ pytest.mark.bgpd,
+ # pytest.mark.ospfd,
+ # pytest.mark.ospf6d
+ # ...
+]
+
+# Use tgen_json fixture (invoked by use test arg of same name) to
+# setup/teardown standard JSON topotest
+tgen = pytest.fixture(fixtures.tgen_json, scope="module")
+
+
+# tgen is defined above
+# topo is a fixture defined in ../conftest.py
+def test_bgp_convergence(tgen, topo):
+ "Test for BGP convergence."
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ bgp_convergence = bgp.verify_bgp_convergence(tgen, topo)
+ assert bgp_convergence
+
+
+# Memory leak test template
+def test_memory_leak(tgen):
+ "Run the memory leak test and report results."
+
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
import sys
import json
import time
-import inspect
import pytest
# Save the Current Working Directory to find configuration files.
from lib.topogen import Topogen, get_topogen
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
input_dict = {}
-class TemplateTopo(Topo):
- """
- Test topology builder
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+def build_topo(tgen):
+ "Build function"
- # This function only purpose is to create topology
- # as defined in input json file.
- #
- # Example
- #
- # Creating 2 routers having 2 links in between,
- # one is used to establised BGP neighborship
+ # This function only purpose is to create topology
+ # as defined in input json file.
+ #
+ # Example
+ #
+ # Creating 2 routers having 2 links in between,
+ # one is used to establised BGP neighborship
- # Building topology from json file
- build_topo_from_json(tgen, topo)
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
def test_bgp_convergence(request):
- " Test BGP daemon convergence "
+ "Test BGP daemon convergence"
tgen = get_topogen()
global bgp_convergence
def test_static_routes(request):
- " Test to create and verify static routes. "
+ "Test to create and verify static routes."
tgen = get_topogen()
if bgp_convergence is not True:
from lib.topogen import Topogen, get_topogen
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
input_dict = {}
-class TemplateTopo(Topo):
- """
- Test topology builder
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+def build_topo(tgen):
+ "Build function"
- # This function only purpose is to create topology
- # as defined in input json file.
- #
- # Example
- #
- # Creating 2 routers having single links in between,
- # which is used to establised BGP neighborship
+ # This function only purpose is to create topology
+ # as defined in input json file.
+ #
+ # Example
+ #
+ # Creating 2 routers having single links in between,
+ # which is used to establised BGP neighborship
- # Building topology from json file
- build_topo_from_json(tgen, topo)
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
def test_bgp_convergence(request):
- " Test BGP daemon convergence "
+ "Test BGP daemon convergence"
tgen = get_topogen()
global bgp_convergence
def test_static_routes(request):
- " Test to create and verify static routes. "
+ "Test to create and verify static routes."
tgen = get_topogen()
if bgp_convergence is not True:
import sys
import time
import json
-import inspect
import pytest
# Save the Current Working Directory to find configuration files.
from lib.topogen import Topogen, get_topogen
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
input_dict = {}
-class TemplateTopo(Topo):
- """
- Test topology builder
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+def build_topo(tgen):
+ "Build function"
- # This function only purpose is to create topology
- # as defined in input json file.
- #
- # Example
- #
- # Creating 2 routers having single links in between,
- # which is used to establised BGP neighborship
+ # This function only purpose is to create topology
+ # as defined in input json file.
+ #
+ # Example
+ #
+ # Creating 2 routers having single links in between,
+ # which is used to establised BGP neighborship
- # Building topology from json file
- build_topo_from_json(tgen, topo)
+ # Building topology from json file
+ build_topo_from_json(tgen, topo)
def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
def test_bgp_convergence(request):
- " Test BGP daemon convergence "
+ "Test BGP daemon convergence"
tgen = get_topogen()
global bgp_convergence
def test_static_routes(request):
- " Test to create and verify static routes. "
+ "Test to create and verify static routes."
tgen = get_topogen()
if bgp_convergence is not True:
import sys
import pytest
import json
-import re
import tempfile
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.isisd]
outputs = {}
-class TemplateTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]:
- tgen.add_router(router)
-
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt3")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt2")
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1")
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt4")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt1")
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt1")
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt1")
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt7")
- switch.add_link(tgen.gears["rt7"], nodeif="eth-rt2")
- switch = tgen.add_switch("s8")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt7")
- switch.add_link(tgen.gears["rt7"], nodeif="eth-rt3")
- switch = tgen.add_switch("s9")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt7")
- switch.add_link(tgen.gears["rt7"], nodeif="eth-rt4")
- switch = tgen.add_switch("s10")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt7")
- switch.add_link(tgen.gears["rt7"], nodeif="eth-rt5")
- switch = tgen.add_switch("s11")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt7")
- switch.add_link(tgen.gears["rt7"], nodeif="eth-rt6")
-
- #
- # Populate multi-dimensional dictionary containing all expected outputs
- #
- files = ["show_ipv6_route.ref", "show_yang_interface_isis_adjacencies.ref"]
- for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]:
- outputs[rname] = {}
- for step in range(1, 13 + 1):
- outputs[rname][step] = {}
- for file in files:
- if step == 1:
- # Get snapshots relative to the expected initial network convergence
- filename = "{}/{}/step{}/{}".format(CWD, rname, step, file)
- outputs[rname][step][file] = open(filename).read()
- else:
- if rname != "rt1":
- continue
- if file == "show_yang_interface_isis_adjacencies.ref":
- continue
-
- # Get diff relative to the previous step
- filename = "{}/{}/step{}/{}.diff".format(CWD, rname, step, file)
-
- # Create temporary files in order to apply the diff
- f_in = tempfile.NamedTemporaryFile()
- f_in.write(outputs[rname][step - 1][file])
- f_in.flush()
- f_out = tempfile.NamedTemporaryFile()
- os.system(
- "patch -s -o %s %s %s" % (f_out.name, f_in.name, filename)
- )
-
- # Store the updated snapshot and remove the temporary files
- outputs[rname][step][file] = open(f_out.name).read()
- f_in.close()
- f_out.close()
+def build_topo(tgen):
+ "Build function"
+
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]:
+ tgen.add_router(router)
+
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt3")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt2")
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt4")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt1")
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt1")
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt1")
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt7")
+ switch.add_link(tgen.gears["rt7"], nodeif="eth-rt2")
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt7")
+ switch.add_link(tgen.gears["rt7"], nodeif="eth-rt3")
+ switch = tgen.add_switch("s9")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt7")
+ switch.add_link(tgen.gears["rt7"], nodeif="eth-rt4")
+ switch = tgen.add_switch("s10")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt7")
+ switch.add_link(tgen.gears["rt7"], nodeif="eth-rt5")
+ switch = tgen.add_switch("s11")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt7")
+ switch.add_link(tgen.gears["rt7"], nodeif="eth-rt6")
+
+ #
+ # Populate multi-dimensional dictionary containing all expected outputs
+ #
+ files = ["show_ipv6_route.ref", "show_yang_interface_isis_adjacencies.ref"]
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]:
+ outputs[rname] = {}
+ for step in range(1, 13 + 1):
+ outputs[rname][step] = {}
+ for file in files:
+ if step == 1:
+ # Get snapshots relative to the expected initial network convergence
+ filename = "{}/{}/step{}/{}".format(CWD, rname, step, file)
+ outputs[rname][step][file] = open(filename).read()
+ else:
+ if rname != "rt1":
+ continue
+ if file == "show_yang_interface_isis_adjacencies.ref":
+ continue
+
+ # Get diff relative to the previous step
+ filename = "{}/{}/step{}/{}.diff".format(CWD, rname, step, file)
+
+ # Create temporary files in order to apply the diff
+ f_in = tempfile.NamedTemporaryFile(mode="w")
+ f_in.write(outputs[rname][step - 1][file])
+ f_in.flush()
+ f_out = tempfile.NamedTemporaryFile(mode="r")
+ os.system(
+ "patch -s -o %s %s %s" % (f_out.name, f_in.name, filename)
+ )
+
+ # Store the updated snapshot and remove the temporary files
+ outputs[rname][step][file] = open(f_out.name).read()
+ f_in.close()
+ f_out.close()
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
# For all registered routers, load the zebra configuration file
- for rname, router in router_list.iteritems():
+ for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
import sys
import pytest
import json
-import re
-import tempfile
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.isisd]
outputs = {}
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2")
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3")
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3")
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
-
- switch = tgen.add_switch("s8")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import sys
import pytest
import json
-import re
import tempfile
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.isisd, pytest.mark.ldpd]
outputs = {}
-class TemplateTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7", "rt8"]:
- tgen.add_router(router)
-
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1")
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2")
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3")
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt7")
- switch.add_link(tgen.gears["rt7"], nodeif="eth-rt5")
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt8")
- switch.add_link(tgen.gears["rt8"], nodeif="eth-rt6")
- switch = tgen.add_switch("s8")
- switch.add_link(tgen.gears["rt7"], nodeif="eth-rt8")
- switch.add_link(tgen.gears["rt8"], nodeif="eth-rt7")
-
- #
- # Populate multi-dimensional dictionary containing all expected outputs
- #
- files = [
- "show_ip_route.ref",
- "show_ipv6_route.ref",
- "show_yang_interface_isis_adjacencies.ref",
- ]
- for rname in ["rt1"]:
- outputs[rname] = {}
- for step in range(1, 10 + 1):
- outputs[rname][step] = {}
- for file in files:
- if step == 1:
- # Get snapshots relative to the expected initial network convergence
- filename = "{}/{}/step{}/{}".format(CWD, rname, step, file)
- outputs[rname][step][file] = open(filename).read()
- else:
- if file == "show_yang_interface_isis_adjacencies.ref":
- continue
-
- # Get diff relative to the previous step
- filename = "{}/{}/step{}/{}.diff".format(CWD, rname, step, file)
-
- # Create temporary files in order to apply the diff
- f_in = tempfile.NamedTemporaryFile()
- f_in.write(outputs[rname][step - 1][file])
- f_in.flush()
- f_out = tempfile.NamedTemporaryFile()
- os.system(
- "patch -s -o %s %s %s" % (f_out.name, f_in.name, filename)
- )
-
- # Store the updated snapshot and remove the temporary files
- outputs[rname][step][file] = open(f_out.name).read()
- f_in.close()
- f_out.close()
+def build_topo(tgen):
+ "Build function"
+
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7", "rt8"]:
+ tgen.add_router(router)
+
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1")
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3")
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt7")
+ switch.add_link(tgen.gears["rt7"], nodeif="eth-rt5")
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt8")
+ switch.add_link(tgen.gears["rt8"], nodeif="eth-rt6")
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["rt7"], nodeif="eth-rt8")
+ switch.add_link(tgen.gears["rt8"], nodeif="eth-rt7")
+
+ #
+ # Populate multi-dimensional dictionary containing all expected outputs
+ #
+ files = [
+ "show_ip_route.ref",
+ "show_ipv6_route.ref",
+ "show_yang_interface_isis_adjacencies.ref",
+ ]
+ for rname in ["rt1"]:
+ outputs[rname] = {}
+ for step in range(1, 10 + 1):
+ outputs[rname][step] = {}
+ for file in files:
+ if step == 1:
+ # Get snapshots relative to the expected initial network convergence
+ filename = "{}/{}/step{}/{}".format(CWD, rname, step, file)
+ outputs[rname][step][file] = open(filename).read()
+ else:
+ if file == "show_yang_interface_isis_adjacencies.ref":
+ continue
+
+ # Get diff relative to the previous step
+ filename = "{}/{}/step{}/{}.diff".format(CWD, rname, step, file)
+
+ # Create temporary files in order to apply the diff
+ f_in = tempfile.NamedTemporaryFile(mode="w")
+ f_in.write(outputs[rname][step - 1][file])
+ f_in.flush()
+ f_out = tempfile.NamedTemporaryFile(mode="r")
+ os.system(
+ "patch -s -o %s %s %s" % (f_out.name, f_in.name, filename)
+ )
+
+ # Store the updated snapshot and remove the temporary files
+ outputs[rname][step][file] = open(f_out.name).read()
+ f_in.close()
+ f_out.close()
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
# For all registered routers, load the zebra configuration file
- for rname, router in router_list.iteritems():
+ for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
"""
import os
-import re
import sys
import pytest
import json
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.snmptest import SnmpTester
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.isisd, pytest.mark.ldpd, pytest.mark.snmp]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["ce3", "r1", "r2", "r3", "r4", "r5"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["ce3", "r1", "r2", "r3", "r4", "r5"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r4"])
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r4"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r5"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r5"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["ce3"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["ce3"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r4"])
+ switch.add_link(tgen.gears["r5"])
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r4"])
- switch.add_link(tgen.gears["r5"])
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r3"])
-
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
pytest.skip(error_msg)
# This function initiates the topology build with Topogen...
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
tgen.start_topology()
router_list = tgen.routers()
# For all registered routers, load the zebra configuration file
- for rname, router in router_list.iteritems():
+ for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- r1 = tgen.net.get("r1")
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
assert r1_snmp.test_oid("isisSysVersion", "one(1)")
assert r1_snmp.test_oid("isisSysMaxAge", "1200 seconds")
assert r1_snmp.test_oid("isisSysProtSupported", "07 5 6 7")
- r2 = tgen.net.get("r2")
+ r2 = tgen.gears["r2"]
r2_snmp = SnmpTester(r2, "2.2.2.2", "public", "2c")
assert r2_snmp.test_oid("isisSysVersion", "one(1)")
def test_r1_isisCircTable():
tgen = get_topogen()
- r1 = tgen.net.get("r1")
- r1r = tgen.gears["r1"]
-
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
oids = []
def test_r1_isislevelCircTable():
tgen = get_topogen()
- r1 = tgen.net.get("r1")
- r1r = tgen.gears["r1"]
-
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
oids = []
def test_r1_isisAdjTable():
"check ISIS Adjacency Table"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
- r1_cmd = tgen.gears["r1"]
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
oids = []
# shutdown interface and one adjacency should be removed
"check ISIS adjacency is removed when interface is shutdown"
- r1_cmd.vtysh_cmd("conf t\ninterface r1-eth1\nshutdown")
+ r1.vtysh_cmd("conf t\ninterface r1-eth1\nshutdown")
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
for item in adjtable_down_test.keys():
), assertmsg
# no shutdown interface and adjacency should be restored
- r1_cmd.vtysh_cmd("conf t\ninterface r1-eth1\nno shutdown")
+ r1.vtysh_cmd("conf t\ninterface r1-eth1\nno shutdown")
# Memory leak test template
import sys
import pytest
import json
-import re
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd, pytest.mark.isisd, pytest.mark.pathd]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "dst"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "dst"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
- switch = tgen.add_switch("s8")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
-
- switch = tgen.add_switch("s9")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-dst")
- switch.add_link(tgen.gears["dst"], nodeif="eth-rt6")
+ switch = tgen.add_switch("s9")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-dst")
+ switch.add_link(tgen.gears["dst"], nodeif="eth-rt6")
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
frrdir = tgen.config.get(tgen.CONFIG_SECTION, "frrdir")
if not os.path.isfile(os.path.join(frrdir, "pathd")):
router_list = tgen.routers()
# For all registered routers, load the zebra configuration file
- for rname, router in router_list.iteritems():
+ for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
import pytest
import json
import re
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.isisd]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
-
- switch = tgen.add_switch("s8")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import sys
import pytest
import json
-import re
import tempfile
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.isisd]
outputs = {}
-class TemplateTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
- tgen.add_router(router)
-
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
-
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
-
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")
-
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")
-
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
-
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
-
- switch = tgen.add_switch("s8")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
-
- #
- # Populate multi-dimensional dictionary containing all expected outputs
- #
- files = [
- "show_ip_route.ref",
- "show_ipv6_route.ref",
- "show_mpls_table.ref",
- "show_yang_interface_isis_adjacencies.ref",
- ]
- for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
- outputs[rname] = {}
- for step in range(1, 9 + 1):
- outputs[rname][step] = {}
- for file in files:
- if step == 1:
- # Get snapshots relative to the expected initial network convergence
- filename = "{}/{}/step{}/{}".format(CWD, rname, step, file)
- outputs[rname][step][file] = open(filename).read()
- else:
- if file == "show_yang_interface_isis_adjacencies.ref":
- continue
-
- # Get diff relative to the previous step
- filename = "{}/{}/step{}/{}.diff".format(CWD, rname, step, file)
-
- # Create temporary files in order to apply the diff
- f_in = tempfile.NamedTemporaryFile()
- f_in.write(outputs[rname][step - 1][file])
- f_in.flush()
- f_out = tempfile.NamedTemporaryFile()
- os.system(
- "patch -s -o %s %s %s" % (f_out.name, f_in.name, filename)
- )
-
- # Store the updated snapshot and remove the temporary files
- outputs[rname][step][file] = open(f_out.name).read()
- f_in.close()
- f_out.close()
+def build_topo(tgen):
+ "Build function"
+
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ tgen.add_router(router)
+
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
+
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")
+
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")
+
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
+
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
+
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
+
+ #
+ # Populate multi-dimensional dictionary containing all expected outputs
+ #
+ files = [
+ "show_ip_route.ref",
+ "show_ipv6_route.ref",
+ "show_mpls_table.ref",
+ "show_yang_interface_isis_adjacencies.ref",
+ ]
+ for rname in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ outputs[rname] = {}
+ for step in range(1, 9 + 1):
+ outputs[rname][step] = {}
+ for file in files:
+ if step == 1:
+ # Get snapshots relative to the expected initial network convergence
+ filename = "{}/{}/step{}/{}".format(CWD, rname, step, file)
+ outputs[rname][step][file] = open(filename).read()
+ else:
+ if file == "show_yang_interface_isis_adjacencies.ref":
+ continue
+
+ # Get diff relative to the previous step
+ filename = "{}/{}/step{}/{}.diff".format(CWD, rname, step, file)
+
+ # Create temporary files in order to apply the diff
+ f_in = tempfile.NamedTemporaryFile(mode="w")
+ f_in.write(outputs[rname][step - 1][file])
+ f_in.flush()
+ f_out = tempfile.NamedTemporaryFile(mode="r")
+ os.system(
+ "patch -s -o %s %s %s" % (f_out.name, f_in.name, filename)
+ )
+
+ # Store the updated snapshot and remove the temporary files
+ outputs[rname][step][file] = open(f_out.name).read()
+ f_in.close()
+ f_out.close()
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
test_isis_topo1.py: Test ISIS topology.
"""
-import collections
import functools
import json
import os
import re
import sys
import pytest
-import time
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.isisd]
]
-class ISISTopo1(Topo):
- "Simple two layer ISIS topology"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ # Add ISIS routers:
+ # r1 r2
+ # | sw1 | sw2
+ # r3 r4
+ # | |
+ # sw3 sw4
+ # \ /
+ # r5
+ for routern in range(1, 6):
+ tgen.add_router("r{}".format(routern))
- # Add ISIS routers:
- # r1 r2
- # | sw1 | sw2
- # r3 r4
- # | |
- # sw3 sw4
- # \ /
- # r5
- for routern in range(1, 6):
- tgen.add_router("r{}".format(routern))
+ # r1 <- sw1 -> r3
+ sw = tgen.add_switch("sw1")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["r3"])
- # r1 <- sw1 -> r3
- sw = tgen.add_switch("sw1")
- sw.add_link(tgen.gears["r1"])
- sw.add_link(tgen.gears["r3"])
+ # r2 <- sw2 -> r4
+ sw = tgen.add_switch("sw2")
+ sw.add_link(tgen.gears["r2"])
+ sw.add_link(tgen.gears["r4"])
- # r2 <- sw2 -> r4
- sw = tgen.add_switch("sw2")
- sw.add_link(tgen.gears["r2"])
- sw.add_link(tgen.gears["r4"])
+ # r3 <- sw3 -> r5
+ sw = tgen.add_switch("sw3")
+ sw.add_link(tgen.gears["r3"])
+ sw.add_link(tgen.gears["r5"])
- # r3 <- sw3 -> r5
- sw = tgen.add_switch("sw3")
- sw.add_link(tgen.gears["r3"])
- sw.add_link(tgen.gears["r5"])
-
- # r4 <- sw4 -> r5
- sw = tgen.add_switch("sw4")
- sw.add_link(tgen.gears["r4"])
- sw.add_link(tgen.gears["r5"])
+ # r4 <- sw4 -> r5
+ sw = tgen.add_switch("sw4")
+ sw.add_link(tgen.gears["r4"])
+ sw.add_link(tgen.gears["r5"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(ISISTopo1, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
# For all registered routers, load the zebra configuration file
https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
"""
for k, v in merge_dct.items():
- if (
- k in dct
- and isinstance(dct[k], dict)
- and isinstance(merge_dct[k], collections.Mapping)
- ):
+ if k in dct and isinstance(dct[k], dict) and topotest.is_mapping(merge_dct[k]):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
test_isis_topo1_vrf.py: Test ISIS vrf topology.
"""
-import collections
import functools
import json
import os
import re
import sys
import pytest
-import platform
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
from lib.topotest import iproute2_is_vrf_capable
from lib.common_config import required_linux_kernel_version
-from mininet.topo import Topo
pytestmark = [pytest.mark.isisd]
]
-class ISISTopo1(Topo):
- "Simple two layer ISIS vrf topology"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ # Add ISIS routers:
+ # r1 r2
+ # | sw1 | sw2
+ # r3 r4
+ # | |
+ # sw3 sw4
+ # \ /
+ # r5
+ for routern in range(1, 6):
+ tgen.add_router("r{}".format(routern))
- # Add ISIS routers:
- # r1 r2
- # | sw1 | sw2
- # r3 r4
- # | |
- # sw3 sw4
- # \ /
- # r5
- for routern in range(1, 6):
- tgen.add_router("r{}".format(routern))
+ # r1 <- sw1 -> r3
+ sw = tgen.add_switch("sw1")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["r3"])
- # r1 <- sw1 -> r3
- sw = tgen.add_switch("sw1")
- sw.add_link(tgen.gears["r1"])
- sw.add_link(tgen.gears["r3"])
+ # r2 <- sw2 -> r4
+ sw = tgen.add_switch("sw2")
+ sw.add_link(tgen.gears["r2"])
+ sw.add_link(tgen.gears["r4"])
- # r2 <- sw2 -> r4
- sw = tgen.add_switch("sw2")
- sw.add_link(tgen.gears["r2"])
- sw.add_link(tgen.gears["r4"])
+ # r3 <- sw3 -> r5
+ sw = tgen.add_switch("sw3")
+ sw.add_link(tgen.gears["r3"])
+ sw.add_link(tgen.gears["r5"])
- # r3 <- sw3 -> r5
- sw = tgen.add_switch("sw3")
- sw.add_link(tgen.gears["r3"])
- sw.add_link(tgen.gears["r5"])
-
- # r4 <- sw4 -> r5
- sw = tgen.add_switch("sw4")
- sw.add_link(tgen.gears["r4"])
- sw.add_link(tgen.gears["r5"])
+ # r4 <- sw4 -> r5
+ sw = tgen.add_switch("sw4")
+ sw.add_link(tgen.gears["r4"])
+ sw.add_link(tgen.gears["r5"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(ISISTopo1, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
logger.info("Testing with VRF Lite support")
https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
"""
for k, v in merge_dct.items():
- if (
- k in dct
- and isinstance(dct[k], dict)
- and isinstance(merge_dct[k], collections.Mapping)
- ):
+ if k in dct and isinstance(dct[k], dict) and topotest.is_mapping(merge_dct[k]):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ldpd, pytest.mark.ospfd]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["r1", "r2", "r3", "r4"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["r1", "r2", "r3", "r4"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s0")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- #
- # Define connections
- #
- switch = tgen.add_switch("s0")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r4"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import sys
import pytest
import json
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ldpd, pytest.mark.ospfd]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["r1", "r2", "r3", "r4"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["r1", "r2", "r3", "r4"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s0")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- #
- # Define connections
- #
- switch = tgen.add_switch("s0")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r4"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
\ No newline at end of file
"""
import os
-import re
import sys
import pytest
import json
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.snmptest import SnmpTester
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ldpd, pytest.mark.isisd, pytest.mark.snmp]
-class TemplateTopo(Topo):
- "Test topology builder"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+def build_topo(tgen):
+ "Build function"
- #
- # Define FRR Routers
- #
- for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]:
- tgen.add_router(router)
+ #
+ # Define FRR Routers
+ #
+ for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]:
+ tgen.add_router(router)
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["ce1"])
- switch.add_link(tgen.gears["r1"])
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["ce1"])
+ switch.add_link(tgen.gears["r1"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["ce2"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["ce2"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["ce3"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["ce3"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
"Test mplsLdpLsrObjects objects"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
assert r1_snmp.test_oid("mplsLdpLsrId", "01 01 01 01")
"Test mplsLdpEntityTable"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
assert r1_snmp.test_oid_walk("mplsLdpEntityLdpId", ["1.1.1.1:0"])
"Test mplsLdpEntityStatsTable"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
assert r1_snmp.test_oid_walk("mplsLdpEntityStatsSessionAttempts", ["0"])
"Test mplsLdpPeerTable"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
assert r1_snmp.test_oid_walk("mplsLdpPeerLdpId", ["2.2.2.2:0", "3.3.3.3:0"])
"Test mplsLdpSessionTable"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
assert r1_snmp.test_oid_walk(
"Test mplsLdpSessionStatsTable"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
assert r1_snmp.test_oid_walk("mplsLdpSessionStatsUnknownMesTypeErrors", ["0", "0"])
"Test mplsLdpHelloAdjacencyTable"
tgen = get_topogen()
- r1 = tgen.net.get("r1")
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
assert r1_snmp.test_oid_walk("mplsLdpHelloAdjacencyIndex", ["1", "2", "1"])
import sys
import pytest
import json
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.isisd, pytest.mark.ldpd]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["ce1"])
+ switch.add_link(tgen.gears["r1"])
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["ce1"])
- switch.add_link(tgen.gears["r1"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["ce2"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["ce2"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["ce3"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["ce3"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r3"])
-
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
interface = {}
interface_name = None
- line = it.next()
+ line = next(it)
if line.startswith(rname + "-eth"):
interface_name = line
- line = it.next()
+ line = next(it)
if line.startswith(" LDP-IGP Synchronization enabled: "):
interface["ldpIgpSyncEnabled"] = line.endswith("yes")
- line = it.next()
+ line = next(it)
if line.startswith(" holddown timer in seconds: "):
interface["holdDownTimeInSec"] = int(line.split(": ")[-1])
- line = it.next()
+ line = next(it)
if line.startswith(" State: "):
interface["ldpIgpSyncState"] = line.split(": ")[-1]
while True:
try:
- line = it.next()
+ line = next(it)
area_match = re.match(r"Area (.+):", line)
if not area_match:
area_id = area_match.group(1)
area = {}
- line = it.next()
+ line = next(it)
while line.startswith(" Interface: "):
interface_name = re.split(":|,", line)[1].lstrip()
# Look for keyword: Level-1 or Level-2
while not line.startswith(" Level-"):
- line = it.next()
+ line = next(it)
while line.startswith(" Level-"):
level_name = line.split()[0]
level["level"] = level_name
- line = it.next()
+ line = next(it)
if line.startswith(" Metric:"):
level["metric"] = re.split(":|,", line)[1].lstrip()
while not line.startswith(" Level-") and not line.startswith(
" Interface: "
):
- line = it.next()
+ line = next(it)
if line.startswith(" Level-"):
continue
import sys
import pytest
import json
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ldpd, pytest.mark.ospfd]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["ce1"])
+ switch.add_link(tgen.gears["r1"])
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["ce1"])
- switch.add_link(tgen.gears["r1"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["ce2"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["ce2"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["ce3"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["ce3"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r3"])
-
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import pytest
from time import sleep
-from mininet.topo import Topo
-from mininet.net import Mininet
-from mininet.node import Node, OVSSwitch, Host
-from mininet.log import setLogLevel, info
-from mininet.cli import CLI
-from mininet.link import Intf
-
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lib import topotest
+from lib.topogen import Topogen, get_topogen
fatal_error = ""
#####################################################
-class NetworkTopo(Topo):
- "LDP Test Topology 1"
-
- def build(self, **_opts):
+def build_topo(tgen):
- # Setup Routers
- router = {}
- for i in range(1, 5):
- router[i] = topotest.addRouter(self, "r%s" % i)
-
- # Setup Switches, add Interfaces and Connections
- switch = {}
- # First switch
- switch[0] = self.addSwitch("sw0", cls=topotest.LegacySwitch)
- self.addLink(
- switch[0],
- router[1],
- intfName2="r1-eth0",
- addr1="80:AA:00:00:00:00",
- addr2="00:11:00:01:00:00",
- )
- self.addLink(
- switch[0],
- router[2],
- intfName2="r2-eth0",
- addr1="80:AA:00:00:00:01",
- addr2="00:11:00:02:00:00",
- )
- # Second switch
- switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch)
- self.addLink(
- switch[1],
- router[2],
- intfName2="r2-eth1",
- addr1="80:AA:00:01:00:00",
- addr2="00:11:00:02:00:01",
- )
- self.addLink(
- switch[1],
- router[3],
- intfName2="r3-eth0",
- addr1="80:AA:00:01:00:01",
- addr2="00:11:00:03:00:00",
- )
- self.addLink(
- switch[1],
- router[4],
- intfName2="r4-eth0",
- addr1="80:AA:00:01:00:02",
- addr2="00:11:00:04:00:00",
- )
- # Third switch
- switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch)
- self.addLink(
- switch[2],
- router[2],
- intfName2="r2-eth2",
- addr1="80:AA:00:02:00:00",
- addr2="00:11:00:02:00:02",
- )
- self.addLink(
- switch[2],
- router[3],
- intfName2="r3-eth1",
- addr1="80:AA:00:02:00:01",
- addr2="00:11:00:03:00:01",
- )
+ # Setup Routers
+ for i in range(1, 5):
+ tgen.add_router("r%s" % i)
+
+ # First switch
+ switch = tgen.add_switch("sw0")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ # Second switch
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
+ # Third switch
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
#####################################################
def setup_module(module):
- global topo, net
- global fatal_error
-
print("\n\n** %s: Setup Topology" % module.__name__)
print("******************************************\n")
- print("Cleanup old Mininet runs")
- os.system("sudo mn -c > /dev/null 2>&1")
-
thisDir = os.path.dirname(os.path.realpath(__file__))
- topo = NetworkTopo()
+ tgen = Topogen(build_topo, module.__name__)
+ tgen.start_topology()
- net = Mininet(controller=None, topo=topo)
- net.start()
+ net = tgen.net
# Starting Routers
for i in range(1, 5):
net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i))
net["r%s" % i].loadConf("ospfd", "%s/r%s/ospfd.conf" % (thisDir, i))
net["r%s" % i].loadConf("ldpd", "%s/r%s/ldpd.conf" % (thisDir, i))
- fatal_error = net["r%s" % i].startRouter()
-
- if fatal_error != "":
- break
+ tgen.gears["r%s" % i].start()
# For debugging after starting FRR daemons, uncomment the next line
# CLI(net)
def teardown_module(module):
- global net
-
print("\n\n** %s: Shutdown Topology" % module.__name__)
print("******************************************\n")
-
- # End - Shutdown network
- net.stop()
+ tgen = get_topogen()
+ tgen.stop_topology()
def test_router_running():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_mpls_interfaces():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_mpls_ldp_neighbor_establish():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
else:
# Bail out with error if a router fails to converge
fatal_error = "MPLS LDP neighbors did not establish"
- assert False, "MPLS LDP neighbors did not establish" % ospfStatus
+ assert False, "MPLS LDP neighbors did not establish"
print("MPLS LDP neighbors established.")
def test_mpls_ldp_discovery():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_mpls_ldp_neighbor():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_mpls_ldp_binding():
global fatal_error
- global net
+ net = get_topogen().net
# Skip this test for now until proper sorting of the output
# is implemented
def test_zebra_ipv4_routingTable():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_mpls_table():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_linux_mpls_routes():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_shutdown_check_stderr():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_shutdown_check_memleak():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
if __name__ == "__main__":
- setLogLevel("info")
# To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli
# retval = pytest.main(["-s", "--tb=no"])
retval = pytest.main(["-s"])
import sys
import pytest
import json
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ldpd, pytest.mark.ospfd]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["ce1"])
+ switch.add_link(tgen.gears["r1"])
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["ce1"])
- switch.add_link(tgen.gears["r1"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["ce2"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["ce2"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["ce3"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["ce3"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r3"])
-
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
# OF THIS SOFTWARE.
#
-from copy import deepcopy
-from time import sleep
-import traceback
-import ipaddr
import ipaddress
-import os
import sys
-from lib import topotest
-from lib.topolog import logger
-
-from lib.topogen import TopoRouter, get_topogen
-from lib.topotest import frr_unicode
+import traceback
+from copy import deepcopy
+from time import sleep
# Import common_config to use commomnly used APIs
from lib.common_config import (
- create_common_configuration,
+ create_common_configurations,
+ FRRCFG_FILE,
InvalidCLIError,
- load_config_to_router,
check_address_types,
- generate_ips,
- validate_ip_address,
find_interface_with_greater_ip,
- run_frr_cmd,
- FRRCFG_FILE,
+ generate_ips,
+ get_frr_ipv6_linklocal,
retry,
- get_ipv6_linklocal_address,
- get_frr_ipv6_linklocal
+ run_frr_cmd,
+ validate_ip_address,
)
+from lib.topogen import get_topogen
+from lib.topolog import logger
+from lib.topotest import frr_unicode
-LOGDIR = "/tmp/topotests/"
-TMPDIR = None
+from lib import topotest
-def create_router_bgp(tgen, topo, input_dict=None, build=False, load_config=True):
+def create_router_bgp(tgen, topo=None, input_dict=None, build=False, load_config=True):
"""
API to configure bgp on router
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
+ if topo is None:
+ topo = tgen.json_topo
+
# Flag is used when testing ipv6 over ipv4 or vice-versa
afi_test = False
topo = topo["routers"]
input_dict = deepcopy(input_dict)
+ config_data_dict = {}
+
for router in input_dict.keys():
if "bgp" not in input_dict[router]:
logger.debug("Router %s: 'bgp' not present in input_dict", router)
if type(bgp_data_list) is not list:
bgp_data_list = [bgp_data_list]
+ config_data = []
+
for bgp_data in bgp_data_list:
data_all_bgp = __create_bgp_global(tgen, bgp_data, router, build)
if data_all_bgp:
data_all_bgp = __create_l2vpn_evpn_address_family(
tgen, topo, bgp_data, router, config_data=data_all_bgp
)
+ if data_all_bgp:
+ config_data.extend(data_all_bgp)
- try:
- result = create_common_configuration(
- tgen, router, data_all_bgp, "bgp", build, load_config
- )
- except InvalidCLIError:
- # Traceback
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
+ if config_data:
+ config_data_dict[router] = config_data
+
+ try:
+ result = create_common_configurations(
+ tgen, config_data_dict, "bgp", build, load_config
+ )
+ except InvalidCLIError:
+ logger.error("create_router_bgp", exc_info=True)
+ result = False
logger.debug("Exiting lib API: create_router_bgp()")
return result
Returns
-------
- True or False
+ list of config commands
"""
result = False
logger.debug(
"Router %s: 'local_as' not present in input_dict" "for BGP", router
)
- return False
+ return config_data
local_as = bgp_data.setdefault("local_as", "")
cmd = "router bgp {}".format(local_as)
if router_id:
config_data.append("bgp router-id {}".format(router_id))
+ config_data.append("bgp log-neighbor-changes")
config_data.append("no bgp network import-check")
bgp_peer_grp_data = bgp_data.setdefault("peer-group", {})
tgen = get_topogen()
bgp_data = input_dict["address_family"]
neigh_data = bgp_data[addr_type]["unicast"]["neighbor"]
+ global_connect = input_dict.get("connecttimer", 5)
for name, peer_dict in neigh_data.items():
for dest_link, peer in peer_dict["dest_link"].items():
)
disable_connected = peer.setdefault("disable_connected_check", False)
+ connect = peer.get("connecttimer", global_connect)
keep_alive = peer.setdefault("keepalivetimer", 3)
hold_down = peer.setdefault("holddowntimer", 10)
password = peer.setdefault("password", None)
config_data.append(
"{} timers {} {}".format(neigh_cxt, keep_alive, hold_down)
)
+ if int(connect) != 120:
+ config_data.append("{} timers connect {}".format(neigh_cxt, connect))
+
if graceful_restart:
config_data.append("{} graceful-restart".format(neigh_cxt))
elif graceful_restart == False:
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
try:
-
- global LOGDIR
-
result = create_router_bgp(
tgen, topo, input_dict, build=False, load_config=False
)
if router != dut:
continue
- TMPDIR = os.path.join(LOGDIR, tgen.modname)
-
logger.info("Delete BGP config when BGPd is down in {}".format(router))
- # Reading the config from /tmp/topotests and
- # copy to /etc/frr/bgpd.conf
+ # Reading the config from "rundir" and copy to /etc/frr/bgpd.conf
cmd = "cat {}/{}/{} >> /etc/frr/bgpd.conf".format(
- TMPDIR, router, FRRCFG_FILE
+ tgen.logdir, router, FRRCFG_FILE
)
router_list[router].run(cmd)
@retry(retry_timeout=150)
-def verify_bgp_convergence(tgen, topo, dut=None, expected=True):
+def verify_bgp_convergence(tgen, topo=None, dut=None, expected=True):
"""
API will verify if BGP is converged with in the given time frame.
Running "show bgp summary json" command and verify bgp neighbor
errormsg(str) or True
"""
+ if topo is None:
+ topo = tgen.json_topo
+
result = False
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
tgen = get_topogen()
for router, rnode in tgen.routers().items():
- if 'bgp' not in topo['routers'][router]:
+ if "bgp" not in topo["routers"][router]:
continue
if dut is not None and dut != router:
continue
logger.info("Verifying BGP Convergence on router %s:", router)
- show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json",
- isjson=True)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp vrf all summary json", isjson=True)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
errormsg = "BGP is not running"
data = topo["routers"][bgp_neighbor]["links"]
for dest_link in dest_link_dict.keys():
if dest_link in data:
- peer_details = \
- peer_data[_addr_type][dest_link]
+ peer_details = peer_data[_addr_type][dest_link]
- neighbor_ip = \
- data[dest_link][_addr_type].split(
- "/")[0]
+ neighbor_ip = data[dest_link][_addr_type].split("/")[0]
nh_state = None
- if "ipv4Unicast" in show_bgp_json[vrf] or \
- "ipv6Unicast" in show_bgp_json[vrf]:
- errormsg = ("[DUT: %s] VRF: %s, "
- "ipv4Unicast/ipv6Unicast"
- " address-family present"
- " under l2vpn" % (router,
- vrf))
+ if (
+ "ipv4Unicast" in show_bgp_json[vrf]
+ or "ipv6Unicast" in show_bgp_json[vrf]
+ ):
+ errormsg = (
+ "[DUT: %s] VRF: %s, "
+ "ipv4Unicast/ipv6Unicast"
+ " address-family present"
+ " under l2vpn" % (router, vrf)
+ )
return errormsg
- l2VpnEvpn_data = \
- show_bgp_json[vrf]["l2VpnEvpn"][
- "peers"]
- nh_state = \
- l2VpnEvpn_data[neighbor_ip]["state"]
+ l2VpnEvpn_data = show_bgp_json[vrf]["l2VpnEvpn"][
+ "peers"
+ ]
+ nh_state = l2VpnEvpn_data[neighbor_ip]["state"]
if nh_state == "Established":
no_of_evpn_peer += 1
if no_of_evpn_peer == total_evpn_peer:
- logger.info("[DUT: %s] VRF: %s, BGP is Converged for "
- "epvn peers", router, vrf)
+ logger.info(
+ "[DUT: %s] VRF: %s, BGP is Converged for " "epvn peers",
+ router,
+ vrf,
+ )
result = True
else:
- errormsg = ("[DUT: %s] VRF: %s, BGP is not converged "
- "for evpn peers" % (router, vrf))
+ errormsg = (
+ "[DUT: %s] VRF: %s, BGP is not converged "
+ "for evpn peers" % (router, vrf)
+ )
return errormsg
else:
total_peer = 0
if not check_address_types(addr_type):
continue
- bgp_neighbors = \
- bgp_addr_type[addr_type]["unicast"]["neighbor"]
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
for bgp_neighbor in bgp_neighbors:
- total_peer += \
- len(bgp_neighbors[bgp_neighbor]["dest_link"])
+ total_peer += len(bgp_neighbors[bgp_neighbor]["dest_link"])
no_of_peer = 0
for addr_type in bgp_addr_type.keys():
if not check_address_types(addr_type):
continue
- bgp_neighbors = \
- bgp_addr_type[addr_type]["unicast"]["neighbor"]
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
for bgp_neighbor, peer_data in bgp_neighbors.items():
- for dest_link in peer_data["dest_link"].\
- keys():
- data = \
- topo["routers"][bgp_neighbor]["links"]
- if dest_link in data:
- peer_details = \
- peer_data['dest_link'][dest_link]
- # for link local neighbors
- if "neighbor_type" in peer_details and \
- peer_details["neighbor_type"] == \
- 'link-local':
- intf = topo["routers"][bgp_neighbor][
- "links"][dest_link]["interface"]
- neighbor_ip = get_frr_ipv6_linklocal(
- tgen, bgp_neighbor, intf)
- elif "source_link" in peer_details:
- neighbor_ip = \
- topo["routers"][bgp_neighbor][
- "links"][peer_details[
- 'source_link']][
- addr_type].\
- split("/")[0]
- elif "neighbor_type" in peer_details and \
- peer_details["neighbor_type"] == \
- 'unnumbered':
- neighbor_ip = \
- data[dest_link]["peer-interface"]
- else:
- neighbor_ip = \
- data[dest_link][addr_type].split(
- "/")[0]
- nh_state = None
- neighbor_ip = neighbor_ip.lower()
- if addr_type == "ipv4":
- ipv4_data = show_bgp_json[vrf][
- "ipv4Unicast"]["peers"]
- nh_state = \
- ipv4_data[neighbor_ip]["state"]
- else:
- ipv6_data = show_bgp_json[vrf][
- "ipv6Unicast"]["peers"]
- if neighbor_ip in ipv6_data:
- nh_state = \
- ipv6_data[neighbor_ip]["state"]
+ for dest_link in peer_data["dest_link"].keys():
+ data = topo["routers"][bgp_neighbor]["links"]
+ if dest_link in data:
+ peer_details = peer_data["dest_link"][dest_link]
+ # for link local neighbors
+ if (
+ "neighbor_type" in peer_details
+ and peer_details["neighbor_type"] == "link-local"
+ ):
+ intf = topo["routers"][bgp_neighbor]["links"][
+ dest_link
+ ]["interface"]
+ neighbor_ip = get_frr_ipv6_linklocal(
+ tgen, bgp_neighbor, intf
+ )
+ elif "source_link" in peer_details:
+ neighbor_ip = topo["routers"][bgp_neighbor][
+ "links"
+ ][peer_details["source_link"]][addr_type].split(
+ "/"
+ )[
+ 0
+ ]
+ elif (
+ "neighbor_type" in peer_details
+ and peer_details["neighbor_type"] == "unnumbered"
+ ):
+ neighbor_ip = data[dest_link]["peer-interface"]
+ else:
+ neighbor_ip = data[dest_link][addr_type].split("/")[
+ 0
+ ]
+ nh_state = None
+ neighbor_ip = neighbor_ip.lower()
+ if addr_type == "ipv4":
+ ipv4_data = show_bgp_json[vrf]["ipv4Unicast"][
+ "peers"
+ ]
+ nh_state = ipv4_data[neighbor_ip]["state"]
+ else:
+ ipv6_data = show_bgp_json[vrf]["ipv6Unicast"][
+ "peers"
+ ]
+ if neighbor_ip in ipv6_data:
+ nh_state = ipv6_data[neighbor_ip]["state"]
- if nh_state == "Established":
- no_of_peer += 1
+ if nh_state == "Established":
+ no_of_peer += 1
if no_of_peer == total_peer and no_of_peer > 0:
- logger.info("[DUT: %s] VRF: %s, BGP is Converged",
- router, vrf)
+ logger.info("[DUT: %s] VRF: %s, BGP is Converged", router, vrf)
result = True
else:
- errormsg = ("[DUT: %s] VRF: %s, BGP is not converged"
- % (router, vrf))
+ errormsg = "[DUT: %s] VRF: %s, BGP is not converged" % (router, vrf)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
@retry(retry_timeout=16)
def verify_bgp_community(
- tgen, addr_type, router, network, input_dict=None, vrf=None, bestpath=False, expected=True
+ tgen,
+ addr_type,
+ router,
+ network,
+ input_dict=None,
+ vrf=None,
+ bestpath=False,
+ expected=True,
):
"""
API to veiryf BGP large community is attached in route for any given
create_router_bgp(tgen, topo, router_dict)
logger.info("Applying modified bgp configuration")
- create_router_bgp(tgen, new_topo)
-
+ result = create_router_bgp(tgen, new_topo)
+ if result is not True:
+ result = "Error applying new AS number config"
except Exception as e:
errormsg = traceback.format_exc()
logger.error(errormsg)
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
- return True
+ return result
@retry(retry_timeout=8)
input_dict=None,
seq_id=None,
nexthop=None,
- expected=True
+ expected=True,
):
"""
API will verify BGP attributes set by Route-map for given prefix and
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
- for router, rnode in tgen.routers().iteritems():
+ for router, rnode in tgen.routers().items():
if router != dut:
continue
@retry(retry_timeout=10, initial_wait=2)
def verify_bgp_rib(
- tgen, addr_type, dut, input_dict, next_hop=None, aspath=None, multi_nh=None, expected=True
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=None,
+ aspath=None,
+ multi_nh=None,
+ expected=True,
):
"""
This API is to verify whether bgp rib has any
@retry(retry_timeout=10)
-def verify_graceful_restart(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
+def verify_graceful_restart(
+ tgen, topo, addr_type, input_dict, dut, peer, expected=True
+):
"""
This API is to verify verify_graceful_restart configuration of DUT and
cross verify the same from the peer bgp routerrouter.
@retry(retry_timeout=8)
-def verify_gr_address_family(tgen, topo, addr_type, addr_family, dut, peer, expected=True):
+def verify_gr_address_family(
+ tgen, topo, addr_type, addr_family, dut, peer, expected=True
+):
"""
This API is to verify gr_address_family in the BGP gr capability advertised
by the neighbor router
show_bgp_graceful_json = run_frr_cmd(
rnode,
- "show bgp {} neighbor {} graceful-restart json".format(
- addr_type, neighbor_ip
- ),
+ "show bgp {} neighbor {} graceful-restart json".format(addr_type, neighbor_ip),
isjson=True,
)
ipLen=None,
rd_peer=None,
rt_peer=None,
- expected=True
+ expected=True,
):
"""
API to verify rd and rt value using "sh bgp l2vpn evpn 10.1.1.1"
# ribRequireUnicastRoutes('r1','ipv4','','Customer routes in default',want_unicast_routes)
#
-from lutil import luCommand, luResult, LUtil
+from lib.lutil import luCommand, luResult, LUtil
import json
import re
# OF THIS SOFTWARE.
#
-from collections import OrderedDict
-from datetime import datetime, timedelta
-from time import sleep
-from copy import deepcopy
-from functools import wraps
-from re import search as re_search
-from tempfile import mkdtemp
-
+import ipaddress
import json
import os
-import sys
-import traceback
+import platform
import socket
import subprocess
-import ipaddress
-import platform
+import sys
+import traceback
+from collections import OrderedDict
+from copy import deepcopy
+from datetime import datetime, timedelta
+from functools import wraps
+from re import search as re_search
+from time import sleep
try:
# Imports from python2
- from StringIO import StringIO
import ConfigParser as configparser
except ImportError:
# Imports from python3
- from io import StringIO
import configparser
-from lib.topolog import logger, logger_config
+from lib.micronet import comm_error
from lib.topogen import TopoRouter, get_topogen
-from lib.topotest import interface_set_status, version_cmp, frr_unicode
+from lib.topolog import get_logger, logger
+from lib.topotest import frr_unicode, interface_set_status, version_cmp
+from lib import topotest
FRRCFG_FILE = "frr_json.conf"
FRRCFG_BKUP_FILE = "frr_json_initial.conf"
ERROR_LIST = ["Malformed", "Failure", "Unknown", "Incomplete"]
-ROUTER_LIST = []
####
CD = os.path.dirname(os.path.realpath(__file__))
PYTESTINI_PATH = os.path.join(CD, "../pytest.ini")
-# Creating tmp dir with testsuite name to avoid conflict condition when
-# multiple testsuites run together. All temporary files would be created
-# in this dir and this dir would be removed once testsuite run is
-# completed
-LOGDIR = "/tmp/topotests/"
-TMPDIR = None
-
# NOTE: to save execution logs to log file frrtest_log_dir must be configured
# in `pytest.ini`.
config = configparser.ConfigParser()
],
}
+g_iperf_client_procs = {}
+g_iperf_server_procs = {}
+
+
def is_string(value):
try:
return isinstance(value, basestring)
except NameError:
return isinstance(value, str)
+
if config.has_option("topogen", "verbosity"):
loglevel = config.get("topogen", "verbosity")
- loglevel = loglevel.upper()
+ loglevel = loglevel.lower()
else:
- loglevel = "INFO"
+ loglevel = "info"
if config.has_option("topogen", "frrtest_log_dir"):
frrtest_log_dir = config.get("topogen", "frrtest_log_dir")
frrtest_log_file = frrtest_log_dir + logfile_name + str(time_stamp)
print("frrtest_log_file..", frrtest_log_file)
- logger = logger_config.get_logger(
- name="test_execution_logs", log_level=loglevel, target=frrtest_log_file
+ logger = get_logger(
+ "test_execution_logs", log_level=loglevel, target=frrtest_log_file
)
print("Logs will be sent to logfile: {}".format(frrtest_log_file))
class InvalidCLIError(Exception):
"""Raise when the CLI command is wrong"""
- pass
-
def run_frr_cmd(rnode, cmd, isjson=False):
"""
True or errormsg
"""
- result = True
+ rlist = []
+
for router_name in input_dict.keys():
config_cmd = input_dict[router_name]["raw_config"]
if not isinstance(config_cmd, list):
config_cmd = [config_cmd]
- frr_cfg_file = "{}/{}/{}".format(TMPDIR, router_name, FRRCFG_FILE)
+ frr_cfg_file = "{}/{}/{}".format(tgen.logdir, router_name, FRRCFG_FILE)
with open(frr_cfg_file, "w") as cfg:
for cmd in config_cmd:
cfg.write("{}\n".format(cmd))
- result = load_config_to_router(tgen, router_name)
+ rlist.append(router_name)
- return result
+ # Load config on all routers
+ return load_config_to_routers(tgen, rlist)
-def create_common_configuration(
- tgen, router, data, config_type=None, build=False, load_config=True
+def create_common_configurations(
+ tgen, config_dict, config_type=None, build=False, load_config=True
):
"""
API to create object of class FRRConfig and also create frr_json.conf
Parameters
----------
* `tgen`: tgen object
- * `data`: Configuration data saved in a list.
- * `router` : router id to be configured.
+ * `config_dict`: Configuration data saved in a dict of { router: config-list }
+ * `routers` : list of router id to be configured.
* `config_type` : Syntactic information while writing configuration. Should
be one of the value as mentioned in the config_map below.
* `build` : Only for initial setup phase this is set as True
-------
True or False
"""
- TMPDIR = os.path.join(LOGDIR, tgen.modname)
-
- fname = "{}/{}/{}".format(TMPDIR, router, FRRCFG_FILE)
config_map = OrderedDict(
{
else:
mode = "w"
- try:
- frr_cfg_fd = open(fname, mode)
- if config_type:
- frr_cfg_fd.write(config_map[config_type])
- for line in data:
- frr_cfg_fd.write("{} \n".format(str(line)))
- frr_cfg_fd.write("\n")
-
- except IOError as err:
- logger.error(
- "Unable to open FRR Config File. error(%s): %s" % (err.errno, err.strerror)
- )
- return False
- finally:
- frr_cfg_fd.close()
+ routers = config_dict.keys()
+ for router in routers:
+ fname = "{}/{}/{}".format(tgen.logdir, router, FRRCFG_FILE)
+ try:
+ frr_cfg_fd = open(fname, mode)
+ if config_type:
+ frr_cfg_fd.write(config_map[config_type])
+ for line in config_dict[router]:
+ frr_cfg_fd.write("{} \n".format(str(line)))
+ frr_cfg_fd.write("\n")
+
+ except IOError as err:
+ logger.error("Unable to open FRR Config '%s': %s" % (fname, str(err)))
+ return False
+ finally:
+ frr_cfg_fd.close()
# If configuration applied from build, it will done at last
+ result = True
if not build and load_config:
- load_config_to_router(tgen, router)
+ result = load_config_to_routers(tgen, routers)
- return True
+ return result
+
+
+def create_common_configuration(
+ tgen, router, data, config_type=None, build=False, load_config=True
+):
+ """
+ API to create object of class FRRConfig and also create frr_json.conf
+ file. It will create interface and common configurations and save it to
+ frr_json.conf and load to router
+ Parameters
+ ----------
+ * `tgen`: tgen object
+ * `data`: Configuration data saved in a list.
+ * `router` : router id to be configured.
+ * `config_type` : Syntactic information while writing configuration. Should
+ be one of the value as mentioned in the config_map below.
+ * `build` : Only for initial setup phase this is set as True
+ Returns
+ -------
+ True or False
+ """
+ return create_common_configurations(
+ tgen, {router: data}, config_type, build, load_config
+ )
def kill_router_daemons(tgen, router, daemons, save_config=True):
return True
+def save_initial_config_on_routers(tgen):
+ """Save current configuration on routers to FRRCFG_BKUP_FILE.
+
+ FRRCFG_BKUP_FILE is the file that will be restored when `reset_config_on_routers()`
+ is called.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ """
+ router_list = tgen.routers()
+ target_cfg_fmt = tgen.logdir + "/{}/frr_json_initial.conf"
+
+ # Get all running configs in parallel
+ procs = {}
+ for rname in router_list:
+ logger.info("Fetching running config for router %s", rname)
+ procs[rname] = router_list[rname].popen(
+ ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
+ stdin=None,
+ stdout=open(target_cfg_fmt.format(rname), "w"),
+ stderr=subprocess.PIPE,
+ )
+ for rname, p in procs.items():
+ _, error = p.communicate()
+ if p.returncode:
+ logger.error(
+ "Get running config for %s failed %d: %s", rname, p.returncode, error
+ )
+ raise InvalidCLIError(
+ "vtysh show running error on {}: {}".format(rname, error)
+ )
+
+
def reset_config_on_routers(tgen, routerName=None):
"""
Resets configuration on routers to the snapshot created using input JSON
logger.debug("Entering API: reset_config_on_routers")
+ tgen.cfg_gen += 1
+ gen = tgen.cfg_gen
+
# Trim the router list if needed
router_list = tgen.routers()
if routerName:
- if ((routerName not in ROUTER_LIST) or (routerName not in router_list)):
- logger.debug("Exiting API: reset_config_on_routers: no routers")
+ if routerName not in router_list:
+ logger.warning(
+ "Exiting API: reset_config_on_routers: no router %s",
+ routerName,
+ exc_info=True,
+ )
return True
- router_list = { routerName: router_list[routerName] }
+ router_list = {routerName: router_list[routerName]}
- delta_fmt = TMPDIR + "/{}/delta.conf"
- init_cfg_fmt = TMPDIR + "/{}/frr_json_initial.conf"
- run_cfg_fmt = TMPDIR + "/{}/frr.sav"
+ delta_fmt = tgen.logdir + "/{}/delta-{}.conf"
+ # FRRCFG_BKUP_FILE
+ target_cfg_fmt = tgen.logdir + "/{}/frr_json_initial.conf"
+ run_cfg_fmt = tgen.logdir + "/{}/frr-{}.sav"
#
# Get all running configs in parallel
procs[rname] = router_list[rname].popen(
["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
stdin=None,
- stdout=open(run_cfg_fmt.format(rname), "w"),
+ stdout=open(run_cfg_fmt.format(rname, gen), "w"),
stderr=subprocess.PIPE,
)
for rname, p in procs.items():
_, error = p.communicate()
if p.returncode:
- logger.error("Get running config for %s failed %d: %s", rname, p.returncode, error)
- raise InvalidCLIError("vtysh show running error on {}: {}".format(rname, error))
+ logger.error(
+ "Get running config for %s failed %d: %s", rname, p.returncode, error
+ )
+ raise InvalidCLIError(
+ "vtysh show running error on {}: {}".format(rname, error)
+ )
#
# Get all delta's in parallel
#
procs = {}
for rname in router_list:
- logger.info("Generating delta for router %s to new configuration", rname)
- procs[rname] = subprocess.Popen(
- [ "/usr/lib/frr/frr-reload.py",
- "--test-reset",
- "--input",
- run_cfg_fmt.format(rname),
- "--test",
- init_cfg_fmt.format(rname) ],
+ logger.info(
+ "Generating delta for router %s to new configuration (gen %d)", rname, gen
+ )
+ procs[rname] = tgen.net.popen(
+ [
+ "/usr/lib/frr/frr-reload.py",
+ "--test-reset",
+ "--input",
+ run_cfg_fmt.format(rname, gen),
+ "--test",
+ target_cfg_fmt.format(rname),
+ ],
stdin=None,
- stdout=open(delta_fmt.format(rname), "w"),
+ stdout=open(delta_fmt.format(rname, gen), "w"),
stderr=subprocess.PIPE,
)
for rname, p in procs.items():
_, error = p.communicate()
if p.returncode:
- logger.error("Delta file creation for %s failed %d: %s", rname, p.returncode, error)
+ logger.error(
+ "Delta file creation for %s failed %d: %s", rname, p.returncode, error
+ )
raise InvalidCLIError("frr-reload error for {}: {}".format(rname, error))
#
logger.info("Applying delta config on router %s", rname)
procs[rname] = router_list[rname].popen(
- ["/usr/bin/env", "vtysh", "-f", delta_fmt.format(rname)],
+ ["/usr/bin/env", "vtysh", "-f", delta_fmt.format(rname, gen)],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
for rname, p in procs.items():
output, _ = p.communicate()
- vtysh_command = "vtysh -f {}".format(delta_fmt.format(rname))
+ vtysh_command = "vtysh -f {}".format(delta_fmt.format(rname, gen))
if not p.returncode:
router_list[rname].logger.info(
- '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(vtysh_command, output)
+ '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
)
else:
- router_list[rname].logger.error(
- '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(vtysh_command, output)
+ router_list[rname].logger.warning(
+ '\nvtysh config apply failed => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
+ )
+ logger.error(
+ "Delta file apply for %s failed %d: %s", rname, p.returncode, output
)
- logger.error("Delta file apply for %s failed %d: %s", rname, p.returncode, output)
# We really need to enable this failure; however, currently frr-reload.py
# producing invalid "no" commands as it just preprends "no", but some of the
output, _ = p.communicate()
if p.returncode:
logger.warning(
- "Get running config for %s failed %d: %s", rname, p.returncode, output
+ "Get running config for %s failed %d: %s",
+ rname,
+ p.returncode,
+ output,
)
else:
- logger.info("Configuration on router {} after reset:\n{}".format(rname, output))
+ logger.info(
+ "Configuration on router %s after reset:\n%s", rname, output
+ )
logger.debug("Exiting API: reset_config_on_routers")
return True
-def load_config_to_router(tgen, routerName, save_bkup=False):
+def prep_load_config_to_routers(tgen, *config_name_list):
+ """Create common config for `load_config_to_routers`.
+
+ The common config file is constructed from the list of sub-config files passed as
+ position arguments to this function. Each entry in `config_name_list` is looked for
+ under the router sub-directory in the test directory and those files are
+ concatenated together to create the common config. e.g.,
+
+ # Routers are "r1" and "r2", test file is `example/test_example_foo.py`
+ prepare_load_config_to_routers(tgen, "bgpd.conf", "ospfd.conf")
+
+ When the above call is made the files in
+
+ example/r1/bgpd.conf
+ example/r1/ospfd.conf
+
+ Are concat'd together into a single config file that will be loaded on r1, and
+
+ example/r2/bgpd.conf
+ example/r2/ospfd.conf
+
+ Are concat'd together into a single config file that will be loaded on r2 when
+ the call to `load_config_to_routers` is made.
"""
- Loads configuration on router from the file FRRCFG_FILE.
+
+ routers = tgen.routers()
+ for rname, router in routers.items():
+ destname = "{}/{}/{}".format(tgen.logdir, rname, FRRCFG_FILE)
+ wmode = "w"
+ for cfbase in config_name_list:
+ script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
+ confname = os.path.join(script_dir, "{}/{}".format(rname, cfbase))
+ with open(confname, "r") as cf:
+ with open(destname, wmode) as df:
+ df.write(cf.read())
+ wmode = "a"
+
+
+def load_config_to_routers(tgen, routers, save_bkup=False):
+ """
+ Loads configuration on routers from the file FRRCFG_FILE.
Parameters
----------
* `tgen` : Topogen object
- * `routerName` : router for which configuration to be loaded
+ * `routers` : routers for which configuration is to be loaded
* `save_bkup` : If True, Saves snapshot of FRRCFG_FILE to FRRCFG_BKUP_FILE
+ Returns
+ -------
+ True or False
"""
- logger.debug("Entering API: load_config_to_router")
+ logger.debug("Entering API: load_config_to_routers")
- router_list = tgen.routers()
- for rname in ROUTER_LIST:
- if routerName and rname != routerName:
+ tgen.cfg_gen += 1
+ gen = tgen.cfg_gen
+
+ base_router_list = tgen.routers()
+ router_list = {}
+ for router in routers:
+ if router not in base_router_list:
continue
+ router_list[router] = base_router_list[router]
+
+ frr_cfg_file_fmt = tgen.logdir + "/{}/" + FRRCFG_FILE
+ frr_cfg_save_file_fmt = tgen.logdir + "/{}/{}-" + FRRCFG_FILE
+ frr_cfg_bkup_fmt = tgen.logdir + "/{}/" + FRRCFG_BKUP_FILE
+ procs = {}
+ for rname in router_list:
router = router_list[rname]
try:
- frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE)
- frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_BKUP_FILE)
+ frr_cfg_file = frr_cfg_file_fmt.format(rname)
+ frr_cfg_save_file = frr_cfg_save_file_fmt.format(rname, gen)
+ frr_cfg_bkup = frr_cfg_bkup_fmt.format(rname)
with open(frr_cfg_file, "r+") as cfg:
data = cfg.read()
logger.info(
- "Applying following configuration on router"
- " {}:\n{}".format(rname, data)
+ "Applying following configuration on router %s (gen: %d):\n%s",
+ rname,
+ gen,
+ data,
)
+ # Always save a copy of what we just did
+ with open(frr_cfg_save_file, "w") as bkup:
+ bkup.write(data)
if save_bkup:
with open(frr_cfg_bkup, "w") as bkup:
bkup.write(data)
+ procs[rname] = router_list[rname].popen(
+ ["/usr/bin/env", "vtysh", "-f", frr_cfg_file],
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+ except IOError as err:
+ logger.error(
+ "Unable to open config File. error(%s): %s", err.errno, err.strerror
+ )
+ return False
+ except Exception as error:
+ logger.error("Unable to apply config on %s: %s", rname, str(error))
+ return False
- output = router.vtysh_multicmd(data, pretty_output=False)
- for out_err in ERROR_LIST:
- if out_err.lower() in output.lower():
- raise InvalidCLIError("%s" % output)
+ errors = []
+ for rname, p in procs.items():
+ output, _ = p.communicate()
+ frr_cfg_file = frr_cfg_file_fmt.format(rname)
+ vtysh_command = "vtysh -f " + frr_cfg_file
+ if not p.returncode:
+ router_list[rname].logger.info(
+ '\nvtysh config apply => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
+ )
+ else:
+ router_list[rname].logger.error(
+ '\nvtysh config apply failed => "{}"\nvtysh output <= "{}"'.format(
+ vtysh_command, output
+ )
+ )
+ logger.error(
+ "Config apply for %s failed %d: %s", rname, p.returncode, output
+ )
+ # We can't thorw an exception here as we won't clear the config file.
+ errors.append(
+ InvalidCLIError(
+ "load_config_to_routers error for {}: {}".format(rname, output)
+ )
+ )
- cfg.truncate(0)
+ # Empty the config file or we append to it next time through.
+ with open(frr_cfg_file, "r+") as cfg:
+ cfg.truncate(0)
- except IOError as err:
- errormsg = (
- "Unable to open config File. error(%s):" " %s",
- (err.errno, err.strerror),
+ # Router current configuration to log file or console if
+ # "show_router_config" is defined in "pytest.ini"
+ if show_router_config:
+ procs = {}
+ for rname in router_list:
+ procs[rname] = router_list[rname].popen(
+ ["/usr/bin/env", "vtysh", "-c", "show running-config no-header"],
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
)
- return errormsg
+ for rname, p in procs.items():
+ output, _ = p.communicate()
+ if p.returncode:
+ logger.warning(
+ "Get running config for %s failed %d: %s",
+ rname,
+ p.returncode,
+ output,
+ )
+ else:
+ logger.info("New configuration for router %s:\n%s", rname, output)
- # Router current configuration to log file or console if
- # "show_router_config" is defined in "pytest.ini"
- if show_router_config:
- logger.info("New configuration for router {}:".format(rname))
- new_config = router.run("vtysh -c 'show running'")
- logger.info(new_config)
+ logger.debug("Exiting API: load_config_to_routers")
+ return not errors
- logger.debug("Exiting API: load_config_to_router")
- return True
+
+def load_config_to_router(tgen, routerName, save_bkup=False):
+ """
+ Loads configuration on router from the file FRRCFG_FILE.
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `routerName` : router for which configuration to be loaded
+ * `save_bkup` : If True, Saves snapshot of FRRCFG_FILE to FRRCFG_BKUP_FILE
+ """
+ return load_config_to_routers(tgen, [routerName], save_bkup)
+def reset_with_new_configs(tgen, *cflist):
+ """Reset the router to initial config, then load new configs.
+
+ Resets routers to the initial config state (see `save_initial_config_on_routers()
+ and `reset_config_on_routers()` `), then concat list of router sub-configs together
+ and load onto the routers (see `prep_load_config_to_routers()` and
+ `load_config_to_routers()`)
+ """
+ routers = tgen.routers()
+
+ reset_config_on_routers(tgen)
+ prep_load_config_to_routers(tgen, *cflist)
+ load_config_to_routers(tgen, tgen.routers(), save_bkup=False)
+
def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None):
"""
else:
cmd = "show interface"
for chk_ll in range(0, 60):
- sleep(1/4)
+ sleep(1 / 4)
ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd))
# Fix newlines (make them all the same)
- ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
+ ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
interface = None
ll_per_if_count = 0
for line in ifaces:
# Interface name
- m = re_search('Interface ([a-zA-Z0-9-]+) is', line)
+ m = re_search("Interface ([a-zA-Z0-9-]+) is", line)
if m:
interface = m.group(1).split(" ")[0]
ll_per_if_count = 0
# Interface ip
- m1 = re_search('inet6 (fe80[:a-fA-F0-9]+[\/0-9]+)',
- line)
+ m1 = re_search("inet6 (fe80[:a-fA-F0-9]+/[0-9]+)", line)
if m1:
local = m1.group(1)
ll_per_if_count += 1
if ll_per_if_count > 1:
- linklocal += [["%s-%s" %
- (interface, ll_per_if_count), local]]
+ linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
else:
linklocal += [[interface, local]]
try:
if linklocal:
if intf:
- return [_linklocal[1] for _linklocal in linklocal if _linklocal[0]==intf][0].\
- split("/")[0]
+ return [
+ _linklocal[1]
+ for _linklocal in linklocal
+ if _linklocal[0] == intf
+ ][0].split("/")[0]
return linklocal
except IndexError:
continue
tgen = get_topogen()
router_list = tgen.routers()
- test_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
-
- TMPDIR = os.path.join(LOGDIR, tgen.modname)
+ test_name = os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0]
bundle_procs = {}
for rname, rnode in router_list.items():
logger.info("Spawn collection of support bundle for %s", rname)
- rnode.run("mkdir -p /var/log/frr")
- bundle_procs[rname] = tgen.net[rname].popen(
+ dst_bundle = "{}/{}/support_bundles/{}".format(tgen.logdir, rname, test_name)
+ rnode.run("mkdir -p " + dst_bundle)
+
+ gen_sup_cmd = [
"/usr/lib/frr/generate_support_bundle.py",
- stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- )
+ "--log-dir=" + dst_bundle,
+ ]
+ bundle_procs[rname] = tgen.net[rname].popen(gen_sup_cmd, stdin=None)
for rname, rnode in router_list.items():
- dst_bundle = "{}/{}/support_bundles/{}".format(TMPDIR, rname, test_name)
- src_bundle = "/var/log/frr"
-
+ logger.info("Waiting on support bundle for %s", rname)
output, error = bundle_procs[rname].communicate()
-
- logger.info("Saving support bundle for %s", rname)
if output:
logger.info(
"Output from collecting support bundle for %s:\n%s", rname, output
logger.warning(
"Error from collecting support bundle for %s:\n%s", rname, error
)
- rnode.run("rm -rf {}".format(dst_bundle))
- rnode.run("mkdir -p {}".format(dst_bundle))
- rnode.run("mv -f {}/* {}".format(src_bundle, dst_bundle))
return True
* `tgen` : topogen object
"""
- global TMPDIR, ROUTER_LIST
# Starting topology
tgen.start_topology()
# Starting daemons
router_list = tgen.routers()
- ROUTER_LIST = sorted(
+ routers_sorted = sorted(
router_list.keys(), key=lambda x: int(re_search("[0-9]+", x).group(0))
)
- TMPDIR = os.path.join(LOGDIR, tgen.modname)
linux_ver = ""
router_list = tgen.routers()
- for rname in ROUTER_LIST:
+ for rname in routers_sorted:
router = router_list[rname]
# It will help in debugging the failures, will give more details on which
logger.info("Logging platform related details: \n %s \n", linux_ver)
try:
- os.chdir(TMPDIR)
-
- # Creating router named dir and empty zebra.conf bgpd.conf files
- # inside the current directory
- if os.path.isdir("{}".format(rname)):
- os.system("rm -rf {}".format(rname))
- os.mkdir("{}".format(rname))
- os.system("chmod -R go+rw {}".format(rname))
- os.chdir("{}/{}".format(TMPDIR, rname))
- os.system("touch zebra.conf bgpd.conf")
- else:
- os.mkdir("{}".format(rname))
- os.system("chmod -R go+rw {}".format(rname))
- os.chdir("{}/{}".format(TMPDIR, rname))
- os.system("touch zebra.conf bgpd.conf")
+ os.chdir(tgen.logdir)
+
+ # # Creating router named dir and empty zebra.conf bgpd.conf files
+ # # inside the current directory
+ # if os.path.isdir("{}".format(rname)):
+ # os.system("rm -rf {}".format(rname))
+ # os.mkdir("{}".format(rname))
+ # os.system("chmod -R go+rw {}".format(rname))
+ # os.chdir("{}/{}".format(tgen.logdir, rname))
+ # os.system("touch zebra.conf bgpd.conf")
+ # else:
+ # os.mkdir("{}".format(rname))
+ # os.system("chmod -R go+rw {}".format(rname))
+ # os.chdir("{}/{}".format(tgen.logdir, rname))
+ # os.system("touch zebra.conf bgpd.conf")
except IOError as err:
logger.error("I/O error({0}): {1}".format(err.errno, err.strerror))
# Loading empty zebra.conf file to router, to start the zebra daemon
router.load_config(
- TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(TMPDIR, rname)
+ TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(tgen.logdir, rname)
)
# Loading empty bgpd.conf file to router, to start the bgp daemon
- router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(TMPDIR, rname))
+ router.load_config(
+ TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(tgen.logdir, rname)
+ )
if daemon and "ospfd" in daemon:
# Loading empty ospf.conf file to router, to start the bgp daemon
router.load_config(
- TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(TMPDIR, rname)
+ TopoRouter.RD_OSPF, "{}/{}/ospfd.conf".format(tgen.logdir, rname)
)
if daemon and "ospf6d" in daemon:
# Loading empty ospf.conf file to router, to start the bgp daemon
router.load_config(
- TopoRouter.RD_OSPF6, "{}/{}/ospf6d.conf".format(TMPDIR, rname)
+ TopoRouter.RD_OSPF6, "{}/{}/ospf6d.conf".format(tgen.logdir, rname)
)
if daemon and "pimd" in daemon:
# Loading empty pimd.conf file to router, to start the pim deamon
router.load_config(
- TopoRouter.RD_PIM, "{}/{}/pimd.conf".format(TMPDIR, rname)
+ TopoRouter.RD_PIM, "{}/{}/pimd.conf".format(tgen.logdir, rname)
)
# Starting routers
return ord(routerName[0]) - 97
-def topo_daemons(tgen, topo):
+def topo_daemons(tgen, topo=None):
"""
Returns daemon list required for the suite based on topojson.
"""
daemon_list = []
+ if topo is None:
+ topo = tgen.json_topo
+
router_list = tgen.routers()
- ROUTER_LIST = sorted(
+ routers_sorted = sorted(
router_list.keys(), key=lambda x: int(re_search("[0-9]+", x).group(0))
)
- for rtr in ROUTER_LIST:
+ for rtr in routers_sorted:
if "ospf" in topo["routers"][rtr] and "ospfd" not in daemon_list:
daemon_list.append("ospfd")
router_list = tgen.routers()
for dut in input_dict.keys():
- rnode = tgen.routers()[dut]
+ rnode = router_list[dut]
if "vlan" in input_dict[dut]:
for vlan, interfaces in input_dict[dut]["vlan"].items():
# Adding interface to VLAN
vlan_intf = "{}.{}".format(interface, vlan)
cmd = "ip link add link {} name {} type vlan id {}".format(
- interface,
- vlan_intf,
- vlan
+ interface, vlan_intf, vlan
)
logger.info("[DUT: %s]: Running command: %s", dut, cmd)
rnode.run(cmd)
# Assigning IP address
ifaddr = ipaddress.ip_interface(
u"{}/{}".format(
- frr_unicode(data["ip"]),
- frr_unicode(data["subnet"])
+ frr_unicode(data["ip"]), frr_unicode(data["subnet"])
)
)
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
- rnode = tgen.routers()[router]
+ rnode = tgen.gears[router]
if timeout > 0:
cmd = "timeout {}".format(timeout)
cmdargs += " -s 0 {}".format(str(options))
if cap_file:
- file_name = os.path.join(LOGDIR, tgen.modname, router, cap_file)
+ file_name = os.path.join(tgen.logdir, router, cap_file)
cmdargs += " -w {}".format(str(file_name))
# Remove existing capture file
rnode.run("rm -rf {}".format(file_name))
if not background:
rnode.run(cmdargs)
else:
- rnode.run("nohup {} & /dev/null 2>&1".format(cmdargs))
+ # XXX this & is bogus doesn't work
+ # rnode.run("nohup {} & /dev/null 2>&1".format(cmdargs))
+ rnode.run("nohup {} > /dev/null 2>&1".format(cmdargs))
# Check if tcpdump process is running
if background:
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
- rnode = tgen.routers()[router]
+ rnode = tgen.gears[router]
# Check if tcpdump process is running
result = rnode.run("ps -ef | grep tcpdump")
errormsg = "tcpdump is not running {}".format("tcpdump")
return errormsg
else:
+ # XXX this doesn't work with micronet
ppid = tgen.net.nameToNode[rnode.name].pid
rnode.run("set +m; pkill -P %s tcpdump &> /dev/null" % ppid)
logger.info("Stopped tcpdump capture")
result = False
try:
+ debug_config_dict = {}
+
for router in input_dict.keys():
debug_config = []
if "debug" in input_dict[router]:
log_file = debug_dict.setdefault("log_file", None)
if log_file:
- _log_file = os.path.join(LOGDIR, tgen.modname, log_file)
+ _log_file = os.path.join(tgen.logdir, log_file)
debug_config.append("log file {} \n".format(_log_file))
if type(enable_logs) is list:
for daemon, debug_logs in disable_logs.items():
for debug_log in debug_logs:
debug_config.append("no {}".format(debug_log))
+ if debug_config:
+ debug_config_dict[router] = debug_config
- result = create_common_configuration(
- tgen, router, debug_config, "debug_log_config", build=build
- )
+ result = create_common_configurations(
+ tgen, debug_config_dict, "debug_log_config", build=build
+ )
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
input_dict = deepcopy(input_dict)
try:
+ config_data_dict = {}
+
for c_router, c_data in input_dict.items():
- rnode = tgen.routers()[c_router]
+ rnode = tgen.gears[c_router]
+ config_data = []
if "vrfs" in c_data:
for vrf in c_data["vrfs"]:
- config_data = []
del_action = vrf.setdefault("delete", False)
name = vrf.setdefault("name", None)
table_id = vrf.setdefault("id", None)
cmd = "no vni {}".format(del_vni)
config_data.append(cmd)
- result = create_common_configuration(
- tgen, c_router, config_data, "vrf", build=build
- )
+ if config_data:
+ config_data_dict[c_router] = config_data
+
+ result = create_common_configurations(
+ tgen, config_data_dict, "vrf", build=build
+ )
except InvalidCLIError:
# Traceback
to create
"""
- rnode = tgen.routers()[dut]
+ rnode = tgen.gears[dut]
if create:
cmd = "ip link show {0} >/dev/null || ip link add {0} type dummy".format(name)
if not netmask:
ifaddr = ipaddress.ip_interface(frr_unicode(ip_addr))
else:
- ifaddr = ipaddress.ip_interface(u"{}/{}".format(
- frr_unicode(ip_addr),
- frr_unicode(netmask)
- ))
+ ifaddr = ipaddress.ip_interface(
+ u"{}/{}".format(frr_unicode(ip_addr), frr_unicode(netmask))
+ )
cmd = "ip -{0} a flush {1} scope global && ip a add {2} dev {1} && ip l set {1} up".format(
ifaddr.version, name, ifaddr
)
ineterface
"""
- rnode = tgen.routers()[dut]
+ rnode = tgen.gears[dut]
cmd = "ip link set dev"
if ifaceaction:
def write_test_header(tc_name):
- """ Display message at beginning of test case"""
+ """Display message at beginning of test case"""
count = 20
logger.info("*" * (len(tc_name) + count))
step("START -> Testcase : %s" % tc_name, reset=True)
def write_test_footer(tc_name):
- """ Display message at end of test case"""
+ """Display message at end of test case"""
count = 21
logger.info("=" * (len(tc_name) + count))
logger.info("Testcase : %s -> PASSED", tc_name)
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
try:
- global frr_cfg
+ rlist = []
+
for router in input_dict.keys():
interface_list = input_dict[router]["interface_list"]
status = input_dict[router].setdefault("status", "up")
for intf in interface_list:
- rnode = tgen.routers()[router]
+ rnode = tgen.gears[router]
interface_set_status(rnode, intf, status)
- # Load config to router
- load_config_to_router(tgen, router)
+ rlist.append(router)
+
+ # Load config to routers
+ load_config_to_routers(tgen, rlist)
except Exception as e:
errormsg = traceback.format_exc()
_diag_pct = kwargs.pop("diag_pct", diag_pct)
start_time = datetime.now()
- retry_until = datetime.now() + timedelta(seconds=_retry_timeout + _initial_wait)
+ retry_until = datetime.now() + timedelta(
+ seconds=_retry_timeout + _initial_wait
+ )
if initial_wait > 0:
logger.info("Waiting for [%s]s as initial delay", initial_wait)
# Positive result, but happened after timeout failure, very important to
# note for fixing tests.
- logger.warning("RETRY DIAGNOSTIC: SUCCEED after FAILED with requested timeout of %.1fs; however, succeeded in %.1fs, investigate timeout timing",
- _retry_timeout, (datetime.now() - start_time).total_seconds())
+ logger.warning(
+ "RETRY DIAGNOSTIC: SUCCEED after FAILED with requested timeout of %.1fs; however, succeeded in %.1fs, investigate timeout timing",
+ _retry_timeout,
+ (datetime.now() - start_time).total_seconds(),
+ )
if isinstance(saved_failure, Exception):
- raise saved_failure # pylint: disable=E0702
+ raise saved_failure # pylint: disable=E0702
return saved_failure
except Exception as error:
ret = error
if seconds_left < 0 and saved_failure:
- logger.info("RETRY DIAGNOSTIC: Retry timeout reached, still failing")
+ logger.info(
+ "RETRY DIAGNOSTIC: Retry timeout reached, still failing"
+ )
if isinstance(saved_failure, Exception):
- raise saved_failure # pylint: disable=E0702
+ raise saved_failure # pylint: disable=E0702
return saved_failure
if seconds_left < 0:
logger.info("Retry timeout of %ds reached", _retry_timeout)
saved_failure = ret
- retry_extra_delta = timedelta(seconds=seconds_left + _retry_timeout * _diag_pct)
+ retry_extra_delta = timedelta(
+ seconds=seconds_left + _retry_timeout * _diag_pct
+ )
retry_until = datetime.now() + retry_extra_delta
seconds_left = retry_extra_delta.total_seconds()
return saved_failure
if saved_failure:
- logger.info("RETRY DIAG: [failure] Sleeping %ds until next retry with %.1f retry time left - too see if timeout was too short",
- retry_sleep, seconds_left)
+ logger.info(
+ "RETRY DIAG: [failure] Sleeping %ds until next retry with %.1f retry time left - too see if timeout was too short",
+ retry_sleep,
+ seconds_left,
+ )
else:
- logger.info("Sleeping %ds until next retry with %.1f retry time left",
- retry_sleep, seconds_left)
+ logger.info(
+ "Sleeping %ds until next retry with %.1f retry time left",
+ retry_sleep,
+ seconds_left,
+ )
sleep(retry_sleep)
func_retry._original = func
topo = deepcopy(topo)
try:
+ interface_data_dict = {}
+
for c_router, c_data in topo.items():
interface_data = []
for destRouterLink, data in sorted(c_data["links"].items()):
interface_data.append("ipv6 address {}".format(intf_addr))
# Wait for vrf interfaces to get link local address once they are up
- if not destRouterLink == 'lo' and 'vrf' in topo[c_router][
- 'links'][destRouterLink]:
- vrf = topo[c_router]['links'][destRouterLink]['vrf']
- intf = topo[c_router]['links'][destRouterLink]['interface']
- ll = get_frr_ipv6_linklocal(tgen, c_router, intf=intf,
- vrf = vrf)
+ if (
+ not destRouterLink == "lo"
+ and "vrf" in topo[c_router]["links"][destRouterLink]
+ ):
+ vrf = topo[c_router]["links"][destRouterLink]["vrf"]
+ intf = topo[c_router]["links"][destRouterLink]["interface"]
+ ll = get_frr_ipv6_linklocal(tgen, c_router, intf=intf, vrf=vrf)
if "ipv6-link-local" in data:
intf_addr = c_data["links"][destRouterLink]["ipv6-link-local"]
"network",
"priority",
"cost",
- "mtu_ignore"
+ "mtu_ignore",
]
if "ospf" in data:
interface_data += _create_interfaces_ospf_cfg(
interface_data += _create_interfaces_ospf_cfg(
"ospf6", c_data, data, ospf_keywords + ["area"]
)
+ if interface_data:
+ interface_data_dict[c_router] = interface_data
- result = create_common_configuration(
- tgen, c_router, interface_data, "interface_config", build=build
- )
+ result = create_common_configurations(
+ tgen, interface_data_dict, "interface_config", build=build
+ )
except InvalidCLIError:
# Traceback
input_dict = deepcopy(input_dict)
try:
+ static_routes_list_dict = {}
+
for router in input_dict.keys():
if "static_routes" not in input_dict[router]:
errormsg = "static_routes not present in input_dict"
static_routes_list.append(cmd)
- result = create_common_configuration(
- tgen, router, static_routes_list, "static_route", build=build
- )
+ if static_routes_list:
+ static_routes_list_dict[router] = static_routes_list
+
+ result = create_common_configurations(
+ tgen, static_routes_list_dict, "static_route", build=build
+ )
except InvalidCLIError:
# Traceback
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
try:
+ config_data_dict = {}
+
for router in input_dict.keys():
if "prefix_lists" not in input_dict[router]:
errormsg = "prefix_lists not present in input_dict"
cmd = "no {}".format(cmd)
config_data.append(cmd)
- result = create_common_configuration(
- tgen, router, config_data, "prefix_list", build=build
- )
+ if config_data:
+ config_data_dict[router] = config_data
+
+ result = create_common_configurations(
+ tgen, config_data_dict, "prefix_list", build=build
+ )
except InvalidCLIError:
# Traceback
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
input_dict = deepcopy(input_dict)
try:
+ rmap_data_dict = {}
+
for router in input_dict.keys():
if "route_maps" not in input_dict[router]:
logger.debug("route_maps not present in input_dict")
cmd = "match metric {}".format(metric)
rmap_data.append(cmd)
- result = create_common_configuration(
- tgen, router, rmap_data, "route_maps", build=build
- )
+ if rmap_data:
+ rmap_data_dict[router] = rmap_data
+
+ result = create_common_configurations(
+ tgen, rmap_data_dict, "route_maps", build=build
+ )
except InvalidCLIError:
# Traceback
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
input_dict = deepcopy(input_dict)
try:
+ config_data_dict = {}
+
for router in input_dict.keys():
if "bgp_community_lists" not in input_dict[router]:
errormsg = "bgp_community_lists not present in input_dict"
config_data.append(cmd)
- result = create_common_configuration(
- tgen, router, config_data, "bgp_community_list", build=build
- )
+ if config_data:
+ config_data_dict[router] = config_data
+
+ result = create_common_configurations(
+ tgen, config_data_dict, "bgp_community_list", build=build
+ )
except InvalidCLIError:
# Traceback
logger.debug("Entering lib API: addKernelRoute()")
- rnode = tgen.routers()[router]
+ rnode = tgen.gears[router]
if type(group_addr_range) is not list:
group_addr_range = [group_addr_range]
ip, mask = grp_addr.split("/")
if mask == "32" or mask == "128":
grp_addr = ip
+ else:
+ mask = "32" if addr_type == "ipv4" else "128"
if not re_search(r"{}".format(grp_addr), result) and mask != "0":
errormsg = (
router_list = tgen.routers()
for dut in input_dict.keys():
- rnode = tgen.routers()[dut]
+ rnode = router_list[dut]
if "vxlan" in input_dict[dut]:
for vxlan_dict in input_dict[dut]["vxlan"]:
router_list = tgen.routers()
for dut in input_dict.keys():
- rnode = tgen.routers()[dut]
+ rnode = router_list[dut]
if "brctl" in input_dict[dut]:
for brctl_dict in input_dict[dut]["brctl"]:
router_list = tgen.routers()
for dut in input_dict.keys():
- rnode = tgen.routers()[dut]
+ rnode = router_list[dut]
for intf, mac in input_dict[dut].items():
cmd = "ip link set {} address {}".format(intf, mac)
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
router_list = tgen.routers()
+ if dut not in router_list:
+ return
+
for routerInput in input_dict.keys():
+ # XXX replace with router = dut; rnode = router_list[dut]
for router, rnode in router_list.items():
if router != dut:
continue
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ router_list = tgen.routers()
for router in input_dict.keys():
- if router not in tgen.routers():
+ if router not in router_list:
continue
-
- rnode = tgen.routers()[router]
+ rnode = router_list[router]
for static_route in input_dict[router]["static_routes"]:
addr_type = validate_ip_address(static_route["network"])
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ router_list = tgen.routers()
for router in input_dict.keys():
- if router not in tgen.routers():
+ if router not in router_list:
continue
- rnode = tgen.routers()[router]
+ rnode = router_list[router]
# Show ip prefix list
show_prefix_list = run_frr_cmd(rnode, "show ip prefix-list")
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ router_list = tgen.routers()
for router in input_dict.keys():
- if router not in tgen.routers():
+ if router not in router_list:
continue
- rnode = tgen.routers()[router]
+ rnode = router_list[router]
# Show ip route-map
show_route_maps = rnode.vtysh_cmd("show route-map")
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
- if router not in tgen.routers():
+ router_list = tgen.routers()
+ if router not in router_list:
return False
- rnode = tgen.routers()[router]
+ rnode = router_list[router]
logger.debug(
"Verifying BGP community attributes on dut %s: for %s " "network %s",
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ router_list = tgen.routers()
for router in input_dict.keys():
- if router not in tgen.routers():
+ if router not in router_list:
continue
- rnode = tgen.routers()[router]
+ rnode = router_list[router]
logger.info("Verifying large-community is created for dut %s:", router)
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for dut in input_dict.keys():
- rnode = tgen.routers()[dut]
+ rnode = tgen.gears[dut]
for cli in input_dict[dut]["cli"]:
logger.info(
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for dut in input_dict.keys():
- rnode = tgen.routers()[dut]
+ rnode = tgen.gears[dut]
logger.info("[DUT: %s]: Verifying evpn vni details :", dut)
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
for dut in input_dict.keys():
- rnode = tgen.routers()[dut]
+ rnode = tgen.gears[dut]
logger.info("[DUT: %s]: Verifying vrf vni details :", dut)
return True
-def iperfSendIGMPJoin(
- tgen, server, bindToAddress, l4Type="UDP", join_interval=1, inc_step=0, repeat=0
-):
- """
- Run iperf to send IGMP join and traffic
+class HostApplicationHelper(object):
+ """Helper to track and cleanup per-host based test processes."""
- Parameters:
- -----------
- * `tgen` : Topogen object
- * `l4Type`: string, one of [ TCP, UDP ]
- * `server`: iperf server, from where IGMP join would be sent
- * `bindToAddress`: bind to <host>, an interface or multicast
- address
- * `join_interval`: seconds between periodic bandwidth reports
- * `inc_step`: increamental steps, by default 0
- * `repeat`: Repetition of group, by default 0
+ def __init__(self, tgen=None, base_cmd=None):
+ self.base_cmd_str = ""
+ self.host_procs = {}
+ self.tgen = None
+ self.set_base_cmd(base_cmd if base_cmd else [])
+ if tgen is not None:
+ self.init(tgen)
- returns:
- --------
- errormsg or True
- """
+ def __enter__(self):
+ self.init()
+ return self
- logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ def __exit__(self, type, value, traceback):
+ self.cleanup()
- rnode = tgen.routers()[server]
+ def __str__(self):
+ return "HostApplicationHelper({})".format(self.base_cmd_str)
- iperfArgs = "iperf -s "
-
- # UDP/TCP
- if l4Type == "UDP":
- iperfArgs += "-u "
-
- iperfCmd = iperfArgs
- # Group address range to cover
- if bindToAddress:
- if type(bindToAddress) is not list:
- Address = []
- start = ipaddress.IPv4Address(frr_unicode(bindToAddress))
-
- Address = [start]
- next_ip = start
-
- count = 1
- while count < repeat:
- next_ip += inc_step
- Address.append(next_ip)
- count += 1
- bindToAddress = Address
-
- for bindTo in bindToAddress:
- iperfArgs = iperfCmd
- iperfArgs += "-B %s " % bindTo
-
- # Join interval
- if join_interval:
- iperfArgs += "-i %d " % join_interval
-
- iperfArgs += " &>/dev/null &"
- # Run iperf command to send IGMP join
- logger.debug("[DUT: {}]: Running command: [{}]".format(server, iperfArgs))
- output = rnode.run("set +m; {} sleep 0.5".format(iperfArgs))
-
- # Check if iperf process is running
- if output:
- pid = output.split()[1]
- rnode.run("touch /var/run/frr/iperf_server.pid")
- rnode.run("echo %s >> /var/run/frr/iperf_server.pid" % pid)
+ def set_base_cmd(self, base_cmd):
+ assert isinstance(base_cmd, list) or isinstance(base_cmd, tuple)
+ self.base_cmd = base_cmd
+ if base_cmd:
+ self.base_cmd_str = " ".join(base_cmd)
else:
- errormsg = "IGMP join is not sent for {}. Error: {}".format(bindTo, output)
- logger.error(output)
- return errormsg
+ self.base_cmd_str = ""
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
- return True
-
-
-def iperfSendTraffic(
- tgen,
- client,
- bindToAddress,
- ttl,
- time=0,
- l4Type="UDP",
- inc_step=0,
- repeat=0,
- mappedAddress=None,
-):
- """
- Run iperf to send IGMP join and traffic
-
- Parameters:
- -----------
- * `tgen` : Topogen object
- * `l4Type`: string, one of [ TCP, UDP ]
- * `client`: iperf client, from where iperf traffic would be sent
- * `bindToAddress`: bind to <host>, an interface or multicast
- address
- * `ttl`: time to live
- * `time`: time in seconds to transmit for
- * `inc_step`: increamental steps, by default 0
- * `repeat`: Repetition of group, by default 0
- * `mappedAddress`: Mapped Interface ip address
-
- returns:
- --------
- errormsg or True
- """
-
- logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
-
- rnode = tgen.routers()[client]
-
- iperfArgs = "iperf -c "
+ def init(self, tgen=None):
+ """Initialize the helper with tgen if needed.
- iperfCmd = iperfArgs
- # Group address range to cover
- if bindToAddress:
- if type(bindToAddress) is not list:
- Address = []
- start = ipaddress.IPv4Address(frr_unicode(bindToAddress))
-
- Address = [start]
- next_ip = start
-
- count = 1
- while count < repeat:
- next_ip += inc_step
- Address.append(next_ip)
- count += 1
- bindToAddress = Address
-
- for bindTo in bindToAddress:
- iperfArgs = iperfCmd
- iperfArgs += "%s " % bindTo
-
- # Mapped Interface IP
- if mappedAddress:
- iperfArgs += "-B %s " % mappedAddress
-
- # UDP/TCP
- if l4Type == "UDP":
- iperfArgs += "-u -b 0.012m "
-
- # TTL
- if ttl:
- iperfArgs += "-T %d " % ttl
+ If overridden, need to handle multiple entries but one init. Will be called on
+ object creation if tgen is supplied. Will be called again on __enter__ so should
+ not re-init if already inited.
+ """
+ if self.tgen:
+ assert tgen is None or self.tgen == tgen
+ else:
+ self.tgen = tgen
- # Time
- if time:
- iperfArgs += "-t %d " % time
+ def started_proc(self, host, p):
+ """Called after process started on host.
- iperfArgs += " &>/dev/null &"
+ Return value is passed to `stopping_proc` method."""
+ logger.debug("%s: Doing nothing after starting process", self)
+ return False
- # Run iperf command to send multicast traffic
- logger.debug("[DUT: {}]: Running command: [{}]".format(client, iperfArgs))
- output = rnode.run("set +m; {} sleep 0.5".format(iperfArgs))
+ def stopping_proc(self, host, p, info):
+ """Called after process started on host."""
+ logger.debug("%s: Doing nothing before stopping process", self)
+
+ def _add_host_proc(self, host, p):
+ v = self.started_proc(host, p)
+
+ if host not in self.host_procs:
+ self.host_procs[host] = []
+ logger.debug("%s: %s: tracking process %s", self, host, p)
+ self.host_procs[host].append((p, v))
+
+ def stop_host(self, host):
+ """Stop the process on the host.
+
+ Override to do additional cleanup."""
+ if host in self.host_procs:
+ hlogger = self.tgen.net[host].logger
+ for p, v in self.host_procs[host]:
+ self.stopping_proc(host, p, v)
+ logger.debug("%s: %s: terminating process %s", self, host, p.pid)
+ hlogger.debug("%s: %s: terminating process %s", self, host, p.pid)
+ rc = p.poll()
+ if rc is not None:
+ logger.error(
+ "%s: %s: process early exit %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
+ hlogger.error(
+ "%s: %s: process early exit %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
+ else:
+ p.terminate()
+ p.wait()
+ logger.debug(
+ "%s: %s: terminated process %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
+ hlogger.debug(
+ "%s: %s: terminated process %s: %s",
+ self,
+ host,
+ p.pid,
+ comm_error(p),
+ )
- # Check if iperf process is running
- if output:
- pid = output.split()[1]
- rnode.run("touch /var/run/frr/iperf_client.pid")
- rnode.run("echo %s >> /var/run/frr/iperf_client.pid" % pid)
- else:
- errormsg = "Multicast traffic is not sent for {}. Error {}".format(
- bindTo, output
- )
- logger.error(output)
- return errormsg
+ del self.host_procs[host]
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
- return True
+ def stop_all_hosts(self):
+ hosts = set(self.host_procs)
+ for host in hosts:
+ self.stop_host(host)
+ def cleanup(self):
+ self.stop_all_hosts()
-def kill_iperf(tgen, dut=None, action=None):
- """
- Killing iperf process if running for any router in topology
- Parameters:
- -----------
- * `tgen` : Topogen object
- * `dut` : Any iperf hostname to send igmp prune
- * `action`: to kill igmp join iperf action is remove_join
- to kill traffic iperf action is remove_traffic
+ def run(self, host, cmd_args, **kwargs):
+ cmd = list(self.base_cmd)
+ cmd.extend(cmd_args)
+ p = self.tgen.gears[host].popen(cmd, **kwargs)
+ assert p.poll() is None
+ self._add_host_proc(host, p)
+ return p
- Usage
- ----
- kill_iperf(tgen, dut ="i6", action="remove_join")
+ def check_procs(self):
+ """Check that all current processes are running, log errors if not.
- """
+ Returns: List of stopped processes."""
+ procs = []
- logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ logger.debug("%s: checking procs on hosts %s", self, self.host_procs.keys())
- router_list = tgen.routers()
- for router, rnode in router_list.items():
- # Run iperf command to send IGMP join
- pid_client = rnode.run("cat /var/run/frr/iperf_client.pid")
- pid_server = rnode.run("cat /var/run/frr/iperf_server.pid")
- if action == "remove_join":
- pids = pid_server
- elif action == "remove_traffic":
- pids = pid_client
- else:
- pids = "\n".join([pid_client, pid_server])
- for pid in pids.split("\n"):
- pid = pid.strip()
- if pid.isdigit():
- cmd = "set +m; kill -9 %s &> /dev/null" % pid
- logger.debug("[DUT: {}]: Running command: [{}]".format(router, cmd))
- rnode.run(cmd)
+ for host in self.host_procs:
+ hlogger = self.tgen.net[host].logger
+ for p, _ in self.host_procs[host]:
+ logger.debug("%s: checking %s proc %s", self, host, p)
+ rc = p.poll()
+ if rc is None:
+ continue
+ logger.error(
+ "%s: %s proc exited: %s", self, host, comm_error(p), exc_info=True
+ )
+ hlogger.error(
+ "%s: %s proc exited: %s", self, host, comm_error(p), exc_info=True
+ )
+ procs.append(p)
+ return procs
+
+
+class IPerfHelper(HostApplicationHelper):
+ def __str__(self):
+ return "IPerfHelper()"
+
+ def run_join(
+ self,
+ host,
+ join_addr,
+ l4Type="UDP",
+ join_interval=1,
+ join_intf=None,
+ join_towards=None,
+ ):
+ """
+ Use iperf to send IGMP join and listen to traffic
+
+ Parameters:
+ -----------
+ * `host`: iperf host from where IGMP join would be sent
+ * `l4Type`: string, one of [ TCP, UDP ]
+ * `join_addr`: multicast address (or addresses) to join to
+ * `join_interval`: seconds between periodic bandwidth reports
+ * `join_intf`: the interface to bind the join to
+ * `join_towards`: router whos interface to bind the join to
+
+ returns: Success (bool)
+ """
+
+ iperf_path = self.tgen.net.get_exec_path("iperf")
+
+ assert join_addr
+ if not isinstance(join_addr, list) and not isinstance(join_addr, tuple):
+ join_addr = [ipaddress.IPv4Address(frr_unicode(join_addr))]
+
+ for bindTo in join_addr:
+ iperf_args = [iperf_path, "-s"]
+
+ if l4Type == "UDP":
+ iperf_args.append("-u")
+
+ iperf_args.append("-B")
+ if join_towards:
+ to_intf = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][join_towards][
+ "interface"
+ ]
+ )
+ iperf_args.append("{}%{}".format(str(bindTo), to_intf))
+ elif join_intf:
+ iperf_args.append("{}%{}".format(str(bindTo), join_intf))
+ else:
+ iperf_args.append(str(bindTo))
+
+ if join_interval:
+ iperf_args.append("-i")
+ iperf_args.append(str(join_interval))
+
+ p = self.run(host, iperf_args)
+ if p.poll() is not None:
+ logger.error("IGMP join failed on %s: %s", bindTo, comm_error(p))
+ return False
+ return True
+
+ def run_traffic(
+ self, host, sentToAddress, ttl, time=0, l4Type="UDP", bind_towards=None
+ ):
+ """
+ Run iperf to send IGMP join and traffic
+
+ Parameters:
+ -----------
+ * `host`: iperf host to send traffic from
+ * `l4Type`: string, one of [ TCP, UDP ]
+ * `sentToAddress`: multicast address to send traffic to
+ * `ttl`: time to live
+ * `time`: time in seconds to transmit for
+ * `bind_towards`: Router who's interface the source ip address is got from
+
+ returns: Success (bool)
+ """
+
+ iperf_path = self.tgen.net.get_exec_path("iperf")
+
+ if sentToAddress and not isinstance(sentToAddress, list):
+ sentToAddress = [ipaddress.IPv4Address(frr_unicode(sentToAddress))]
+
+ for sendTo in sentToAddress:
+ iperf_args = [iperf_path, "-c", sendTo]
+
+ # Bind to Interface IP
+ if bind_towards:
+ ifaddr = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][bind_towards]["ipv4"]
+ )
+ ipaddr = ipaddress.IPv4Interface(ifaddr).ip
+ iperf_args.append("-B")
+ iperf_args.append(str(ipaddr))
+
+ # UDP/TCP
+ if l4Type == "UDP":
+ iperf_args.append("-u")
+ iperf_args.append("-b")
+ iperf_args.append("0.012m")
+
+ # TTL
+ if ttl:
+ iperf_args.append("-T")
+ iperf_args.append(str(ttl))
+
+ # Time
+ if time:
+ iperf_args.append("-t")
+ iperf_args.append(str(time))
+
+ p = self.run(host, iperf_args)
+ if p.poll() is not None:
+ logger.error(
+ "mcast traffic send failed for %s: %s", sendTo, comm_error(p)
+ )
+ return False
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
def verify_ip_nht(tgen, input_dict):
logger.debug("Entering lib API: verify_ip_nht()")
+ router_list = tgen.routers()
for router in input_dict.keys():
- if router not in tgen.routers():
+ if router not in router_list:
continue
- rnode = tgen.routers()[router]
+ rnode = router_list[router]
nh_list = input_dict[router]
- if validate_ip_address(nh_list.keys()[0]) is "ipv6":
+ if validate_ip_address(next(iter(nh_list))) == "ipv6":
show_ip_nht = run_frr_cmd(rnode, "show ipv6 nht")
else:
show_ip_nht = run_frr_cmd(rnode, "show ip nht")
logger.debug("Exiting lib API: verify_ip_nht()")
return False
+
+
+def scapy_send_raw_packet(tgen, topo, senderRouter, intf, packet=None):
+ """
+ Using scapy Raw() method to send BSR raw packet from one FRR
+ to other
+
+ Parameters:
+ -----------
+ * `tgen` : Topogen object
+ * `topo` : json file data
+ * `senderRouter` : Sender router
+ * `packet` : packet in raw format
+
+ returns:
+ --------
+ errormsg or True
+ """
+
+ global CD
+ result = ""
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ sender_interface = intf
+ rnode = tgen.routers()[senderRouter]
+
+ for destLink, data in topo["routers"][senderRouter]["links"].items():
+ if "type" in data and data["type"] == "loopback":
+ continue
+
+ if not packet:
+ packet = topo["routers"][senderRouter]["pkt"]["test_packets"][packet][
+ "data"
+ ]
+
+ python3_path = tgen.net.get_exec_path(["python3", "python"])
+ script_path = os.path.join(CD, "send_bsr_packet.py")
+ cmd = "{} {} '{}' '{}' --interval=1 --count=1".format(
+ python3_path, script_path, packet, sender_interface
+ )
+
+ logger.info("Scapy cmd: \n %s", cmd)
+ result = rnode.run(cmd)
+
+ if result == "":
+ return result
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
--- /dev/null
+#!/usr/bin/env python3
+
+"""
+exa-receive.py: Save received routes form ExaBGP into file
+"""
+
+import argparse
+import os
+from sys import stdin
+from datetime import datetime
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+ "--no-timestamp", dest="timestamp", action="store_false", help="Disable timestamps"
+)
+parser.add_argument(
+ "--logdir", default="/tmp/gearlogdir", help="The directory to store the peer log in"
+)
+parser.add_argument("peer", type=int, help="The peer number")
+args = parser.parse_args()
+
+savepath = os.path.join(args.logdir, "peer{}-received.log".format(args.peer))
+routesavefile = open(savepath, "w")
+
+while True:
+ try:
+ line = stdin.readline()
+ if not line:
+ break
+
+ if not args.timestamp:
+ routesavefile.write(line)
+ else:
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
+ routesavefile.write(timestamp + line)
+ routesavefile.flush()
+ except KeyboardInterrupt:
+ pass
+ except IOError:
+ # most likely a signal during readline
+ pass
+
+routesavefile.close()
--- /dev/null
+# -*- coding: utf-8 eval: (yapf-mode 1) -*-
+#
+# August 27 2021, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C. ("LabN")
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import lib.topojson as topojson
+import lib.topogen as topogen
+from lib.topolog import logger
+
+
+def tgen_json(request):
+ logger.info("Creating/starting topogen topology for %s", request.module.__name__)
+
+ tgen = topojson.setup_module_from_json(request.module.__file__)
+ yield tgen
+
+ logger.info("Stopping topogen topology for %s", request.module.__name__)
+ tgen.stop_topology()
+
+
+def topo(tgen):
+ """Make tgen json object available as test argument."""
+ return tgen.json_topo
+
+
+def tgen():
+ """Make global topogen object available as test argument."""
+ return topogen.get_topogen()
import os
import sys
import platform
+
import pytest
-import imp
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.lutil import *
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
customize = None
iproute2Ver = None
def __init__(self, test, testdir):
+ pathname = os.path.join(testdir, "customize.py")
global customize
- customize = imp.load_source("customize", os.path.join(testdir, "customize.py"))
+ if sys.version_info >= (3, 5):
+ import importlib.util
+
+ spec = importlib.util.spec_from_file_location("customize", pathname)
+ customize = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(customize)
+ else:
+ import imp
+
+ customize = imp.load_source("customize", pathname)
self.test = test
self.testdir = testdir
self.scriptdir = testdir
- self.logdir = "/tmp/topotests/{0}.test_{0}".format(test)
+ self.logdir = ""
logger.info("LTemplate: " + test)
def setup_module(self, mod):
"Sets up the pytest environment"
# This function initiates the topology build with Topogen...
- tgen = Topogen(customize.ThisTestTopo, mod.__name__)
+ tgen = Topogen(customize.build_topo, mod.__name__)
# ... and here it calls Mininet initialization functions.
tgen.start_topology()
+ self.logdir = tgen.logdir
+
logger.info("Topology started")
try:
self.prestarthooksuccess = customize.ltemplatePreRouterStartHook()
import re
import sys
import time
-import datetime
import json
import math
import time
from lib.topolog import logger
from lib.topotest import json_cmp
-from mininet.net import Mininet
# L utility functions
"""
import argparse
-import os
import json
+import os
import socket
-import subprocess
import struct
+import subprocess
import sys
import time
+
#
# Functions
#
def interface_name_to_index(name):
"Gets the interface index using its name. Returns None on failure."
- interfaces = json.loads(
- subprocess.check_output('ip -j link show', shell=True))
+ interfaces = json.loads(subprocess.check_output("ip -j link show", shell=True))
for interface in interfaces:
- if interface['ifname'] == name:
- return interface['ifindex']
+ if interface["ifname"] == name:
+ return interface["ifindex"]
return None
# Main code.
#
parser = argparse.ArgumentParser(description="Multicast RX utility")
-parser.add_argument('socket', help='Point to topotest UNIX socket')
-parser.add_argument('group', help='Multicast IP')
-parser.add_argument('interface', help='Interface name')
+parser.add_argument("group", help="Multicast IP")
+parser.add_argument("interface", help="Interface name")
+parser.add_argument("--socket", help="Point to topotest UNIX socket")
parser.add_argument(
- '--send',
- help='Transmit instead of join with interval (defaults to 0.7 sec)',
- type=float, default=0)
+ "--send", help="Transmit instead of join with interval", type=float, default=0
+)
args = parser.parse_args()
ttl = 16
# Get interface index/validate.
ifindex = interface_name_to_index(args.interface)
if ifindex is None:
- sys.stderr.write('Interface {} does not exists\n'.format(args.interface))
+ sys.stderr.write("Interface {} does not exists\n".format(args.interface))
sys.exit(1)
# We need root privileges to set up multicast.
sys.exit(1)
# Wait for topotest to synchronize with us.
-toposock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
-while True:
- try:
- toposock.connect(args.socket)
- break
- except ConnectionRefusedError:
- time.sleep(1)
- continue
+if not args.socket:
+ toposock = None
+else:
+ toposock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
+ while True:
+ try:
+ toposock.connect(args.socket)
+ break
+ except ConnectionRefusedError:
+ time.sleep(1)
+ continue
+ # Set topotest socket non blocking so we can multiplex the main loop.
+ toposock.setblocking(False)
msock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if args.send > 0:
# Prepare multicast bit in that interface.
msock.setsockopt(
- socket.SOL_SOCKET, 25,
- struct.pack("%ds" % len(args.interface),
- args.interface.encode('utf-8')))
+ socket.SOL_SOCKET,
+ 25,
+ struct.pack("%ds" % len(args.interface), args.interface.encode("utf-8")),
+ )
# Set packets TTL.
- msock.setsockopt(
- socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("b", ttl))
+ msock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("b", ttl))
# Block to ensure packet send.
msock.setblocking(True)
- # Set topotest socket non blocking so we can multiplex the main loop.
- toposock.setblocking(False)
else:
multicast_join(msock, ifindex, args.group, port)
+
+def should_exit():
+ if not toposock:
+ # If we are sending then we have slept
+ if not args.send:
+ time.sleep(100)
+ return False
+ else:
+ try:
+ data = toposock.recv(1)
+ if data == b"":
+ print(" -> Connection closed")
+ return True
+ except BlockingIOError:
+ return False
+
+
counter = 0
-while True:
+while not should_exit():
if args.send > 0:
msock.sendto(b"test %d" % counter, (args.group, port))
counter += 1
time.sleep(args.send)
- try:
- data = toposock.recv(1)
- if data == b'':
- print(' -> Connection closed')
- break
- except BlockingIOError:
- continue
-
msock.close()
-
sys.exit(0)
--- /dev/null
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+#
+# July 9 2021, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; see the file COPYING; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+import datetime
+import logging
+import os
+import re
+import shlex
+import subprocess
+import sys
+import tempfile
+import time as time_mod
+import traceback
+
+root_hostname = subprocess.check_output("hostname")
+
+# This allows us to cleanup any leftovers later on
+os.environ["MICRONET_PID"] = str(os.getpid())
+
+
+class Timeout(object):
+ def __init__(self, delta):
+ self.started_on = datetime.datetime.now()
+ self.expires_on = self.started_on + datetime.timedelta(seconds=delta)
+
+ def elapsed(self):
+ elapsed = datetime.datetime.now() - self.started_on
+ return elapsed.total_seconds()
+
+ def is_expired(self):
+ return datetime.datetime.now() > self.expires_on
+
+
+def is_string(value):
+ """Return True if value is a string."""
+ try:
+ return isinstance(value, basestring) # type: ignore
+ except NameError:
+ return isinstance(value, str)
+
+
+def shell_quote(command):
+ """Return command wrapped in single quotes."""
+ if sys.version_info[0] >= 3:
+ return shlex.quote(command)
+ return "'{}'".format(command.replace("'", "'\"'\"'")) # type: ignore
+
+
+def cmd_error(rc, o, e):
+ s = "rc {}".format(rc)
+ o = "\n\tstdout: " + o.strip() if o and o.strip() else ""
+ e = "\n\tstderr: " + e.strip() if e and e.strip() else ""
+ return s + o + e
+
+
+def proc_error(p, o, e):
+ args = p.args if is_string(p.args) else " ".join(p.args)
+ s = "rc {} pid {}\n\targs: {}".format(p.returncode, p.pid, args)
+ o = "\n\tstdout: " + o.strip() if o and o.strip() else ""
+ e = "\n\tstderr: " + e.strip() if e and e.strip() else ""
+ return s + o + e
+
+
+def comm_error(p):
+ rc = p.poll()
+ assert rc is not None
+ if not hasattr(p, "saved_output"):
+ p.saved_output = p.communicate()
+ return proc_error(p, *p.saved_output)
+
+
+class Commander(object): # pylint: disable=R0205
+ """
+ Commander.
+
+ An object that can execute commands.
+ """
+
+ tmux_wait_gen = 0
+
+ def __init__(self, name, logger=None):
+ """Create a Commander."""
+ self.name = name
+ self.last = None
+ self.exec_paths = {}
+ self.pre_cmd = []
+ self.pre_cmd_str = ""
+
+ if not logger:
+ self.logger = logging.getLogger(__name__ + ".commander." + name)
+ else:
+ self.logger = logger
+
+ self.cwd = self.cmd_raises("pwd").strip()
+
+ def set_logger(self, logfile):
+ self.logger = logging.getLogger(__name__ + ".commander." + self.name)
+ if is_string(logfile):
+ handler = logging.FileHandler(logfile, mode="w")
+ else:
+ handler = logging.StreamHandler(logfile)
+
+ fmtstr = "%(asctime)s.%(msecs)03d %(levelname)s: {}({}): %(message)s".format(
+ self.__class__.__name__, self.name
+ )
+ handler.setFormatter(logging.Formatter(fmt=fmtstr))
+ self.logger.addHandler(handler)
+
+ def set_pre_cmd(self, pre_cmd=None):
+ if not pre_cmd:
+ self.pre_cmd = []
+ self.pre_cmd_str = ""
+ else:
+ self.pre_cmd = pre_cmd
+ self.pre_cmd_str = " ".join(self.pre_cmd) + " "
+
+ def __str__(self):
+ return "Commander({})".format(self.name)
+
+ def get_exec_path(self, binary):
+ """Return the full path to the binary executable.
+
+ `binary` :: binary name or list of binary names
+ """
+ if is_string(binary):
+ bins = [binary]
+ else:
+ bins = binary
+ for b in bins:
+ if b in self.exec_paths:
+ return self.exec_paths[b]
+
+ rc, output, _ = self.cmd_status("which " + b, warn=False)
+ if not rc:
+ return os.path.abspath(output.strip())
+ return None
+
+ def get_tmp_dir(self, uniq):
+ return os.path.join(tempfile.mkdtemp(), uniq)
+
+ def test(self, flags, arg):
+ """Run test binary, with flags and arg"""
+ test_path = self.get_exec_path(["test"])
+ rc, output, _ = self.cmd_status([test_path, flags, arg], warn=False)
+ return not rc
+
+ def path_exists(self, path):
+ """Check if path exists."""
+ return self.test("-e", path)
+
+ def _get_cmd_str(self, cmd):
+ if is_string(cmd):
+ return self.pre_cmd_str + cmd
+ cmd = self.pre_cmd + cmd
+ return " ".join(cmd)
+
+ def _get_sub_args(self, cmd, defaults, **kwargs):
+ if is_string(cmd):
+ defaults["shell"] = True
+ pre_cmd = self.pre_cmd_str
+ else:
+ defaults["shell"] = False
+ pre_cmd = self.pre_cmd
+ cmd = [str(x) for x in cmd]
+ defaults.update(kwargs)
+ return pre_cmd, cmd, defaults
+
+ def _popen(self, method, cmd, skip_pre_cmd=False, **kwargs):
+ if sys.version_info[0] >= 3:
+ defaults = {
+ "encoding": "utf-8",
+ "stdout": subprocess.PIPE,
+ "stderr": subprocess.PIPE,
+ }
+ else:
+ defaults = {
+ "stdout": subprocess.PIPE,
+ "stderr": subprocess.PIPE,
+ }
+ pre_cmd, cmd, defaults = self._get_sub_args(cmd, defaults, **kwargs)
+
+ self.logger.debug('%s: %s("%s", kwargs: %s)', self, method, cmd, defaults)
+
+ actual_cmd = cmd if skip_pre_cmd else pre_cmd + cmd
+ p = subprocess.Popen(actual_cmd, **defaults)
+ if not hasattr(p, "args"):
+ p.args = actual_cmd
+ return p, actual_cmd
+
+ def set_cwd(self, cwd):
+ self.logger.warning("%s: 'cd' (%s) does not work outside namespaces", self, cwd)
+ self.cwd = cwd
+
+ def popen(self, cmd, **kwargs):
+ """
+ Creates a pipe with the given `command`.
+
+ Args:
+ command: `str` or `list` of command to open a pipe with.
+ **kwargs: kwargs is eventually passed on to Popen. If `command` is a string
+ then will be invoked with shell=True, otherwise `command` is a list and
+ will be invoked with shell=False.
+
+ Returns:
+ a subprocess.Popen object.
+ """
+ p, _ = self._popen("popen", cmd, **kwargs)
+ return p
+
+ def cmd_status(self, cmd, raises=False, warn=True, stdin=None, **kwargs):
+ """Execute a command."""
+
+ # We are not a shell like mininet, so we need to intercept this
+ chdir = False
+ if not is_string(cmd):
+ cmds = cmd
+ else:
+ # XXX we can drop this when the code stops assuming it works
+ m = re.match(r"cd(\s*|\s+(\S+))$", cmd)
+ if m and m.group(2):
+ self.logger.warning(
+ "Bad call to 'cd' (chdir) emulating, use self.set_cwd():\n%s",
+ "".join(traceback.format_stack(limit=12)),
+ )
+ assert is_string(cmd)
+ chdir = True
+ cmd += " && pwd"
+
+ # If we are going to run under bash then we don't need shell=True!
+ cmds = ["/bin/bash", "-c", cmd]
+
+ pinput = None
+
+ if is_string(stdin) or isinstance(stdin, bytes):
+ pinput = stdin
+ stdin = subprocess.PIPE
+
+ p, actual_cmd = self._popen("cmd_status", cmds, stdin=stdin, **kwargs)
+ stdout, stderr = p.communicate(input=pinput)
+ rc = p.wait()
+
+ # For debugging purposes.
+ self.last = (rc, actual_cmd, cmd, stdout, stderr)
+
+ if rc:
+ if warn:
+ self.logger.warning(
+ "%s: proc failed: %s:", self, proc_error(p, stdout, stderr)
+ )
+ if raises:
+ # error = Exception("stderr: {}".format(stderr))
+ # This annoyingly doesnt' show stderr when printed normally
+ error = subprocess.CalledProcessError(rc, actual_cmd)
+ error.stdout, error.stderr = stdout, stderr
+ raise error
+ elif chdir:
+ self.set_cwd(stdout.strip())
+
+ return rc, stdout, stderr
+
+ def cmd_legacy(self, cmd, **kwargs):
+ """Execute a command with stdout and stderr joined, *IGNORES ERROR*."""
+
+ defaults = {"stderr": subprocess.STDOUT}
+ defaults.update(kwargs)
+ _, stdout, _ = self.cmd_status(cmd, raises=False, **defaults)
+ return stdout
+
+ def cmd_raises(self, cmd, **kwargs):
+ """Execute a command. Raise an exception on errors"""
+
+ rc, stdout, _ = self.cmd_status(cmd, raises=True, **kwargs)
+ assert rc == 0
+ return stdout
+
+ # Run a command in a new window (gnome-terminal, screen, tmux, xterm)
+ def run_in_window(
+ self,
+ cmd,
+ wait_for=False,
+ background=False,
+ name=None,
+ title=None,
+ forcex=False,
+ new_window=False,
+ tmux_target=None,
+ ):
+ """
+ Run a command in a new window (TMUX, Screen or XTerm).
+
+ Args:
+ wait_for: True to wait for exit from command or `str` as channel neme to signal on exit, otherwise False
+ background: Do not change focus to new window.
+ title: Title for new pane (tmux) or window (xterm).
+ name: Name of the new window (tmux)
+ forcex: Force use of X11.
+ new_window: Open new window (instead of pane) in TMUX
+ tmux_target: Target for tmux pane.
+
+ Returns:
+ the pane/window identifier from TMUX (depends on `new_window`)
+ """
+
+ channel = None
+ if is_string(wait_for):
+ channel = wait_for
+ elif wait_for is True:
+ channel = "{}-wait-{}".format(os.getpid(), Commander.tmux_wait_gen)
+ Commander.tmux_wait_gen += 1
+
+ sudo_path = self.get_exec_path(["sudo"])
+ nscmd = sudo_path + " " + self.pre_cmd_str + cmd
+ if "TMUX" in os.environ and not forcex:
+ cmd = [self.get_exec_path("tmux")]
+ if new_window:
+ cmd.append("new-window")
+ cmd.append("-P")
+ if name:
+ cmd.append("-n")
+ cmd.append(name)
+ if tmux_target:
+ cmd.append("-t")
+ cmd.append(tmux_target)
+ else:
+ cmd.append("split-window")
+ cmd.append("-P")
+ cmd.append("-h")
+ if not tmux_target:
+ tmux_target = os.getenv("TMUX_PANE", "")
+ if background:
+ cmd.append("-d")
+ if tmux_target:
+ cmd.append("-t")
+ cmd.append(tmux_target)
+ if title:
+ nscmd = "printf '\033]2;{}\033\\'; {}".format(title, nscmd)
+ if channel:
+ nscmd = 'trap "tmux wait -S {}; exit 0" EXIT; {}'.format(channel, nscmd)
+ cmd.append(nscmd)
+ elif "STY" in os.environ and not forcex:
+ # wait for not supported in screen for now
+ channel = None
+ cmd = [self.get_exec_path("screen")]
+ if not os.path.exists(
+ "/run/screen/S-{}/{}".format(os.environ["USER"], os.environ["STY"])
+ ):
+ cmd = ["sudo", "-u", os.environ["SUDO_USER"]] + cmd
+ cmd.append(nscmd)
+ elif "DISPLAY" in os.environ:
+ # We need it broken up for xterm
+ user_cmd = cmd
+ cmd = [self.get_exec_path("xterm")]
+ if "SUDO_USER" in os.environ:
+ cmd = [self.get_exec_path("sudo"), "-u", os.environ["SUDO_USER"]] + cmd
+ if title:
+ cmd.append("-T")
+ cmd.append(title)
+ cmd.append("-e")
+ cmd.append(sudo_path)
+ cmd.extend(self.pre_cmd)
+ cmd.extend(["bash", "-c", user_cmd])
+ # if channel:
+ # return self.cmd_raises(cmd, skip_pre_cmd=True)
+ # else:
+ p = self.popen(
+ cmd,
+ skip_pre_cmd=True,
+ stdin=None,
+ shell=False,
+ )
+ time_mod.sleep(2)
+ if p.poll() is not None:
+ self.logger.error("%s: Failed to launch xterm: %s", self, comm_error(p))
+ return p
+ else:
+ self.logger.error(
+ "DISPLAY, STY, and TMUX not in environment, can't open window"
+ )
+ raise Exception("Window requestd but TMUX, Screen and X11 not available")
+
+ pane_info = self.cmd_raises(cmd, skip_pre_cmd=True).strip()
+
+ # Re-adjust the layout
+ if "TMUX" in os.environ:
+ self.cmd_status(
+ "tmux select-layout -t {} tiled".format(
+ pane_info if not tmux_target else tmux_target
+ ),
+ skip_pre_cmd=True,
+ )
+
+ # Wait here if we weren't handed the channel to wait for
+ if channel and wait_for is True:
+ cmd = [self.get_exec_path("tmux"), "wait", channel]
+ self.cmd_status(cmd, skip_pre_cmd=True)
+
+ return pane_info
+
+ def delete(self):
+ pass
+
+
+class LinuxNamespace(Commander):
+ """
+ A linux Namespace.
+
+ An object that creates and executes commands in a linux namespace
+ """
+
+ def __init__(
+ self,
+ name,
+ net=True,
+ mount=True,
+ uts=True,
+ cgroup=False,
+ ipc=False,
+ pid=False,
+ time=False,
+ user=False,
+ set_hostname=True,
+ private_mounts=None,
+ logger=None,
+ ):
+ """
+ Create a new linux namespace.
+
+ Args:
+ name: Internal name for the namespace.
+ net: Create network namespace.
+ mount: Create network namespace.
+ uts: Create UTS (hostname) namespace.
+ cgroup: Create cgroup namespace.
+ ipc: Create IPC namespace.
+ pid: Create PID namespace, also mounts new /proc.
+ time: Create time namespace.
+ user: Create user namespace, also keeps capabilities.
+ set_hostname: Set the hostname to `name`, uts must also be True.
+ private_mounts: List of strings of the form
+ "[/external/path:]/internal/path. If no external path is specified a
+ tmpfs is mounted on the internal path. Any paths specified are first
+ passed to `mkdir -p`.
+ logger: Passed to superclass.
+ """
+ super(LinuxNamespace, self).__init__(name, logger)
+
+ self.logger.debug("%s: Creating", self)
+
+ self.intfs = []
+
+ nslist = []
+ cmd = ["/usr/bin/unshare"]
+ flags = "-"
+ self.ifnetns = {}
+
+ if cgroup:
+ nslist.append("cgroup")
+ flags += "C"
+ if ipc:
+ nslist.append("ipc")
+ flags += "i"
+ if mount:
+ nslist.append("mnt")
+ flags += "m"
+ if net:
+ nslist.append("net")
+ flags += "n"
+ if pid:
+ nslist.append("pid")
+ flags += "p"
+ cmd.append("--mount-proc")
+ if time:
+ # XXX this filename is probably wrong
+ nslist.append("time")
+ flags += "T"
+ if user:
+ nslist.append("user")
+ flags += "U"
+ cmd.append("--keep-caps")
+ if uts:
+ nslist.append("uts")
+ cmd.append("--uts")
+
+ cmd.append(flags)
+ cmd.append("/bin/cat")
+
+ # Using cat and a stdin PIPE is nice as it will exit when we do. However, we
+ # also detach it from the pgid so that signals do not propagate to it. This is
+ # b/c it would exit early (e.g., ^C) then, at least the main micronet proc which
+ # has no other processes like frr daemons running, will take the main network
+ # namespace with it, which will remove the bridges and the veth pair (because
+ # the bridge side veth is deleted).
+ self.logger.debug("%s: creating namespace process: %s", self, cmd)
+ p = subprocess.Popen(
+ cmd,
+ stdin=subprocess.PIPE,
+ stdout=open("/dev/null", "w"),
+ stderr=open("/dev/null", "w"),
+ preexec_fn=os.setsid, # detach from pgid so signals don't propogate
+ shell=False,
+ )
+ self.p = p
+ self.pid = p.pid
+
+ self.logger.debug("%s: namespace pid: %d", self, self.pid)
+
+ # -----------------------------------------------
+ # Now let's wait until unshare completes it's job
+ # -----------------------------------------------
+ timeout = Timeout(30)
+ while p.poll() is None and not timeout.is_expired():
+ for fname in tuple(nslist):
+ ours = os.readlink("/proc/self/ns/{}".format(fname))
+ theirs = os.readlink("/proc/{}/ns/{}".format(self.pid, fname))
+ # See if their namespace is different
+ if ours != theirs:
+ nslist.remove(fname)
+ if not nslist:
+ break
+ elapsed = int(timeout.elapsed())
+ if elapsed <= 3:
+ time_mod.sleep(0.1)
+ elif elapsed > 10:
+ self.logger.warning("%s: unshare taking more than %ss", self, elapsed)
+ time_mod.sleep(3)
+ else:
+ self.logger.info("%s: unshare taking more than %ss", self, elapsed)
+ time_mod.sleep(1)
+ assert p.poll() is None, "unshare unexpectedly exited!"
+ assert not nslist, "unshare never unshared!"
+
+ # Set pre-command based on our namespace proc
+ self.base_pre_cmd = ["/usr/bin/nsenter", "-a", "-t", str(self.pid)]
+ if not pid:
+ self.base_pre_cmd.append("-F")
+ self.set_pre_cmd(self.base_pre_cmd + ["--wd=" + self.cwd])
+
+ # Remount /sys to pickup any changes
+ self.cmd_raises("mount -t sysfs sysfs /sys")
+
+ # Set the hostname to the namespace name
+ if uts and set_hostname:
+ # Debugging get the root hostname
+ self.cmd_raises("hostname " + self.name)
+ nroot = subprocess.check_output("hostname")
+ if root_hostname != nroot:
+ result = self.p.poll()
+ assert root_hostname == nroot, "STATE of namespace process {}".format(
+ result
+ )
+
+ if private_mounts:
+ if is_string(private_mounts):
+ private_mounts = [private_mounts]
+ for m in private_mounts:
+ s = m.split(":", 1)
+ if len(s) == 1:
+ self.tmpfs_mount(s[0])
+ else:
+ self.bind_mount(s[0], s[1])
+
+ o = self.cmd_legacy("ls -l /proc/{}/ns".format(self.pid))
+ self.logger.debug("namespaces:\n %s", o)
+
+ # Doing this here messes up all_protocols ipv6 check
+ self.cmd_raises("ip link set lo up")
+
+ def __str__(self):
+ return "LinuxNamespace({})".format(self.name)
+
+ def tmpfs_mount(self, inner):
+ self.cmd_raises("mkdir -p " + inner)
+ self.cmd_raises("mount -n -t tmpfs tmpfs " + inner)
+
+ def bind_mount(self, outer, inner):
+ self.cmd_raises("mkdir -p " + inner)
+ self.cmd_raises("mount --rbind {} {} ".format(outer, inner))
+
+ def add_netns(self, ns):
+ self.logger.debug("Adding network namespace %s", ns)
+
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ if os.path.exists("/run/netns/{}".format(ns)):
+ self.logger.warning("%s: Removing existing nsspace %s", self, ns)
+ try:
+ self.delete_netns(ns)
+ except Exception as ex:
+ self.logger.warning(
+ "%s: Couldn't remove existing nsspace %s: %s",
+ self,
+ ns,
+ str(ex),
+ exc_info=True,
+ )
+ self.cmd_raises([ip_path, "netns", "add", ns])
+
+ def delete_netns(self, ns):
+ self.logger.debug("Deleting network namespace %s", ns)
+
+ ip_path = self.get_exec_path("ip")
+ assert ip_path, "XXX missing ip command!"
+ self.cmd_raises([ip_path, "netns", "delete", ns])
+
+ def set_intf_netns(self, intf, ns, up=False):
+ # In case a user hard-codes 1 thinking it "resets"
+ ns = str(ns)
+ if ns == "1":
+ ns = str(self.pid)
+
+ self.logger.debug("Moving interface %s to namespace %s", intf, ns)
+
+ cmd = "ip link set {} netns " + ns
+ if up:
+ cmd += " up"
+ self.intf_ip_cmd(intf, cmd)
+ if ns == str(self.pid):
+ # If we are returning then remove from dict
+ if intf in self.ifnetns:
+ del self.ifnetns[intf]
+ else:
+ self.ifnetns[intf] = ns
+
+ def reset_intf_netns(self, intf):
+ self.logger.debug("Moving interface %s to default namespace", intf)
+ self.set_intf_netns(intf, str(self.pid))
+
+ def intf_ip_cmd(self, intf, cmd):
+ """Run an ip command for considering an interfaces possible namespace.
+
+ `cmd` - format is run using the interface name on the command
+ """
+ if intf in self.ifnetns:
+ assert cmd.startswith("ip ")
+ cmd = "ip -n " + self.ifnetns[intf] + cmd[2:]
+ self.cmd_raises(cmd.format(intf))
+
+ def set_cwd(self, cwd):
+ # Set pre-command based on our namespace proc
+ self.logger.debug("%s: new CWD %s", self, cwd)
+ self.set_pre_cmd(self.base_pre_cmd + ["--wd=" + cwd])
+
+ def register_interface(self, ifname):
+ if ifname not in self.intfs:
+ self.intfs.append(ifname)
+
+ def delete(self):
+ if self.p and self.p.poll() is None:
+ if sys.version_info[0] >= 3:
+ try:
+ self.p.terminate()
+ self.p.communicate(timeout=10)
+ except subprocess.TimeoutExpired:
+ self.p.kill()
+ self.p.communicate(timeout=2)
+ else:
+ self.p.kill()
+ self.p.communicate()
+ self.set_pre_cmd(["/bin/false"])
+
+
+class SharedNamespace(Commander):
+ """
+ Share another namespace.
+
+ An object that executes commands in an existing pid's linux namespace
+ """
+
+ def __init__(self, name, pid, logger=None):
+ """
+ Share a linux namespace.
+
+ Args:
+ name: Internal name for the namespace.
+ pid: PID of the process to share with.
+ """
+ super(SharedNamespace, self).__init__(name, logger)
+
+ self.logger.debug("%s: Creating", self)
+
+ self.pid = pid
+ self.intfs = []
+
+ # Set pre-command based on our namespace proc
+ self.set_pre_cmd(
+ ["/usr/bin/nsenter", "-a", "-t", str(self.pid), "--wd=" + self.cwd]
+ )
+
+ def __str__(self):
+ return "SharedNamespace({})".format(self.name)
+
+ def set_cwd(self, cwd):
+ # Set pre-command based on our namespace proc
+ self.logger.debug("%s: new CWD %s", self, cwd)
+ self.set_pre_cmd(["/usr/bin/nsenter", "-a", "-t", str(self.pid), "--wd=" + cwd])
+
+ def register_interface(self, ifname):
+ if ifname not in self.intfs:
+ self.intfs.append(ifname)
+
+
+class Bridge(SharedNamespace):
+ """
+ A linux bridge.
+ """
+
+ next_brid_ord = 0
+
+ @classmethod
+ def _get_next_brid(cls):
+ brid_ord = cls.next_brid_ord
+ cls.next_brid_ord += 1
+ return brid_ord
+
+ def __init__(self, name=None, unet=None, logger=None):
+ """Create a linux Bridge."""
+
+ self.unet = unet
+ self.brid_ord = self._get_next_brid()
+ if name:
+ self.brid = name
+ else:
+ self.brid = "br{}".format(self.brid_ord)
+ name = self.brid
+
+ super(Bridge, self).__init__(name, unet.pid, logger)
+
+ self.logger.debug("Bridge: Creating")
+
+ assert len(self.brid) <= 16 # Make sure fits in IFNAMSIZE
+ self.cmd_raises("ip link delete {} || true".format(self.brid))
+ self.cmd_raises("ip link add {} type bridge".format(self.brid))
+ self.cmd_raises("ip link set {} up".format(self.brid))
+
+ self.logger.debug("%s: Created, Running", self)
+
+ def __str__(self):
+ return "Bridge({})".format(self.brid)
+
+ def delete(self):
+ """Stop the bridge (i.e., delete the linux resources)."""
+
+ rc, o, e = self.cmd_status("ip link show {}".format(self.brid), warn=False)
+ if not rc:
+ rc, o, e = self.cmd_status(
+ "ip link delete {}".format(self.brid), warn=False
+ )
+ if rc:
+ self.logger.error(
+ "%s: error deleting bridge %s: %s",
+ self,
+ self.brid,
+ cmd_error(rc, o, e),
+ )
+ else:
+ self.logger.debug("%s: Deleted.", self)
+
+
+class Micronet(LinuxNamespace): # pylint: disable=R0205
+ """
+ Micronet.
+ """
+
+ def __init__(self):
+ """Create a Micronet."""
+
+ self.hosts = {}
+ self.switches = {}
+ self.links = {}
+ self.macs = {}
+ self.rmacs = {}
+
+ super(Micronet, self).__init__("micronet", mount=True, net=True, uts=True)
+
+ self.logger.debug("%s: Creating", self)
+
+ def __str__(self):
+ return "Micronet()"
+
+ def __getitem__(self, key):
+ if key in self.switches:
+ return self.switches[key]
+ return self.hosts[key]
+
+ def add_host(self, name, cls=LinuxNamespace, **kwargs):
+ """Add a host to micronet."""
+
+ self.logger.debug("%s: add_host %s", self, name)
+
+ self.hosts[name] = cls(name, **kwargs)
+ # Create a new mounted FS for tracking nested network namespaces creatd by the
+ # user with `ip netns add`
+ self.hosts[name].tmpfs_mount("/run/netns")
+
+ def add_link(self, name1, name2, if1, if2):
+ """Add a link between switch and host to micronet."""
+ isp2p = False
+ if name1 in self.switches:
+ assert name2 in self.hosts
+ elif name2 in self.switches:
+ assert name1 in self.hosts
+ name1, name2 = name2, name1
+ if1, if2 = if2, if1
+ else:
+ # p2p link
+ assert name1 in self.hosts
+ assert name2 in self.hosts
+ isp2p = True
+
+ lname = "{}:{}-{}:{}".format(name1, if1, name2, if2)
+ self.logger.debug("%s: add_link %s%s", self, lname, " p2p" if isp2p else "")
+ self.links[lname] = (name1, if1, name2, if2)
+
+ # And create the veth now.
+ if isp2p:
+ lhost, rhost = self.hosts[name1], self.hosts[name2]
+ lifname = "i1{:x}".format(lhost.pid)
+ rifname = "i2{:x}".format(rhost.pid)
+ self.cmd_raises(
+ "ip link add {} type veth peer name {}".format(lifname, rifname)
+ )
+
+ self.cmd_raises("ip link set {} netns {}".format(lifname, lhost.pid))
+ lhost.cmd_raises("ip link set {} name {}".format(lifname, if1))
+ lhost.cmd_raises("ip link set {} up".format(if1))
+ lhost.register_interface(if1)
+
+ self.cmd_raises("ip link set {} netns {}".format(rifname, rhost.pid))
+ rhost.cmd_raises("ip link set {} name {}".format(rifname, if2))
+ rhost.cmd_raises("ip link set {} up".format(if2))
+ rhost.register_interface(if2)
+ else:
+ switch = self.switches[name1]
+ host = self.hosts[name2]
+
+ assert len(if1) <= 16 and len(if2) <= 16 # Make sure fits in IFNAMSIZE
+
+ self.logger.debug("%s: Creating veth pair for link %s", self, lname)
+ self.cmd_raises(
+ "ip link add {} type veth peer name {} netns {}".format(
+ if1, if2, host.pid
+ )
+ )
+ self.cmd_raises("ip link set {} netns {}".format(if1, switch.pid))
+ switch.register_interface(if1)
+ host.register_interface(if2)
+ self.cmd_raises("ip link set {} master {}".format(if1, switch.brid))
+ self.cmd_raises("ip link set {} up".format(if1))
+ host.cmd_raises("ip link set {} up".format(if2))
+
+ # Cache the MAC values, and reverse mapping
+ self.get_mac(name1, if1)
+ self.get_mac(name2, if2)
+
+ def add_switch(self, name):
+ """Add a switch to micronet."""
+
+ self.logger.debug("%s: add_switch %s", self, name)
+ self.switches[name] = Bridge(name, self)
+
+ def get_mac(self, name, ifname):
+ if name in self.hosts:
+ dev = self.hosts[name]
+ else:
+ dev = self.switches[name]
+
+ if (name, ifname) not in self.macs:
+ _, output, _ = dev.cmd_status("ip -o link show " + ifname)
+ m = re.match(".*link/(loopback|ether) ([0-9a-fA-F:]+) .*", output)
+ mac = m.group(2)
+ self.macs[(name, ifname)] = mac
+ self.rmacs[mac] = (name, ifname)
+
+ return self.macs[(name, ifname)]
+
+ def delete(self):
+ """Delete the micronet topology."""
+
+ self.logger.debug("%s: Deleting.", self)
+
+ for lname, (_, _, rname, rif) in self.links.items():
+ host = self.hosts[rname]
+
+ self.logger.debug("%s: Deleting veth pair for link %s", self, lname)
+
+ rc, o, e = host.cmd_status("ip link delete {}".format(rif), warn=False)
+ if rc:
+ self.logger.error(
+ "Error deleting veth pair %s: %s", lname, cmd_error(rc, o, e)
+ )
+
+ self.links = {}
+
+ for host in self.hosts.values():
+ try:
+ host.delete()
+ except Exception as error:
+ self.logger.error(
+ "%s: error while deleting host %s: %s", self, host, error
+ )
+
+ self.hosts = {}
+
+ for switch in self.switches.values():
+ try:
+ switch.delete()
+ except Exception as error:
+ self.logger.error(
+ "%s: error while deleting switch %s: %s", self, switch, error
+ )
+ self.switches = {}
+
+ self.logger.debug("%s: Deleted.", self)
+
+ super(Micronet, self).delete()
+
+
+# ---------------------------
+# Root level utility function
+# ---------------------------
+
+
+def get_exec_path(binary):
+ base = Commander("base")
+ return base.get_exec_path(binary)
+
+
+commander = Commander("micronet")
--- /dev/null
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+#
+# July 24 2021, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; see the file COPYING; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+import argparse
+import logging
+import os
+import pty
+import re
+import readline
+import select
+import socket
+import subprocess
+import sys
+import tempfile
+import termios
+import tty
+
+
+ENDMARKER = b"\x00END\x00"
+
+
+def lineiter(sock):
+ s = ""
+ while True:
+ sb = sock.recv(256)
+ if not sb:
+ return
+
+ s += sb.decode("utf-8")
+ i = s.find("\n")
+ if i != -1:
+ yield s[:i]
+ s = s[i + 1 :]
+
+
+def spawn(unet, host, cmd):
+ if sys.stdin.isatty():
+ old_tty = termios.tcgetattr(sys.stdin)
+ tty.setraw(sys.stdin.fileno())
+ try:
+ master_fd, slave_fd = pty.openpty()
+
+ # use os.setsid() make it run in a new process group, or bash job
+ # control will not be enabled
+ p = unet.hosts[host].popen(
+ cmd,
+ preexec_fn=os.setsid,
+ stdin=slave_fd,
+ stdout=slave_fd,
+ stderr=slave_fd,
+ universal_newlines=True,
+ )
+
+ while p.poll() is None:
+ r, w, e = select.select([sys.stdin, master_fd], [], [], 0.25)
+ if sys.stdin in r:
+ d = os.read(sys.stdin.fileno(), 10240)
+ os.write(master_fd, d)
+ elif master_fd in r:
+ o = os.read(master_fd, 10240)
+ if o:
+ os.write(sys.stdout.fileno(), o)
+ finally:
+ # restore tty settings back
+ if sys.stdin.isatty():
+ termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
+
+
+def doline(unet, line, writef):
+ def host_cmd_split(unet, cmd):
+ csplit = cmd.split()
+ for i, e in enumerate(csplit):
+ if e not in unet.hosts:
+ break
+ hosts = csplit[:i]
+ if not hosts:
+ hosts = sorted(unet.hosts.keys())
+ cmd = " ".join(csplit[i:])
+ return hosts, cmd
+
+ line = line.strip()
+ m = re.match(r"^(\S+)(?:\s+(.*))?$", line)
+ if not m:
+ return True
+
+ cmd = m.group(1)
+ oargs = m.group(2) if m.group(2) else ""
+ if cmd == "q" or cmd == "quit":
+ return False
+ if cmd == "hosts":
+ writef("%% hosts: %s\n" % " ".join(sorted(unet.hosts.keys())))
+ elif cmd in ["term", "vtysh", "xterm"]:
+ args = oargs.split()
+ if not args or (len(args) == 1 and args[0] == "*"):
+ args = sorted(unet.hosts.keys())
+ hosts = [unet.hosts[x] for x in args]
+ for host in hosts:
+ if cmd == "t" or cmd == "term":
+ host.run_in_window("bash")
+ elif cmd == "v" or cmd == "vtysh":
+ host.run_in_window("vtysh")
+ elif cmd == "x" or cmd == "xterm":
+ host.run_in_window("bash", forcex=True)
+ elif cmd == "sh":
+ hosts, cmd = host_cmd_split(unet, oargs)
+ for host in hosts:
+ if sys.stdin.isatty():
+ spawn(unet, host, cmd)
+ else:
+ if len(hosts) > 1:
+ writef("------ Host: %s ------\n" % host)
+ output = unet.hosts[host].cmd_legacy(cmd)
+ writef(output)
+ if len(hosts) > 1:
+ writef("------- End: %s ------\n" % host)
+ writef("\n")
+ elif cmd == "h" or cmd == "help":
+ writef(
+ """
+Commands:
+ help :: this help
+ sh [hosts] <shell-command> :: execute <shell-command> on <host>
+ term [hosts] :: open shell terminals for hosts
+ vtysh [hosts] :: open vtysh terminals for hosts
+ [hosts] <vtysh-command> :: execute vtysh-command on hosts\n\n"""
+ )
+ else:
+ hosts, cmd = host_cmd_split(unet, line)
+ for host in hosts:
+ if len(hosts) > 1:
+ writef("------ Host: %s ------\n" % host)
+ output = unet.hosts[host].cmd_legacy('vtysh -c "{}"'.format(cmd))
+ writef(output)
+ if len(hosts) > 1:
+ writef("------- End: %s ------\n" % host)
+ writef("\n")
+ return True
+
+
+def cli_server_setup(unet):
+ sockdir = tempfile.mkdtemp("-sockdir", "pyt")
+ sockpath = os.path.join(sockdir, "cli-server.sock")
+ try:
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.settimeout(10)
+ sock.bind(sockpath)
+ sock.listen(1)
+ return sock, sockdir, sockpath
+ except Exception:
+ unet.cmd_status("rm -rf " + sockdir)
+ raise
+
+
+def cli_server(unet, server_sock):
+ sock, addr = server_sock.accept()
+
+ # Go into full non-blocking mode now
+ sock.settimeout(None)
+
+ for line in lineiter(sock):
+ line = line.strip()
+
+ def writef(x):
+ xb = x.encode("utf-8")
+ sock.send(xb)
+
+ if not doline(unet, line, writef):
+ return
+ sock.send(ENDMARKER)
+
+
+def cli_client(sockpath, prompt="unet> "):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.settimeout(10)
+ sock.connect(sockpath)
+
+ # Go into full non-blocking mode now
+ sock.settimeout(None)
+
+ print("\n--- Micronet CLI Starting ---\n\n")
+ while True:
+ if sys.version_info[0] == 2:
+ line = raw_input(prompt) # pylint: disable=E0602
+ else:
+ line = input(prompt)
+ if line is None:
+ return
+
+ # Need to put \n back
+ line += "\n"
+
+ # Send the CLI command
+ sock.send(line.encode("utf-8"))
+
+ def bendswith(b, sentinel):
+ slen = len(sentinel)
+ return len(b) >= slen and b[-slen:] == sentinel
+
+ # Collect the output
+ rb = b""
+ while not bendswith(rb, ENDMARKER):
+ lb = sock.recv(4096)
+ if not lb:
+ return
+ rb += lb
+
+ # Remove the marker
+ rb = rb[: -len(ENDMARKER)]
+
+ # Write the output
+ sys.stdout.write(rb.decode("utf-8"))
+
+
+def local_cli(unet, outf, prompt="unet> "):
+ print("\n--- Micronet CLI Starting ---\n\n")
+ while True:
+ if sys.version_info[0] == 2:
+ line = raw_input(prompt) # pylint: disable=E0602
+ else:
+ line = input(prompt)
+ if line is None:
+ return
+ if not doline(unet, line, outf.write):
+ return
+
+
+def cli(
+ unet,
+ histfile=None,
+ sockpath=None,
+ force_window=False,
+ title=None,
+ prompt=None,
+ background=True,
+):
+ if prompt is None:
+ prompt = "unet> "
+
+ if force_window or not sys.stdin.isatty():
+ # Run CLI in another window b/c we have no tty.
+ sock, sockdir, sockpath = cli_server_setup(unet)
+
+ python_path = unet.get_exec_path(["python3", "python"])
+ us = os.path.realpath(__file__)
+ cmd = "{} {}".format(python_path, us)
+ if histfile:
+ cmd += " --histfile=" + histfile
+ if title:
+ cmd += " --prompt={}".format(title)
+ cmd += " " + sockpath
+
+ try:
+ unet.run_in_window(cmd, new_window=True, title=title, background=background)
+ return cli_server(unet, sock)
+ finally:
+ unet.cmd_status("rm -rf " + sockdir)
+
+ if not unet:
+ logger.debug("client-cli using sockpath %s", sockpath)
+
+ try:
+ if histfile is None:
+ histfile = os.path.expanduser("~/.micronet-history.txt")
+ if not os.path.exists(histfile):
+ if unet:
+ unet.cmd("touch " + histfile)
+ else:
+ subprocess.run("touch " + histfile)
+ if histfile:
+ readline.read_history_file(histfile)
+ except Exception:
+ pass
+
+ try:
+ if sockpath:
+ cli_client(sockpath, prompt=prompt)
+ else:
+ local_cli(unet, sys.stdout, prompt=prompt)
+ except EOFError:
+ pass
+ except Exception as ex:
+ logger.critical("cli: got exception: %s", ex, exc_info=True)
+ raise
+ finally:
+ readline.write_history_file(histfile)
+
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.DEBUG, filename="/tmp/topotests/cli-client.log")
+ logger = logging.getLogger("cli-client")
+ logger.info("Start logging cli-client")
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--histfile", help="file to user for history")
+ parser.add_argument("--prompt-text", help="prompt string to use")
+ parser.add_argument("socket", help="path to pair of sockets to communicate over")
+ args = parser.parse_args()
+
+ prompt = "{}> ".format(args.prompt_text) if args.prompt_text else "unet> "
+ cli(None, args.histfile, args.socket, prompt=prompt)
--- /dev/null
+# -*- coding: utf-8 eval: (blacken-mode 1) -*-
+#
+# July 11 2021, Christian Hopps <chopps@labn.net>
+#
+# Copyright (c) 2021, LabN Consulting, L.L.C
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; see the file COPYING; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+
+import glob
+import logging
+import os
+import signal
+import time
+
+from lib.micronet import LinuxNamespace, Micronet
+from lib.micronet_cli import cli
+
+
+def get_pids_with_env(has_var, has_val=None):
+ result = {}
+ for pidenv in glob.iglob("/proc/*/environ"):
+ pid = pidenv.split("/")[2]
+ try:
+ with open(pidenv, "rb") as rfb:
+ envlist = [
+ x.decode("utf-8").split("=", 1) for x in rfb.read().split(b"\0")
+ ]
+ envlist = [[x[0], ""] if len(x) == 1 else x for x in envlist]
+ envdict = dict(envlist)
+ if has_var not in envdict:
+ continue
+ if has_val is None:
+ result[pid] = envdict
+ elif envdict[has_var] == str(has_val):
+ result[pid] = envdict
+ except Exception:
+ # E.g., process exited and files are gone
+ pass
+ return result
+
+
+def _kill_piddict(pids_by_upid, sig):
+ for upid, pids in pids_by_upid:
+ logging.info(
+ "Sending %s to (%s) of micronet pid %s", sig, ", ".join(pids), upid
+ )
+ for pid in pids:
+ try:
+ os.kill(int(pid), sig)
+ except Exception:
+ pass
+
+
+def _get_our_pids():
+ ourpid = str(os.getpid())
+ piddict = get_pids_with_env("MICRONET_PID", ourpid)
+ pids = [x for x in piddict if x != ourpid]
+ if pids:
+ return {ourpid: pids}
+ return {}
+
+
+def _get_other_pids():
+ piddict = get_pids_with_env("MICRONET_PID")
+ unet_pids = {d["MICRONET_PID"] for d in piddict.values()}
+ pids_by_upid = {p: set() for p in unet_pids}
+ for pid, envdict in piddict.items():
+ pids_by_upid[envdict["MICRONET_PID"]].add(pid)
+ # Filter out any child pid sets whos micronet pid is still running
+ return {x: y for x, y in pids_by_upid.items() if x not in y}
+
+
+def _get_pids_by_upid(ours):
+ if ours:
+ return _get_our_pids()
+ return _get_other_pids()
+
+
+def _cleanup_pids(ours):
+ pids_by_upid = _get_pids_by_upid(ours).items()
+ if not pids_by_upid:
+ return
+
+ _kill_piddict(pids_by_upid, signal.SIGTERM)
+
+ # Give them 5 second to exit cleanly
+ logging.info("Waiting up to 5s to allow for clean exit of abandon'd pids")
+ for _ in range(0, 5):
+ pids_by_upid = _get_pids_by_upid(ours).items()
+ if not pids_by_upid:
+ return
+ time.sleep(1)
+
+ pids_by_upid = _get_pids_by_upid(ours).items()
+ _kill_piddict(pids_by_upid, signal.SIGKILL)
+
+
+def cleanup_current():
+ """Attempt to cleanup preview runs.
+
+ Currently this only scans for old processes.
+ """
+ logging.info("reaping current micronet processes")
+ _cleanup_pids(True)
+
+
+def cleanup_previous():
+ """Attempt to cleanup preview runs.
+
+ Currently this only scans for old processes.
+ """
+ logging.info("reaping past micronet processes")
+ _cleanup_pids(False)
+
+
+class Node(LinuxNamespace):
+ """Node (mininet compat)."""
+
+ def __init__(self, name, **kwargs):
+ """
+ Create a Node.
+ """
+ self.params = kwargs
+
+ if "private_mounts" in kwargs:
+ private_mounts = kwargs["private_mounts"]
+ else:
+ private_mounts = kwargs.get("privateDirs", [])
+
+ logger = kwargs.get("logger")
+
+ super(Node, self).__init__(name, logger=logger, private_mounts=private_mounts)
+
+ def cmd(self, cmd, **kwargs):
+ """Execute a command, joins stdout, stderr, ignores exit status."""
+
+ return super(Node, self).cmd_legacy(cmd, **kwargs)
+
+ def config(self, lo="up", **params):
+ """Called by Micronet when topology is built (but not started)."""
+ # mininet brings up loopback here.
+ del params
+ del lo
+
+ def intfNames(self):
+ return self.intfs
+
+ def terminate(self):
+ return
+
+
+class Topo(object): # pylint: disable=R0205
+ def __init__(self, *args, **kwargs):
+ raise Exception("Remove Me")
+
+
+class Mininet(Micronet):
+ """
+ Mininet using Micronet.
+ """
+
+ g_mnet_inst = None
+
+ def __init__(self, controller=None):
+ """
+ Create a Micronet.
+ """
+ assert not controller
+
+ if Mininet.g_mnet_inst is not None:
+ Mininet.g_mnet_inst.stop()
+ Mininet.g_mnet_inst = self
+
+ self.configured_hosts = set()
+ self.host_params = {}
+ self.prefix_len = 8
+
+ # SNMPd used to require this, which was set int he mininet shell
+ # that all commands executed from. This is goofy default so let's not
+ # do it if we don't have to. The snmpd.conf files have been updated
+ # to set permissions to root:frr 770 to make this unneeded in that case
+ # os.umask(0)
+
+ super(Mininet, self).__init__()
+
+ self.logger.debug("%s: Creating", self)
+
+ def __str__(self):
+ return "Mininet()"
+
+ def configure_hosts(self):
+ """
+ Configure hosts once the topology has been built.
+
+ This function can be called multiple times if routers are added to the topology
+ later.
+ """
+ if not self.hosts:
+ return
+
+ self.logger.debug("Configuring hosts: %s", self.hosts.keys())
+
+ for name in sorted(self.hosts.keys()):
+ if name in self.configured_hosts:
+ continue
+
+ host = self.hosts[name]
+ first_intf = host.intfs[0] if host.intfs else None
+ params = self.host_params[name]
+
+ if first_intf and "ip" in params:
+ ip = params["ip"]
+ i = ip.find("/")
+ if i == -1:
+ plen = self.prefix_len
+ else:
+ plen = int(ip[i + 1 :])
+ ip = ip[:i]
+
+ host.cmd_raises("ip addr add {}/{} dev {}".format(ip, plen, first_intf))
+
+ if "defaultRoute" in params:
+ host.cmd_raises(
+ "ip route add default {}".format(params["defaultRoute"])
+ )
+
+ host.config()
+
+ self.configured_hosts.add(name)
+
+ def add_host(self, name, cls=Node, **kwargs):
+ """Add a host to micronet."""
+
+ self.host_params[name] = kwargs
+ super(Mininet, self).add_host(name, cls=cls, **kwargs)
+
+ def start(self):
+ """Start the micronet topology."""
+ self.logger.debug("%s: Starting (no-op).", self)
+
+ def stop(self):
+ """Stop the mininet topology (deletes)."""
+ self.logger.debug("%s: Stopping (deleting).", self)
+
+ self.delete()
+
+ self.logger.debug("%s: Stopped (deleted).", self)
+
+ if Mininet.g_mnet_inst == self:
+ Mininet.g_mnet_inst = None
+
+ def cli(self):
+ cli(self)
# OF THIS SOFTWARE.
#
-import traceback
-import ipaddr
import ipaddress
import sys
-
from copy import deepcopy
-from time import sleep
-from lib.topolog import logger
-from lib.topotest import frr_unicode
-from ipaddress import IPv6Address
# Import common_config to use commomnly used APIs
from lib.common_config import (
- create_common_configuration,
+ create_common_configurations,
InvalidCLIError,
- retry,
generate_ips,
- check_address_types,
- validate_ip_address,
+ retry,
run_frr_cmd,
+ validate_ip_address,
)
-
-LOGDIR = "/tmp/topotests/"
-TMPDIR = None
+from lib.topolog import logger
+from lib.topotest import frr_unicode
################################
# Configure procs
################################
-def create_router_ospf(tgen, topo, input_dict=None, build=False, load_config=True):
+def create_router_ospf(tgen, topo=None, input_dict=None, build=False, load_config=True):
"""
API to configure ospf on router.
logger.debug("Entering lib API: create_router_ospf()")
result = False
+ if topo is None:
+ topo = tgen.json_topo
+
if not input_dict:
input_dict = deepcopy(topo)
else:
topo = topo["routers"]
input_dict = deepcopy(input_dict)
- for router in input_dict.keys():
- if "ospf" not in input_dict[router]:
- logger.debug("Router %s: 'ospf' not present in input_dict", router)
- continue
-
- result = __create_ospf_global(tgen, input_dict, router, build, load_config)
- if result is True:
- ospf_data = input_dict[router]["ospf"]
+ for ospf in ["ospf", "ospf6"]:
+ config_data_dict = {}
- for router in input_dict.keys():
- if "ospf6" not in input_dict[router]:
- logger.debug("Router %s: 'ospf6' not present in input_dict", router)
- continue
+ for router in input_dict.keys():
+ if ospf not in input_dict[router]:
+ logger.debug("Router %s: %s not present in input_dict", router, ospf)
+ continue
- result = __create_ospf_global(
- tgen, input_dict, router, build, load_config, ospf="ospf6"
- )
- if result is True:
- ospf_data = input_dict[router]["ospf6"]
+ config_data = __create_ospf_global(
+ tgen, input_dict, router, build, load_config, ospf
+ )
+ if config_data:
+ if router not in config_data_dict:
+ config_data_dict[router] = config_data
+ else:
+ config_data_dict[router].extend(config_data)
+ try:
+ result = create_common_configurations(
+ tgen, config_data_dict, ospf, build, load_config
+ )
+ except InvalidCLIError:
+ logger.error("create_router_ospf (ipv4)", exc_info=True)
+ result = False
logger.debug("Exiting lib API: create_router_ospf()")
return result
-def __create_ospf_global(
- tgen, input_dict, router, build=False, load_config=True, ospf="ospf"
-):
+def __create_ospf_global(tgen, input_dict, router, build, load_config, ospf):
"""
Helper API to create ospf global configuration.
"links": {
"r3": {
"ipv6": "2013:13::1/64",
- "ospf6": {
+ "ospf6": {
"hello_interval": 1,
"dead_interval": 4,
"network": "point-to-point"
}
- }
+ }
},
"ospf6": {
"router_id": "1.1.1.1",
Returns
-------
- True or False
+ list of configuration commands
"""
- result = False
- logger.debug("Entering lib API: __create_ospf_global()")
- try:
+ config_data = []
- ospf_data = input_dict[router][ospf]
- del_ospf_action = ospf_data.setdefault("delete", False)
- if del_ospf_action:
- config_data = ["no router {}".format(ospf)]
- result = create_common_configuration(
- tgen, router, config_data, ospf, build, load_config
- )
- return result
+ if ospf not in input_dict[router]:
+ return config_data
- config_data = []
- cmd = "router {}".format(ospf)
+ logger.debug("Entering lib API: __create_ospf_global()")
+ ospf_data = input_dict[router][ospf]
+ del_ospf_action = ospf_data.setdefault("delete", False)
+ if del_ospf_action:
+ config_data = ["no router {}".format(ospf)]
+ return config_data
+
+ cmd = "router {}".format(ospf)
+
+ config_data.append(cmd)
+
+ # router id
+ router_id = ospf_data.setdefault("router_id", None)
+ del_router_id = ospf_data.setdefault("del_router_id", False)
+ if del_router_id:
+ config_data.append("no {} router-id".format(ospf))
+ if router_id:
+ config_data.append("{} router-id {}".format(ospf, router_id))
+
+ # log-adjacency-changes
+ log_adj_changes = ospf_data.setdefault("log_adj_changes", None)
+ del_log_adj_changes = ospf_data.setdefault("del_log_adj_changes", False)
+ if del_log_adj_changes:
+ config_data.append("no log-adjacency-changes detail")
+ if log_adj_changes:
+ config_data.append("log-adjacency-changes {}".format(log_adj_changes))
+
+ # aggregation timer
+ aggr_timer = ospf_data.setdefault("aggr_timer", None)
+ del_aggr_timer = ospf_data.setdefault("del_aggr_timer", False)
+ if del_aggr_timer:
+ config_data.append("no aggregation timer")
+ if aggr_timer:
+ config_data.append("aggregation timer {}".format(aggr_timer))
+
+ # maximum path information
+ ecmp_data = ospf_data.setdefault("maximum-paths", {})
+ if ecmp_data:
+ cmd = "maximum-paths {}".format(ecmp_data)
+ del_action = ospf_data.setdefault("del_max_path", False)
+ if del_action:
+ cmd = "no maximum-paths"
config_data.append(cmd)
- # router id
- router_id = ospf_data.setdefault("router_id", None)
- del_router_id = ospf_data.setdefault("del_router_id", False)
- if del_router_id:
- config_data.append("no {} router-id".format(ospf))
- if router_id:
- config_data.append("{} router-id {}".format(ospf, router_id))
-
- # log-adjacency-changes
- log_adj_changes = ospf_data.setdefault("log_adj_changes", None)
- del_log_adj_changes = ospf_data.setdefault("del_log_adj_changes", False)
- if del_log_adj_changes:
- config_data.append("no log-adjacency-changes detail")
- if log_adj_changes:
- config_data.append("log-adjacency-changes {}".format(log_adj_changes))
-
- # aggregation timer
- aggr_timer = ospf_data.setdefault("aggr_timer", None)
- del_aggr_timer = ospf_data.setdefault("del_aggr_timer", False)
- if del_aggr_timer:
- config_data.append("no aggregation timer")
- if aggr_timer:
- config_data.append("aggregation timer {}".format(aggr_timer))
-
- # maximum path information
- ecmp_data = ospf_data.setdefault("maximum-paths", {})
- if ecmp_data:
- cmd = "maximum-paths {}".format(ecmp_data)
- del_action = ospf_data.setdefault("del_max_path", False)
- if del_action:
- cmd = "no maximum-paths"
- config_data.append(cmd)
+ # redistribute command
+ redistribute_data = ospf_data.setdefault("redistribute", {})
+ if redistribute_data:
+ for redistribute in redistribute_data:
+ if "redist_type" not in redistribute:
+ logger.debug(
+ "Router %s: 'redist_type' not present in " "input_dict", router
+ )
+ else:
+ cmd = "redistribute {}".format(redistribute["redist_type"])
+ for red_type in redistribute_data:
+ if "route_map" in red_type:
+ cmd = cmd + " route-map {}".format(red_type["route_map"])
+ del_action = redistribute.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
- # redistribute command
- redistribute_data = ospf_data.setdefault("redistribute", {})
- if redistribute_data:
- for redistribute in redistribute_data:
- if "redist_type" not in redistribute:
- logger.debug(
- "Router %s: 'redist_type' not present in " "input_dict", router
- )
- else:
- cmd = "redistribute {}".format(redistribute["redist_type"])
- for red_type in redistribute_data:
- if "route_map" in red_type:
- cmd = cmd + " route-map {}".format(red_type["route_map"])
- del_action = redistribute.setdefault("delete", False)
- if del_action:
- cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ # area information
+ area_data = ospf_data.setdefault("area", {})
+ if area_data:
+ for area in area_data:
+ if "id" not in area:
+ logger.debug(
+ "Router %s: 'area id' not present in " "input_dict", router
+ )
+ else:
+ cmd = "area {}".format(area["id"])
- # area information
- area_data = ospf_data.setdefault("area", {})
- if area_data:
- for area in area_data:
- if "id" not in area:
- logger.debug(
- "Router %s: 'area id' not present in " "input_dict", router
- )
- else:
- cmd = "area {}".format(area["id"])
+ if "type" in area:
+ cmd = cmd + " {}".format(area["type"])
- if "type" in area:
- cmd = cmd + " {}".format(area["type"])
+ del_action = area.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
- del_action = area.setdefault("delete", False)
- if del_action:
- cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ # def route information
+ def_rte_data = ospf_data.setdefault("default-information", {})
+ if def_rte_data:
+ if "originate" not in def_rte_data:
+ logger.debug(
+ "Router %s: 'originate key' not present in " "input_dict", router
+ )
+ else:
+ cmd = "default-information originate"
- # def route information
- def_rte_data = ospf_data.setdefault("default-information", {})
- if def_rte_data:
- if "originate" not in def_rte_data:
- logger.debug(
- "Router %s: 'originate key' not present in " "input_dict", router
- )
- else:
- cmd = "default-information originate"
+ if "always" in def_rte_data:
+ cmd = cmd + " always"
- if "always" in def_rte_data:
- cmd = cmd + " always"
+ if "metric" in def_rte_data:
+ cmd = cmd + " metric {}".format(def_rte_data["metric"])
- if "metric" in def_rte_data:
- cmd = cmd + " metric {}".format(def_rte_data["metric"])
+ if "metric-type" in def_rte_data:
+ cmd = cmd + " metric-type {}".format(def_rte_data["metric-type"])
- if "metric-type" in def_rte_data:
- cmd = cmd + " metric-type {}".format(def_rte_data["metric-type"])
+ if "route-map" in def_rte_data:
+ cmd = cmd + " route-map {}".format(def_rte_data["route-map"])
- if "route-map" in def_rte_data:
- cmd = cmd + " route-map {}".format(def_rte_data["route-map"])
+ del_action = def_rte_data.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
- del_action = def_rte_data.setdefault("delete", False)
- if del_action:
- cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ # area interface information for ospf6d only
+ if ospf == "ospf6":
+ area_iface = ospf_data.setdefault("neighbors", {})
+ if area_iface:
+ for neighbor in area_iface:
+ if "area" in area_iface[neighbor]:
+ iface = input_dict[router]["links"][neighbor]["interface"]
+ cmd = "interface {} area {}".format(
+ iface, area_iface[neighbor]["area"]
+ )
+ if area_iface[neighbor].setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
- # area interface information for ospf6d only
- if ospf == "ospf6":
- area_iface = ospf_data.setdefault("neighbors", {})
- if area_iface:
- for neighbor in area_iface:
- if "area" in area_iface[neighbor]:
+ try:
+ if "area" in input_dict[router]["links"][neighbor]["ospf6"]:
iface = input_dict[router]["links"][neighbor]["interface"]
cmd = "interface {} area {}".format(
- iface, area_iface[neighbor]["area"]
+ iface,
+ input_dict[router]["links"][neighbor]["ospf6"]["area"],
)
- if area_iface[neighbor].setdefault("delete", False):
+ if input_dict[router]["links"][neighbor].setdefault(
+ "delete", False
+ ):
cmd = "no {}".format(cmd)
config_data.append(cmd)
+ except KeyError:
+ pass
- try:
- if "area" in input_dict[router]["links"][neighbor]["ospf6"]:
- iface = input_dict[router]["links"][neighbor]["interface"]
- cmd = "interface {} area {}".format(
- iface,
- input_dict[router]["links"][neighbor]["ospf6"]["area"],
- )
- if input_dict[router]["links"][neighbor].setdefault(
- "delete", False
- ):
- cmd = "no {}".format(cmd)
- config_data.append(cmd)
- except KeyError:
- pass
+ # summary information
+ summary_data = ospf_data.setdefault("summary-address", {})
+ if summary_data:
+ for summary in summary_data:
+ if "prefix" not in summary:
+ logger.debug(
+ "Router %s: 'summary-address' not present in " "input_dict",
+ router,
+ )
+ else:
+ cmd = "summary {}/{}".format(summary["prefix"], summary["mask"])
- # summary information
- summary_data = ospf_data.setdefault("summary-address", {})
- if summary_data:
- for summary in summary_data:
- if "prefix" not in summary:
- logger.debug(
- "Router %s: 'summary-address' not present in " "input_dict",
- router,
- )
- else:
- cmd = "summary {}/{}".format(summary["prefix"], summary["mask"])
+ _tag = summary.setdefault("tag", None)
+ if _tag:
+ cmd = "{} tag {}".format(cmd, _tag)
- _tag = summary.setdefault("tag", None)
- if _tag:
- cmd = "{} tag {}".format(cmd, _tag)
+ _advertise = summary.setdefault("advertise", True)
+ if not _advertise:
+ cmd = "{} no-advertise".format(cmd)
- _advertise = summary.setdefault("advertise", True)
- if not _advertise:
- cmd = "{} no-advertise".format(cmd)
+ del_action = summary.setdefault("delete", False)
+ if del_action:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
- del_action = summary.setdefault("delete", False)
- if del_action:
- cmd = "no {}".format(cmd)
- config_data.append(cmd)
+ # ospf gr information
+ gr_data = ospf_data.setdefault("graceful-restart", {})
+ if gr_data:
- result = create_common_configuration(
- tgen, router, config_data, ospf, build, load_config
- )
+ if "opaque" in gr_data and gr_data["opaque"]:
+ cmd = "capability opaque"
+ if gr_data.setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
- except InvalidCLIError:
- # Traceback
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
+ if "helper-only" in gr_data and not gr_data["helper-only"]:
+ cmd = "graceful-restart helper-only"
+ if gr_data.setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+ elif "helper-only" in gr_data and type(gr_data["helper-only"]) is list:
+ for rtrs in gr_data["helper-only"]:
+ cmd = "graceful-restart helper-only {}".format(rtrs)
+ if gr_data.setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if "helper" in gr_data:
+ if type(gr_data["helper"]) is not list:
+ gr_data["helper"] = list(gr_data["helper"])
+ for helper_role in gr_data["helper"]:
+ cmd = "graceful-restart helper {}".format(helper_role)
+ if gr_data.setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
+ if "supported-grace-time" in gr_data:
+ cmd = "graceful-restart helper supported-grace-time {}".format(
+ gr_data["supported-grace-time"]
+ )
+ if gr_data.setdefault("delete", False):
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
logger.debug("Exiting lib API: create_ospf_global()")
- return result
+
+ return config_data
-def create_router_ospf6(tgen, topo, input_dict=None, build=False, load_config=True):
+def create_router_ospf6(
+ tgen, topo=None, input_dict=None, build=False, load_config=True
+):
"""
API to configure ospf on router
logger.debug("Entering lib API: create_router_ospf6()")
result = False
+ if topo is None:
+ topo = tgen.json_topo
+
if not input_dict:
input_dict = deepcopy(topo)
else:
topo = topo["routers"]
input_dict = deepcopy(input_dict)
+
+ config_data_dict = {}
+
for router in input_dict.keys():
if "ospf6" not in input_dict[router]:
logger.debug("Router %s: 'ospf6' not present in input_dict", router)
continue
- result = __create_ospf_global(
+ config_data = __create_ospf_global(
tgen, input_dict, router, build, load_config, "ospf6"
)
+ if config_data:
+ config_data_dict[router] = config_data
+
+ try:
+ result = create_common_configurations(
+ tgen, config_data_dict, "ospf6", build, load_config
+ )
+ except InvalidCLIError:
+ logger.error("create_router_ospf6", exc_info=True)
+ result = False
logger.debug("Exiting lib API: create_router_ospf6()")
return result
-def config_ospf_interface(tgen, topo, input_dict=None, build=False, load_config=True):
+def config_ospf_interface(
+ tgen, topo=None, input_dict=None, build=False, load_config=True
+):
"""
API to configure ospf on router.
"""
logger.debug("Enter lib config_ospf_interface")
result = False
+
+ if topo is None:
+ topo = tgen.json_topo
+
if not input_dict:
input_dict = deepcopy(topo)
else:
input_dict = deepcopy(input_dict)
+
+ config_data_dict = {}
+
for router in input_dict.keys():
config_data = []
for lnk in input_dict[router]["links"].keys():
if build:
return config_data
- else:
- result = create_common_configuration(
- tgen, router, config_data, "interface_config", build=build
- )
+
+ if config_data:
+ config_data_dict[router] = config_data
+
+ result = create_common_configurations(
+ tgen, config_data_dict, "interface_config", build=build
+ )
+
logger.debug("Exiting lib API: config_ospf_interface()")
return result
# Verification procs
################################
@retry(retry_timeout=80)
-def verify_ospf_neighbor(tgen, topo, dut=None, input_dict=None, lan=False, expected=True):
+def verify_ospf_neighbor(
+ tgen, topo=None, dut=None, input_dict=None, lan=False, expected=True
+):
"""
This API is to verify ospf neighborship by running
show ip ospf neighbour command,
"""
logger.debug("Entering lib API: verify_ospf_neighbor()")
result = False
+ if topo is None:
+ topo = tgen.json_topo
+
if input_dict:
for router, rnode in tgen.routers().items():
if "ospf" not in topo["routers"][router]:
# Verification procs
################################
@retry(retry_timeout=50)
-def verify_ospf6_neighbor(tgen, topo, dut=None, input_dict=None, lan=False):
+def verify_ospf6_neighbor(tgen, topo=None, dut=None, input_dict=None, lan=False):
"""
This API is to verify ospf neighborship by running
show ipv6 ospf neighbour command,
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
+ if topo is None:
+ topo = tgen.json_topo
+
if input_dict:
for router, rnode in tgen.routers().items():
if "ospf6" not in topo["routers"][router]:
nh_found = False
for st_rt in ip_list:
- st_rt = str(ipaddr.IPNetwork(frr_unicode(st_rt)))
+ st_rt = str(ipaddress.ip_network(frr_unicode(st_rt)))
_addr_type = validate_ip_address(st_rt)
if _addr_type != "ipv4":
@retry(retry_timeout=20)
-def verify_ospf_interface(tgen, topo, dut=None, lan=False, input_dict=None, expected=True):
+def verify_ospf_interface(
+ tgen, topo=None, dut=None, lan=False, input_dict=None, expected=True
+):
"""
This API is to verify ospf routes by running
show ip ospf interface command.
logger.debug("Entering lib API: verify_ospf_interface()")
result = False
+ if topo is None:
+ topo = tgen.json_topo
+
for router, rnode in tgen.routers().items():
if "ospf" not in topo["routers"][router]:
continue
rnode = tgen.routers()[dut]
if ospf:
- if 'ospf6' not in topo['routers'][dut]:
- errormsg = "[DUT: {}] OSPF6 is not configured on the router.".format(
- router)
+ if "ospf6" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF6 is not configured on the router.".format(router)
return errormsg
- show_ospf_json = run_frr_cmd(rnode, "show ipv6 ospf summary detail json",
- isjson=True)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ipv6 ospf summary detail json", isjson=True
+ )
else:
- if 'ospf' not in topo['routers'][dut]:
- errormsg = "[DUT: {}] OSPF is not configured on the router.".format(
- router)
+ if "ospf" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(router)
return errormsg
- show_ospf_json = run_frr_cmd(rnode, "show ip ospf summary detail json",
- isjson=True)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ip ospf summary detail json", isjson=True
+ )
# Verifying output dictionary show_ospf_json is empty or not
if not bool(show_ospf_json):
ospf_summary_data = input_dict
if ospf:
- show_ospf_json = show_ospf_json['default']
+ show_ospf_json = show_ospf_json["default"]
for ospf_summ, summ_data in ospf_summary_data.items():
if ospf_summ not in show_ospf_json:
continue
- summary = ospf_summary_data[ospf_summ]['Summary address']
+ summary = ospf_summary_data[ospf_summ]["Summary address"]
if summary in show_ospf_json:
for summ in summ_data:
if summ_data[summ] == show_ospf_json[summary][summ]:
- logger.info("[DUT: %s] OSPF summary %s:%s is %s",
- router, summary, summ, summ_data[summ])
+ logger.info(
+ "[DUT: %s] OSPF summary %s:%s is %s",
+ router,
+ summary,
+ summ,
+ summ_data[summ],
+ )
result = True
else:
- errormsg = ("[DUT: {}] OSPF summary {} : {} is {}, "
- "Expected is {}".format(router, summary, summ,show_ospf_json[
- summary][summ], summ_data[summ] ))
+ errormsg = (
+ "[DUT: {}] OSPF summary {} : {} is {}, "
+ "Expected is {}".format(
+ router,
+ summary,
+ summ,
+ show_ospf_json[summary][summ],
+ summ_data[summ],
+ )
+ )
return errormsg
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
@retry(retry_timeout=30)
-def verify_ospf6_rib(tgen, dut, input_dict, next_hop=None,
- tag=None, metric=None, fib=None):
+def verify_ospf6_rib(
+ tgen, dut, input_dict, next_hop=None, tag=None, metric=None, fib=None
+):
"""
This API is to verify ospf routes by running
show ip ospf route command.
additional_nexthops_in_required_nhs = []
found_hops = []
for routerInput in input_dict.keys():
- for router, rnode in router_list.iteritems():
+ for router, rnode in router_list.items():
if router != dut:
continue
@retry(retry_timeout=6)
-def verify_ospf6_interface(tgen, topo, dut=None,lan=False, input_dict=None):
+def verify_ospf6_interface(tgen, topo=None, dut=None, lan=False, input_dict=None):
"""
This API is to verify ospf routes by running
show ip ospf interface command.
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
- for router, rnode in tgen.routers().iteritems():
+ if topo is None:
+ topo = tgen.json_topo
+
+ for router, rnode in tgen.routers().items():
if "ospf6" not in topo["routers"][router]:
continue
return result
-def config_ospf6_interface(tgen, topo, input_dict=None, build=False, load_config=True):
+def config_ospf6_interface(
+ tgen, topo=None, input_dict=None, build=False, load_config=True
+):
"""
API to configure ospf on router.
"""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
result = False
+ if topo is None:
+ topo = tgen.json_topo
+
if not input_dict:
input_dict = deepcopy(topo)
else:
input_dict = deepcopy(input_dict)
+
+ config_data_dict = {}
+
for router in input_dict.keys():
config_data = []
- for lnk in input_dict[router]['links'].keys():
- if "ospf6" not in input_dict[router]['links'][lnk]:
- logger.debug("Router %s: ospf6 config is not present in"
- "input_dict, passed input_dict %s", router,
- str(input_dict))
+ for lnk in input_dict[router]["links"].keys():
+ if "ospf6" not in input_dict[router]["links"][lnk]:
+ logger.debug(
+ "Router %s: ospf6 config is not present in"
+ "input_dict, passed input_dict %s",
+ router,
+ str(input_dict),
+ )
continue
ospf_data = input_dict[router]["links"][lnk]["ospf6"]
data_ospf_area = ospf_data.setdefault("area", None)
if build:
return config_data
+
+ if config_data:
+ config_data_dict[router] = config_data
+
+ result = create_common_configurations(
+ tgen, config_data_dict, "interface_config", build=build
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return result
+
+
+@retry(retry_timeout=20)
+def verify_ospf_gr_helper(tgen, topo, dut, input_dict=None):
+ """
+ This API is used to vreify gr helper using command
+ show ip ospf graceful-restart helper
+
+ Parameters
+ ----------
+ * `tgen` : Topogen object
+ * `topo` : topology descriptions
+ * 'dut' : router
+ * 'input_dict' - values to be verified
+
+ Usage:
+ -------
+ input_dict = {
+ "helperSupport":"Disabled",
+ "strictLsaCheck":"Enabled",
+ "restartSupoort":"Planned and Unplanned Restarts",
+ "supportedGracePeriod":1800
+ }
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+
+ """
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+ result = False
+
+ if "ospf" not in topo["routers"][dut]:
+ errormsg = "[DUT: {}] OSPF is not configured on the router.".format(dut)
+ return errormsg
+
+ rnode = tgen.routers()[dut]
+ logger.info("Verifying OSPF GR details on router %s:", dut)
+ show_ospf_json = run_frr_cmd(
+ rnode, "show ip ospf graceful-restart helper json", isjson=True
+ )
+
+ # Verifying output dictionary show_ospf_json is empty or not
+ if not bool(show_ospf_json):
+ errormsg = "OSPF is not running"
+ raise ValueError(errormsg)
+ return errormsg
+
+ for ospf_gr, gr_data in input_dict.items():
+ try:
+ if input_dict[ospf_gr] == show_ospf_json[ospf_gr]:
+ logger.info(
+ "[DUT: FRR] OSPF GR Helper: %s is %s",
+ ospf_gr,
+ show_ospf_json[ospf_gr],
+ )
+ result = True
else:
- result = create_common_configuration(
- tgen, router, config_data, "interface_config", build=build
+ errormsg = (
+ "[DUT: FRR] OSPF GR Helper: {} expected is {}, Found "
+ "is {}".format(
+ ospf_gr, input_dict[ospf_gr], show_ospf_json[ospf_gr]
+ )
)
+ raise ValueError(errormsg)
+ return errormsg
+
+ except KeyError:
+ errormsg = "[DUT: FRR] OSPF GR Helper: {}".format(ospf_gr)
+ return errormsg
+
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
-import sys
+import datetime
import os
import re
-import datetime
+import sys
import traceback
-import pytest
-from time import sleep
from copy import deepcopy
-from lib.topolog import logger
+from time import sleep
+
# Import common_config to use commomnly used APIs
from lib.common_config import (
+ create_common_configurations,
+ HostApplicationHelper,
+ InvalidCLIError,
create_common_configuration,
InvalidCLIError,
retry,
run_frr_cmd,
)
+from lib.micronet import get_exec_path
+from lib.topolog import logger
+from lib.topotest import frr_unicode
####
CWD = os.path.dirname(os.path.realpath(__file__))
else:
topo = topo["routers"]
input_dict = deepcopy(input_dict)
+
+ config_data_dict = {}
+
for router in input_dict.keys():
- result = _enable_disable_pim(tgen, topo, input_dict, router, build)
+ config_data = _enable_disable_pim_config(tgen, topo, input_dict, router, build)
+ if config_data:
+ config_data_dict[router] = config_data
+
+ # Now add RP config to all routers
+ for router in input_dict.keys():
if "pim" not in input_dict[router]:
- logger.debug("Router %s: 'pim' is not present in " "input_dict", router)
continue
+ if "rp" not in input_dict[router]["pim"]:
+ continue
+ _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict)
- if result is True:
- if "rp" not in input_dict[router]["pim"]:
- continue
-
- result = _create_pim_rp_config(
- tgen, topo, input_dict, router, build, load_config
- )
- if result is not True:
- return False
+ try:
+ result = create_common_configurations(
+ tgen, config_data_dict, "pim", build, load_config
+ )
+ except InvalidCLIError:
+ logger.error("create_pim_config", exc_info=True)
+ result = False
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
-def _create_pim_rp_config(tgen, topo, input_dict, router, build=False, load_config=False):
+def _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict):
"""
Helper API to create pim RP configurations.
* `input_dict` : Input dict data, required when configuring from testcase
* `router` : router id to be configured.
* `build` : Only for initial setup phase this is set as True.
-
+ * `config_data_dict` : OUT: adds `router` config to dictinary
Returns
-------
- True or False
+ None
"""
- result = False
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
pim_data = input_dict[router]["pim"]
# Configure this RP on every router.
for dut in tgen.routers():
-
# At least one interface must be enabled for PIM on the router
pim_if_enabled = False
for destLink, data in topo[dut]["links"].items():
# ip address of RP
if "rp_addr" not in rp_dict and build:
logger.error(
- "Router %s: 'ip address of RP' not "
- "present in input_dict/JSON",
+ "Router %s: 'ip address of RP' not " "present in input_dict/JSON",
router,
)
config_data.append(cmd)
if prefix_list:
- cmd = "ip pim rp {} prefix-list {}".format(
- rp_addr, prefix_list
- )
+ cmd = "ip pim rp {} prefix-list {}".format(rp_addr, prefix_list)
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
- try:
- result = create_common_configuration(
- tgen, dut, config_data, "pim", build, load_config
- )
- if result is not True:
- logger.error("Error applying PIM config", exc_info=True)
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
- return False
-
- except InvalidCLIError as error:
- logger.error("Error applying PIM config: %s", error, exc_info=error)
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
- return False
-
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
- return result
+ if config_data:
+ if dut not in config_data_dict:
+ config_data_dict[dut] = config_data
+ else:
+ config_data_dict[dut].extend(config_data)
def create_igmp_config(tgen, topo, input_dict=None, build=False):
else:
topo = topo["routers"]
input_dict = deepcopy(input_dict)
+
+ config_data_dict = {}
+
for router in input_dict.keys():
if "igmp" not in input_dict[router]:
logger.debug("Router %s: 'igmp' is not present in " "input_dict", router)
cmd = "no {}".format(cmd)
config_data.append(cmd)
- try:
+ if config_data:
+ config_data_dict[router] = config_data
- result = create_common_configuration(
- tgen, router, config_data, "interface_config", build=build
- )
- except InvalidCLIError:
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
+ try:
+ result = create_common_configurations(
+ tgen, config_data_dict, "interface_config", build=build
+ )
+ except InvalidCLIError:
+ logger.error("create_igmp_config", exc_info=True)
+ result = False
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
-def _enable_disable_pim(tgen, topo, input_dict, router, build=False):
+def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
"""
Helper API to enable or disable pim on interfaces
Returns
-------
- True or False
+ list of config
"""
- result = False
- logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
- try:
- config_data = []
- # Enable pim on interfaces
- for destRouterLink, data in sorted(topo[router]["links"].items()):
- if "pim" in data and data["pim"] == "enable":
+ config_data = []
- # Loopback interfaces
- if "type" in data and data["type"] == "loopback":
- interface_name = destRouterLink
- else:
- interface_name = data["interface"]
-
- cmd = "interface {}".format(interface_name)
+ # Enable pim on interfaces
+ for destRouterLink, data in sorted(topo[router]["links"].items()):
+ if "pim" in data and data["pim"] == "enable":
+ # Loopback interfaces
+ if "type" in data and data["type"] == "loopback":
+ interface_name = destRouterLink
+ else:
+ interface_name = data["interface"]
+
+ cmd = "interface {}".format(interface_name)
+ config_data.append(cmd)
+ config_data.append("ip pim")
+
+ # pim global config
+ if "pim" in input_dict[router]:
+ pim_data = input_dict[router]["pim"]
+ del_action = pim_data.setdefault("delete", False)
+ for t in [
+ "join-prune-interval",
+ "keep-alive-timer",
+ "register-suppress-time",
+ ]:
+ if t in pim_data:
+ cmd = "ip pim {} {}".format(t, pim_data[t])
+ if del_action:
+ cmd = "no {}".format(cmd)
config_data.append(cmd)
- config_data.append("ip pim")
- result = create_common_configuration(
- tgen, router, config_data, "interface_config", build=build
- )
- if result is not True:
- return False
-
- config_data = []
- if "pim" in input_dict[router]:
- pim_data = input_dict[router]["pim"]
- for t in [
- "join-prune-interval",
- "keep-alive-timer",
- "register-suppress-time",
- ]:
- if t in pim_data:
- cmd = "ip pim {} {}".format(t, pim_data[t])
- config_data.append(cmd)
-
- if config_data:
- result = create_common_configuration(
- tgen, router, config_data, "pim", build=build
- )
- except InvalidCLIError:
- # Traceback
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
-
- logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
- return result
+ return config_data
def find_rp_details(tgen, topo):
result = False
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
try:
+ config_data_dict = {}
for dut in input_dict.keys():
if "pim" not in input_dict[dut]:
pim_data = input_dict[dut]["pim"]
+ config_data = []
if "force_expire" in pim_data:
- config_data = []
force_expire_data = pim_data["force_expire"]
for source, groups in force_expire_data.items():
)
config_data.append(cmd)
- result = create_common_configuration(
- tgen, dut, config_data, "pim", build=build
- )
- if result is not True:
- return False
+ if config_data:
+ config_data_dict[dut] = config_data
+ result = create_common_configurations(
+ tgen, config_data_dict, "pim", build=build
+ )
except InvalidCLIError:
- # Traceback
- errormsg = traceback.format_exc()
- logger.error(errormsg)
- return errormsg
+ logger.error("configure_pim_force_expire", exc_info=True)
+ result = False
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return result
@retry(retry_timeout=60)
def verify_upstream_iif(
- tgen, dut, iif, src_address, group_addresses, joinState=None, refCount=1, expected=True
+ tgen,
+ dut,
+ iif,
+ src_address,
+ group_addresses,
+ joinState=None,
+ refCount=1,
+ expected=True,
):
"""
Verify upstream inbound interface is updated correctly
@retry(retry_timeout=12)
-def verify_join_state_and_timer(tgen, dut, iif, src_address, group_addresses, expected=True):
+def verify_join_state_and_timer(
+ tgen, dut, iif, src_address, group_addresses, expected=True
+):
"""
Verify join state is updated correctly and join timer is
running with the help of "show ip pim upstream" cli
error = (
"[DUT %s]: Verifying join timer for"
" (%s,%s) [FAILED]!! "
- " Expected: %s, Found: %s",
+ " Expected: %s, Found: %s"
+ ) % (
dut,
src_address,
grp_addr,
return True
-@retry(retry_timeout=80)
+@retry(retry_timeout=120)
def verify_ip_mroutes(
- tgen, dut, src_address, group_addresses, iif, oil, return_uptime=False, mwait=0, expected=True
+ tgen,
+ dut,
+ src_address,
+ group_addresses,
+ iif,
+ oil,
+ return_uptime=False,
+ mwait=0,
+ expected=True,
):
"""
Verify ip mroutes and make sure (*, G)/(S, G) is present in mroutes
@retry(retry_timeout=60)
def verify_pim_rp_info(
- tgen, topo, dut, group_addresses, oif=None, rp=None, source=None, iamrp=None, expected=True
+ tgen,
+ topo,
+ dut,
+ group_addresses,
+ oif=None,
+ rp=None,
+ source=None,
+ iamrp=None,
+ expected=True,
):
"""
Verify pim rp info by running "show ip pim rp-info" cli
@retry(retry_timeout=60)
def verify_pim_state(
- tgen, dut, iif, oil, group_addresses, src_address=None, installed_fl=None, expected=True
+ tgen,
+ dut,
+ iif,
+ oil,
+ group_addresses,
+ src_address=None,
+ installed_fl=None,
+ expected=True,
):
"""
Verify pim state by running "show ip pim state" cli
@retry(retry_timeout=40)
-def verify_pim_interface(tgen, topo, dut, interface=None, interface_ip=None, expected=True):
+def verify_pim_interface(
+ tgen, topo, dut, interface=None, interface_ip=None, expected=True
+):
"""
Verify all PIM interface are up and running, config is verified
using "show ip pim interface" cli
config_data.append("ip address {}".format(_rp))
config_data.append("ip pim")
+ # Why not config just once, why per group?
result = create_common_configuration(
tgen, rp, config_data, "interface_config"
)
return result
-def scapy_send_bsr_raw_packet(
- tgen, topo, senderRouter, receiverRouter, packet=None, interval=1, count=1
-):
+def scapy_send_bsr_raw_packet(tgen, topo, senderRouter, receiverRouter, packet=None):
"""
Using scapy Raw() method to send BSR raw packet from one FRR
to other
* `senderRouter` : Sender router
* `receiverRouter` : Receiver router
* `packet` : BSR packet in raw format
- * `interval` : Interval between the packets
- * `count` : Number of packets to be sent
returns:
--------
result = ""
logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
- rnode = tgen.routers()[senderRouter]
+ python3_path = tgen.net.get_exec_path(["python3", "python"])
+ script_path = os.path.join(CWD, "send_bsr_packet.py")
+ node = tgen.net[senderRouter]
for destLink, data in topo["routers"][senderRouter]["links"].items():
if "type" in data and data["type"] == "loopback":
packet = topo["routers"][senderRouter]["bsm"]["bsr_packets"][packet]["data"]
- if interval > 1 or count > 1:
- cmd = (
- "nohup /usr/bin/python {}/send_bsr_packet.py '{}' '{}' "
- "--interval={} --count={} &".format(
- CWD, packet, sender_interface, interval, count
- )
- )
- else:
- cmd = (
- "/usr/bin/python {}/send_bsr_packet.py '{}' '{}' "
- "--interval={} --count={}".format(
- CWD, packet, sender_interface, interval, count
- )
- )
-
+ cmd = [
+ python3_path,
+ script_path,
+ packet,
+ sender_interface,
+ "--interval=1",
+ "--count=1",
+ ]
logger.info("Scapy cmd: \n %s", cmd)
- result = rnode.run(cmd)
-
- if result == "":
- return result
+ node.cmd_raises(cmd)
logger.debug("Exiting lib API: scapy_send_bsr_raw_packet")
return True
@retry(retry_timeout=12)
-def verify_pim_grp_rp_source(tgen, topo, dut, grp_addr, rp_source, rpadd=None, expected=True):
+def verify_pim_grp_rp_source(
+ tgen, topo, dut, grp_addr, rp_source, rpadd=None, expected=True
+):
"""
Verify pim rp info by running "show ip pim rp-info" cli
@retry(retry_timeout=60)
-def verify_ip_pim_upstream_rpf(tgen, topo, dut, interface, group_addresses, rp=None, expected=True):
+def verify_ip_pim_upstream_rpf(
+ tgen, topo, dut, interface, group_addresses, rp=None, expected=True
+):
"""
Verify IP PIM upstream rpf, config is verified
using "show ip pim neighbor" cli
@retry(retry_timeout=60)
-def verify_ip_pim_join(tgen, topo, dut, interface, group_addresses, src_address=None, expected=True):
+def verify_ip_pim_join(
+ tgen, topo, dut, interface, group_addresses, src_address=None, expected=True
+):
"""
Verify ip pim join by running "show ip pim join" cli
@retry(retry_timeout=40)
-def verify_multicast_flag_state(tgen, dut, src_address, group_addresses, flag, expected=True):
+def verify_multicast_flag_state(
+ tgen, dut, src_address, group_addresses, flag, expected=True
+):
"""
Verify flag state for mroutes and make sure (*, G)/(S, G) are having
coorect flags by running "show ip mroute" cli
logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
return True
+
+
+class McastTesterHelper(HostApplicationHelper):
+ def __init__(self, tgen=None):
+ self.script_path = os.path.join(CWD, "mcast-tester.py")
+ self.host_conn = {}
+ self.listen_sock = None
+
+ # # Get a temporary file for socket path
+ # (fd, sock_path) = tempfile.mkstemp("-mct.sock", "tmp" + str(os.getpid()))
+ # os.close(fd)
+ # os.remove(sock_path)
+ # self.app_sock_path = sock_path
+
+ # # Listen on unix socket
+ # logger.debug("%s: listening on socket %s", self, self.app_sock_path)
+ # self.listen_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
+ # self.listen_sock.settimeout(10)
+ # self.listen_sock.bind(self.app_sock_path)
+ # self.listen_sock.listen(10)
+
+ python3_path = get_exec_path(["python3", "python"])
+ super(McastTesterHelper, self).__init__(
+ tgen,
+ # [python3_path, self.script_path, self.app_sock_path]
+ [python3_path, self.script_path],
+ )
+
+ def __str__(self):
+ return "McastTesterHelper({})".format(self.script_path)
+
+ def run_join(self, host, join_addrs, join_towards=None, join_intf=None):
+ """
+ Join a UDP multicast group.
+
+ One of join_towards or join_intf MUST be set.
+
+ Parameters:
+ -----------
+ * `host`: host from where IGMP join would be sent
+ * `join_addrs`: multicast address (or addresses) to join to
+ * `join_intf`: the interface to bind the join[s] to
+ * `join_towards`: router whos interface to bind the join[s] to
+ """
+ if not isinstance(join_addrs, list) and not isinstance(join_addrs, tuple):
+ join_addrs = [join_addrs]
+
+ if join_towards:
+ join_intf = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][join_towards]["interface"]
+ )
+ else:
+ assert join_intf
+
+ for join in join_addrs:
+ self.run(host, [join, join_intf])
+
+ return True
+
+ def run_traffic(self, host, send_to_addrs, bind_towards=None, bind_intf=None):
+ """
+ Send UDP multicast traffic.
+
+ One of bind_towards or bind_intf MUST be set.
+
+ Parameters:
+ -----------
+ * `host`: host to send traffic from
+ * `send_to_addrs`: multicast address (or addresses) to send traffic to
+ * `bind_towards`: Router who's interface the source ip address is got from
+ """
+ if bind_towards:
+ bind_intf = frr_unicode(
+ self.tgen.json_topo["routers"][host]["links"][bind_towards]["interface"]
+ )
+ else:
+ assert bind_intf
+
+ if not isinstance(send_to_addrs, list) and not isinstance(send_to_addrs, tuple):
+ send_to_addrs = [send_to_addrs]
+
+ for send_to in send_to_addrs:
+ self.run(host, ["--send=0.7", send_to, bind_intf])
+
+ return True
+
+ # def cleanup(self):
+ # super(McastTesterHelper, self).cleanup()
+
+ # if not self.listen_sock:
+ # return
+
+ # logger.debug("%s: closing listen socket %s", self, self.app_sock_path)
+ # self.listen_sock.close()
+ # self.listen_sock = None
+
+ # if os.path.exists(self.app_sock_path):
+ # os.remove(self.app_sock_path)
+
+ # def started_proc(self, host, p):
+ # logger.debug("%s: %s: accepting on socket %s", self, host, self.app_sock_path)
+ # try:
+ # conn = self.listen_sock.accept()
+ # return conn
+ # except Exception as error:
+ # logger.error("%s: %s: accept on socket failed: %s", self, host, error)
+ # if p.poll() is not None:
+ # logger.error("%s: %s: helper app quit: %s", self, host, comm_error(p))
+ # raise
+
+ # def stopping_proc(self, host, p, conn):
+ # logger.debug("%s: %s: closing socket %s", self, host, conn)
+ # conn[0].close()
* see tests/topotest/simple-snmp-test/test_simple_snmp.py for example
"""
-from topolog import logger
+from lib.topolog import logger
class SnmpTester(object):
return tokens[0].split(".", 1)[1]
def _parse_multiline(self, snmp_output):
- results = snmp_output.strip().split("\r\n")
+ results = snmp_output.strip().split("\n")
out_dict = {}
out_list = []
* After running stop Mininet with: tgen.stop_topology()
"""
+import grp
+import inspect
+import json
+import logging
import os
+import platform
+import pwd
+import re
+import subprocess
import sys
-import io
-import logging
-import json
+from collections import OrderedDict
if sys.version_info[0] > 2:
import configparser
else:
import ConfigParser as configparser
-import glob
-import grp
-import platform
-import pwd
-import subprocess
-import pytest
-
-from mininet.net import Mininet
-from mininet.log import setLogLevel
-from mininet.cli import CLI
+import lib.topolog as topolog
+from lib.micronet import Commander
+from lib.micronet_compat import Mininet
+from lib.topolog import logger
+from lib.topotest import g_extra_config
from lib import topotest
-from lib.topolog import logger, logger_config
-from lib.topotest import set_sysctl
CWD = os.path.dirname(os.path.realpath(__file__))
global_tgen = tgen
+def is_string(value):
+ """Return True if value is a string."""
+ try:
+ return isinstance(value, basestring) # type: ignore
+ except NameError:
+ return isinstance(value, str)
+
+
+def get_exabgp_cmd(commander=None):
+ """Return the command to use for ExaBGP version < 4."""
+
+ if commander is None:
+ commander = Commander("topogen")
+
+ def exacmd_version_ok(exacmd):
+ logger.debug("checking %s for exabgp < version 4", exacmd)
+ _, stdout, _ = commander.cmd_status(exacmd + " -v", warn=False)
+ m = re.search(r"ExaBGP\s*:\s*((\d+)\.(\d+)(?:\.(\d+))?)", stdout)
+ if not m:
+ return False
+ version = m.group(1)
+ if topotest.version_cmp(version, "4") >= 0:
+ logging.debug("found exabgp version >= 4 in %s will keep looking", exacmd)
+ return False
+ logger.info("Using ExaBGP version %s in %s", version, exacmd)
+ return True
+
+ exacmd = commander.get_exec_path("exabgp")
+ if exacmd and exacmd_version_ok(exacmd):
+ return exacmd
+ py2_path = commander.get_exec_path("python2")
+ if py2_path:
+ exacmd = py2_path + " -m exabgp"
+ if exacmd_version_ok(exacmd):
+ return exacmd
+ py2_path = commander.get_exec_path("python")
+ if py2_path:
+ exacmd = py2_path + " -m exabgp"
+ if exacmd_version_ok(exacmd):
+ return exacmd
+ return None
+
+
#
# Main class: topology builder
#
CONFIG_SECTION = "topogen"
- def __init__(self, cls, modname="unnamed"):
+ def __init__(self, topodef, modname="unnamed"):
"""
Topogen initialization function, takes the following arguments:
- * `cls`: the topology class that is child of mininet.topo
+ * `cls`: OLD:uthe topology class that is child of mininet.topo or a build function.
+ * `topodef`: A dictionary defining the topology, a filename of a json file, or a
+ function that will do the same
* `modname`: module name must be a unique name to identify logs later.
"""
self.config = None
- self.topo = None
self.net = None
self.gears = {}
self.routern = 1
self.errorsd = {}
self.errors = ""
self.peern = 1
- self._init_topo(cls)
+ self.cfg_gen = 0
+ self.exabgp_cmd = None
+ self._init_topo(topodef)
+
logger.info("loading topology: {}".format(self.modname))
- @staticmethod
- def _mininet_reset():
- "Reset the mininet environment"
- # Clean up the mininet environment
- os.system("mn -c > /dev/null 2>&1")
+ # @staticmethod
+ # def _mininet_reset():
+ # "Reset the mininet environment"
+ # # Clean up the mininet environment
+ # os.system("mn -c > /dev/null 2>&1")
+
+ def __str__(self):
+ return "Topogen()"
- def _init_topo(self, cls):
+ def _init_topo(self, topodef):
"""
Initialize the topogily provided by the user. The user topology class
must call get_topogen() during build() to get the topogen object.
# Set the global variable so the test cases can access it anywhere
set_topogen(self)
+ # Increase host based limits
+ topotest.fix_host_limits()
+
# Test for MPLS Kernel modules available
self.hasmpls = False
if not topotest.module_present("mpls-router"):
logger.info("MPLS tests will not run (missing mpls-iptunnel kernel module)")
else:
self.hasmpls = True
+
# Load the default topology configurations
self._load_config()
- # Initialize the API
- self._mininet_reset()
- cls()
- self.net = Mininet(controller=None, topo=self.topo)
- for gear in self.gears.values():
- gear.net = self.net
+ # Create new log directory
+ self.logdir = topotest.get_logs_path(g_extra_config["rundir"])
+ subprocess.check_call(
+ "mkdir -p {0} && chmod 1777 {0}".format(self.logdir), shell=True
+ )
+ try:
+ routertype = self.config.get(self.CONFIG_SECTION, "routertype")
+ # Only allow group, if it exist.
+ gid = grp.getgrnam(routertype)[2]
+ os.chown(self.logdir, 0, gid)
+ os.chmod(self.logdir, 0o775)
+ except KeyError:
+ # Allow anyone, but set the sticky bit to avoid file deletions
+ os.chmod(self.logdir, 0o1777)
+
+ # Remove old twisty way of creating sub-classed topology object which has it's
+ # build method invoked which calls Topogen methods which then call Topo methods
+ # to create a topology within the Topo object, which is then used by
+ # Mininet(Micronet) to build the actual topology.
+ assert not inspect.isclass(topodef)
+
+ self.net = Mininet(controller=None)
+
+ # New direct way: Either a dictionary defines the topology or a build function
+ # is supplied, or a json filename all of which build the topology by calling
+ # Topogen methods which call Mininet(Micronet) methods to create the actual
+ # topology.
+ if not inspect.isclass(topodef):
+ if callable(topodef):
+ topodef(self)
+ self.net.configure_hosts()
+ elif is_string(topodef):
+ # topojson imports topogen in one function too,
+ # switch away from this use here to the topojson
+ # fixutre and remove this case
+ from lib.topojson import build_topo_from_json
+
+ with open(topodef, "r") as topof:
+ self.json_topo = json.load(topof)
+ build_topo_from_json(self, self.json_topo)
+ self.net.configure_hosts()
+ elif topodef:
+ self.add_topology_from_dict(topodef)
+
+ def add_topology_from_dict(self, topodef):
+
+ keylist = (
+ topodef.keys()
+ if isinstance(topodef, OrderedDict)
+ else sorted(topodef.keys())
+ )
+ # ---------------------------
+ # Create all referenced hosts
+ # ---------------------------
+ for oname in keylist:
+ tup = (topodef[oname],) if is_string(topodef[oname]) else topodef[oname]
+ for e in tup:
+ desc = e.split(":")
+ name = desc[0]
+ if name not in self.gears:
+ logging.debug("Adding router: %s", name)
+ self.add_router(name)
+
+ # ------------------------------
+ # Create all referenced switches
+ # ------------------------------
+ for oname in keylist:
+ if oname is not None and oname not in self.gears:
+ logging.debug("Adding switch: %s", oname)
+ self.add_switch(oname)
+
+ # ----------------
+ # Create all links
+ # ----------------
+ for oname in keylist:
+ if oname is None:
+ continue
+ tup = (topodef[oname],) if is_string(topodef[oname]) else topodef[oname]
+ for e in tup:
+ desc = e.split(":")
+ name = desc[0]
+ ifname = desc[1] if len(desc) > 1 else None
+ sifname = desc[2] if len(desc) > 2 else None
+ self.add_link(self.gears[oname], self.gears[name], sifname, ifname)
+
+ self.net.configure_hosts()
def _load_config(self):
"""
pytestini_path = os.path.join(CWD, "../pytest.ini")
self.config.read(pytestini_path)
- def add_router(self, name=None, cls=topotest.Router, **params):
+ def add_router(self, name=None, cls=None, **params):
"""
Adds a new router to the topology. This function has the following
options:
* `routertype`: (optional) `frr`
Returns a TopoRouter.
"""
+ if cls is None:
+ cls = topotest.Router
if name is None:
name = "r{}".format(self.routern)
if name in self.gears:
self.routern += 1
return self.gears[name]
- def add_switch(self, name=None, cls=topotest.LegacySwitch):
+ def add_switch(self, name=None):
"""
Adds a new switch to the topology. This function has the following
options:
if name in self.gears:
raise KeyError("switch already exists")
- self.gears[name] = TopoSwitch(self, cls, name)
+ self.gears[name] = TopoSwitch(self, name)
self.switchn += 1
return self.gears[name]
node1.register_link(ifname1, node2, ifname2)
node2.register_link(ifname2, node1, ifname1)
- self.topo.addLink(node1.name, node2.name, intfName1=ifname1, intfName2=ifname2)
+ self.net.add_link(node1.name, node2.name, ifname1, ifname2)
def get_gears(self, geartype):
"""
"""
return self.get_gears(TopoExaBGP)
- def start_topology(self, log_level=None):
- """
- Starts the topology class. Possible `log_level`s are:
- 'debug': all information possible
- 'info': informational messages
- 'output': default logging level defined by Mininet
- 'warning': only warning, error and critical messages
- 'error': only error and critical messages
- 'critical': only critical messages
- """
- # If log_level is not specified use the configuration.
- if log_level is None:
- log_level = self.config.get(self.CONFIG_SECTION, "verbosity")
-
- # Set python logger level
- logger_config.set_log_level(log_level)
-
- # Run mininet
- if log_level == "debug":
- setLogLevel(log_level)
-
+ def start_topology(self):
+ """Starts the topology class."""
logger.info("starting topology: {}".format(self.modname))
self.net.start()
"""
if router is None:
# pylint: disable=r1704
+ # XXX should be hosts?
for _, router in self.routers().items():
router.start()
else:
self.net.stop()
- def mininet_cli(self):
+ def get_exabgp_cmd(self):
+ if not self.exabgp_cmd:
+ self.exabgp_cmd = get_exabgp_cmd(self.net)
+ return self.exabgp_cmd
+
+ def cli(self):
"""
Interrupt the test and call the command line interface for manual
inspection. Should be only used on non production code.
"""
- if not sys.stdin.isatty():
- raise EnvironmentError(
- "you must run pytest with '-s' in order to use mininet CLI"
- )
+ self.net.cli()
- CLI(self.net)
+ mininet_cli = cli
def is_memleak_enabled(self):
"Returns `True` if memory leak report is enable, otherwise `False`."
class TopoGear(object):
"Abstract class for type checking"
- def __init__(self):
- self.tgen = None
- self.name = None
- self.cls = None
+ def __init__(self, tgen, name, **params):
+ self.tgen = tgen
+ self.name = name
+ self.params = params
self.links = {}
self.linkn = 0
+ # Would be nice for this to point at the gears log directory rather than the
+ # test's.
+ self.logdir = tgen.logdir
+ self.gearlogdir = None
+
def __str__(self):
links = ""
for myif, dest in self.links.items():
return 'TopoGear<name="{}",links=[{}]>'.format(self.name, links)
+ @property
+ def net(self):
+ return self.tgen.net[self.name]
+
def start(self):
"Basic start function that just reports equipment start"
logger.info('starting "{}"'.format(self.name))
def stop(self, wait=True, assertOnError=True):
- "Basic start function that just reports equipment stop"
- logger.info('stopping "{}"'.format(self.name))
+ "Basic stop function that just reports equipment stop"
+ logger.info('"{}" base stop called'.format(self.name))
return ""
- def run(self, command):
+ def cmd(self, command, **kwargs):
"""
Runs the provided command string in the router and returns a string
with the response.
"""
- return self.tgen.net[self.name].cmd(command)
+ return self.net.cmd_legacy(command, **kwargs)
+
+ def cmd_raises(self, command, **kwargs):
+ """
+ Runs the provided command string in the router and returns a string
+ with the response. Raise an exception on any error.
+ """
+ return self.net.cmd_raises(command, **kwargs)
+
+ run = cmd
def popen(self, *params, **kwargs):
"""
- Popen on the router.
+ Creates a pipe with the given command. Same args as python Popen.
+ If `command` is a string then will be invoked with shell, otherwise
+ `command` is a list and will be invoked w/o shell. Returns a popen object.
"""
- return self.tgen.net[self.name].popen(*params, **kwargs)
+ return self.net.popen(*params, **kwargs)
def add_link(self, node, myif=None, nodeif=None):
"""
extract = ""
if netns is not None:
extract = "ip netns exec {} ".format(netns)
+
return self.run("{}ip link set dev {} {}".format(extract, myif, operation))
def peer_link_enable(self, myif, enabled=True, netns=None):
self.links[myif] = (node, nodeif)
+ def _setup_tmpdir(self):
+ topotest.setup_node_tmpdir(self.logdir, self.name)
+ self.gearlogdir = "{}/{}".format(self.logdir, self.name)
+ return "{}/{}.log".format(self.logdir, self.name)
+
class TopoRouter(TopoGear):
"""
# The default required directories by FRR
PRIVATE_DIRS = [
"/etc/frr",
+ "/etc/snmp",
"/var/run/frr",
"/var/log",
]
* daemondir: daemon binary directory
* routertype: 'frr'
"""
- super(TopoRouter, self).__init__()
- self.tgen = tgen
- self.net = None
- self.name = name
- self.cls = cls
- self.options = {}
+ super(TopoRouter, self).__init__(tgen, name, **params)
self.routertype = params.get("routertype", "frr")
if "privateDirs" not in params:
params["privateDirs"] = self.PRIVATE_DIRS
- self.options["memleak_path"] = params.get("memleak_path", None)
-
- # Create new log directory
- self.logdir = "/tmp/topotests/{}".format(self.tgen.modname)
- # Clean up before starting new log files: avoids removing just created
- # log files.
- self._prepare_tmpfiles()
# Propagate the router log directory
+ logfile = self._setup_tmpdir()
params["logdir"] = self.logdir
- # setup the per node directory
- dir = "{}/{}".format(self.logdir, self.name)
- os.system("mkdir -p " + dir)
- os.system("chmod -R go+rw /tmp/topotests")
+ self.logger = topolog.get_logger(name, log_level="debug", target=logfile)
+ params["logger"] = self.logger
+ tgen.net.add_host(self.name, cls=cls, **params)
+ topotest.fix_netns_limits(tgen.net[name])
- # Open router log file
- logfile = "{0}/{1}.log".format(self.logdir, name)
- self.logger = logger_config.get_logger(name=name, target=logfile)
+ # Mount gear log directory on a common path
+ self.net.bind_mount(self.gearlogdir, "/tmp/gearlogdir")
- self.tgen.topo.addNode(self.name, cls=self.cls, **params)
+ # Ensure pid file
+ with open(os.path.join(self.logdir, self.name + ".pid"), "w") as f:
+ f.write(str(self.net.pid) + "\n")
def __str__(self):
gear = super(TopoRouter, self).__str__()
gear += " TopoRouter<>"
return gear
- def _prepare_tmpfiles(self):
- # Create directories if they don't exist
- try:
- os.makedirs(self.logdir, 0o755)
- except OSError:
- pass
-
- # Allow unprivileged daemon user (frr) to create log files
- try:
- # Only allow group, if it exist.
- gid = grp.getgrnam(self.routertype)[2]
- os.chown(self.logdir, 0, gid)
- os.chmod(self.logdir, 0o775)
- except KeyError:
- # Allow anyone, but set the sticky bit to avoid file deletions
- os.chmod(self.logdir, 0o1777)
-
- # Try to find relevant old logfiles in /tmp and delete them
- map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
- # Remove old valgrind files
- map(os.remove, glob.glob("{}/{}.valgrind.*".format(self.logdir, self.name)))
- # Remove old core files
- map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
-
def check_capability(self, daemon, param):
"""
Checks a capability daemon against an argument option
"""
daemonstr = self.RD.get(daemon)
self.logger.info('check capability {} for "{}"'.format(param, daemonstr))
- return self.tgen.net[self.name].checkCapability(daemonstr, param)
+ return self.net.checkCapability(daemonstr, param)
def load_config(self, daemon, source=None, param=None):
- """
- Loads daemon configuration from the specified source
+ """Loads daemon configuration from the specified source
Possible daemon values are: TopoRouter.RD_ZEBRA, TopoRouter.RD_RIP,
TopoRouter.RD_RIPNG, TopoRouter.RD_OSPF, TopoRouter.RD_OSPF6,
TopoRouter.RD_ISIS, TopoRouter.RD_BGP, TopoRouter.RD_LDP,
TopoRouter.RD_PIM, TopoRouter.RD_PBR, TopoRouter.RD_SNMP.
+
+ Possible `source` values are `None` for an empty config file, a path name which is
+ used directly, or a file name with no path components which is first looked for
+ directly and then looked for under a sub-directory named after router.
+
+ This API unfortunately allows for source to not exist for any and
+ all routers.
"""
daemonstr = self.RD.get(daemon)
self.logger.info('loading "{}" configuration: {}'.format(daemonstr, source))
- self.tgen.net[self.name].loadConf(daemonstr, source, param)
+ self.net.loadConf(daemonstr, source, param)
def check_router_running(self):
"""
Run a series of checks and returns a status string.
"""
self.logger.info("checking if daemons are running")
- return self.tgen.net[self.name].checkRouterRunning()
+ return self.net.checkRouterRunning()
def start(self):
"""
* Start daemons (e.g. FRR)
* Configure daemon logging files
"""
- self.logger.debug("starting")
- nrouter = self.tgen.net[self.name]
+
+ nrouter = self.net
result = nrouter.startRouter(self.tgen)
+ # Enable command logging
+
# Enable all daemon command logging, logging files
# and set them to the start dir.
for daemon, enabled in nrouter.daemons.items():
- if enabled == 0:
- continue
- self.vtysh_cmd(
- "configure terminal\nlog commands\nlog file {}.log".format(daemon),
- daemon=daemon,
- )
+ if enabled and daemon != "snmpd":
+ self.vtysh_cmd(
+ "\n".join(
+ [
+ "clear log cmdline-targets",
+ "conf t",
+ "log file {}.log debug".format(daemon),
+ "log commands",
+ "log timestamp precision 3",
+ ]
+ ),
+ daemon=daemon,
+ )
if result != "":
self.tgen.set_error(result)
- else:
+ elif nrouter.daemons["ldpd"] == 1 or nrouter.daemons["pathd"] == 1:
# Enable MPLS processing on all interfaces.
- for interface in self.links.keys():
- set_sysctl(nrouter, "net.mpls.conf.{}.input".format(interface), 1)
+ for interface in self.links:
+ topotest.sysctl_assure(
+ nrouter, "net.mpls.conf.{}.input".format(interface), 1
+ )
return result
- def __stop_internal(self, wait=True, assertOnError=True):
- """
- Stop router, private internal version
- * Kill daemons
- """
- self.logger.debug("stopping: wait {}, assert {}".format(wait, assertOnError))
- return self.tgen.net[self.name].stopRouter(wait, assertOnError)
-
def stop(self):
"""
Stop router cleanly:
- * Signal daemons twice, once without waiting, and then a second time
- with a wait to ensure the daemons exit cleanly
+ * Signal daemons twice, once with SIGTERM, then with SIGKILL.
"""
- self.logger.debug("stopping")
- self.__stop_internal(False, False)
- return self.__stop_internal(True, False)
+ self.logger.debug("stopping (no assert)")
+ return self.net.stopRouter(False)
def startDaemons(self, daemons):
"""
* Configure daemon logging files
"""
self.logger.debug("starting")
- nrouter = self.tgen.net[self.name]
+ nrouter = self.net
result = nrouter.startRouterDaemons(daemons)
+ if daemons is None:
+ daemons = nrouter.daemons.keys()
+
# Enable all daemon command logging, logging files
# and set them to the start dir.
- for daemon, enabled in nrouter.daemons.items():
- for d in daemons:
- if enabled == 0:
- continue
+ for daemon in daemons:
+ enabled = nrouter.daemons[daemon]
+ if enabled and daemon != "snmpd":
self.vtysh_cmd(
- "configure terminal\nlog commands\nlog file {}.log".format(daemon),
+ "\n".join(
+ [
+ "clear log cmdline-targets",
+ "conf t",
+ "log file {}.log debug".format(daemon),
+ "log commands",
+ "log timestamp precision 3",
+ ]
+ ),
daemon=daemon,
)
forcefully using SIGKILL
"""
self.logger.debug("Killing daemons using SIGKILL..")
- return self.tgen.net[self.name].killRouterDaemons(daemons, wait, assertOnError)
+ return self.net.killRouterDaemons(daemons, wait, assertOnError)
def vtysh_cmd(self, command, isjson=False, daemon=None):
"""
vtysh_command = 'vtysh {} -c "{}" 2>/dev/null'.format(dparam, command)
+ self.logger.info('vtysh command => "{}"'.format(command))
output = self.run(vtysh_command)
- self.logger.info(
- "\nvtysh command => {}\nvtysh output <= {}".format(command, output)
- )
+
+ dbgout = output.strip()
+ if dbgout:
+ if "\n" in dbgout:
+ dbgout = dbgout.replace("\n", "\n\t")
+ self.logger.info("vtysh result:\n\t{}".format(dbgout))
+ else:
+ self.logger.info('vtysh result: "{}"'.format(dbgout))
+
if isjson is False:
return output
try:
return json.loads(output)
except ValueError as error:
- logger.warning("vtysh_cmd: %s: failed to convert json output: %s: %s", self.name, str(output), str(error))
+ logger.warning(
+ "vtysh_cmd: %s: failed to convert json output: %s: %s",
+ self.name,
+ str(output),
+ str(error),
+ )
return {}
def vtysh_multicmd(self, commands, pretty_output=True, daemon=None):
else:
vtysh_command = "vtysh {} -f {}".format(dparam, fname)
+ dbgcmds = commands if is_string(commands) else "\n".join(commands)
+ dbgcmds = "\t" + dbgcmds.replace("\n", "\n\t")
+ self.logger.info("vtysh command => FILE:\n{}".format(dbgcmds))
+
res = self.run(vtysh_command)
os.unlink(fname)
- self.logger.info(
- '\nvtysh command => "{}"\nvtysh output <= "{}"'.format(vtysh_command, res)
- )
-
+ dbgres = res.strip()
+ if dbgres:
+ if "\n" in dbgres:
+ dbgres = dbgres.replace("\n", "\n\t")
+ self.logger.info("vtysh result:\n\t{}".format(dbgres))
+ else:
+ self.logger.info('vtysh result: "{}"'.format(dbgres))
return res
def report_memory_leaks(self, testname):
TOPOTESTS_CHECK_MEMLEAK set or memleak_path configured in `pytest.ini`.
"""
memleak_file = (
- os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.options["memleak_path"]
+ os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.params["memleak_path"]
)
if memleak_file == "" or memleak_file == None:
return
self.stop()
self.logger.info("running memory leak report")
- self.tgen.net[self.name].report_memory_leaks(memleak_file, testname)
+ self.net.report_memory_leaks(memleak_file, testname)
def version_info(self):
"Get equipment information from 'show version'."
Usage example: router.has_version('>', '1.0')
"""
- return self.tgen.net[self.name].checkRouterVersion(cmpop, version)
+ return self.net.checkRouterVersion(cmpop, version)
def has_type(self, rtype):
"""
return rtype == curtype
def has_mpls(self):
- nrouter = self.tgen.net[self.name]
- return nrouter.hasmpls
+ return self.net.hasmpls
class TopoSwitch(TopoGear):
# pylint: disable=too-few-public-methods
- def __init__(self, tgen, cls, name):
- super(TopoSwitch, self).__init__()
- self.tgen = tgen
- self.net = None
- self.name = name
- self.cls = cls
- self.tgen.topo.addSwitch(name, cls=self.cls)
+ def __init__(self, tgen, name, **params):
+ super(TopoSwitch, self).__init__(tgen, name, **params)
+ tgen.net.add_switch(name)
def __str__(self):
gear = super(TopoSwitch, self).__str__()
* `privateDirs`: directories that will be mounted on a different domain
(e.g. '/etc/important_dir').
"""
- super(TopoHost, self).__init__()
- self.tgen = tgen
- self.net = None
- self.name = name
- self.options = params
- self.tgen.topo.addHost(name, **params)
+ super(TopoHost, self).__init__(tgen, name, **params)
+
+ # Propagate the router log directory
+ logfile = self._setup_tmpdir()
+ params["logdir"] = self.logdir
+
+ # Odd to have 2 logfiles for each host
+ self.logger = topolog.get_logger(name, log_level="debug", target=logfile)
+ params["logger"] = self.logger
+ tgen.net.add_host(name, **params)
+ topotest.fix_netns_limits(tgen.net[name])
+
+ # Mount gear log directory on a common path
+ self.net.bind_mount(self.gearlogdir, "/tmp/gearlogdir")
def __str__(self):
gear = super(TopoHost, self).__str__()
gear += ' TopoHost<ip="{}",defaultRoute="{}",privateDirs="{}">'.format(
- self.options["ip"],
- self.options["defaultRoute"],
- str(self.options["privateDirs"]),
+ self.params["ip"],
+ self.params["defaultRoute"],
+ str(self.params["privateDirs"]),
)
return gear
"""
params["privateDirs"] = self.PRIVATE_DIRS
super(TopoExaBGP, self).__init__(tgen, name, **params)
- self.tgen.topo.addHost(name, **params)
def __str__(self):
gear = super(TopoExaBGP, self).__str__()
* Make all python files runnable
* Run ExaBGP with env file `env_file` and configuration peer*/exabgp.cfg
"""
- self.run("mkdir /etc/exabgp")
+ exacmd = self.tgen.get_exabgp_cmd()
+ assert exacmd, "Can't find a usabel ExaBGP (must be < version 4)"
+
+ self.run("mkdir -p /etc/exabgp")
self.run("chmod 755 /etc/exabgp")
+ self.run("cp {}/exa-* /etc/exabgp/".format(CWD))
self.run("cp {}/* /etc/exabgp/".format(peer_dir))
if env_file is not None:
self.run("cp {} /etc/exabgp/exabgp.env".format(env_file))
self.run("chmod 644 /etc/exabgp/*")
self.run("chmod a+x /etc/exabgp/*.py")
self.run("chown -R exabgp:exabgp /etc/exabgp")
- output = self.run("exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg")
+
+ output = self.run(exacmd + " -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg")
if output == None or len(output) == 0:
output = "<none>"
+
logger.info("{} exabgp started, output={}".format(self.name, output))
def stop(self, wait=True, assertOnError=True):
# Disable linter branch warning. It is expected to have these here.
# pylint: disable=R0912
-def diagnose_env_linux():
+def diagnose_env_linux(rundir):
"""
Run diagnostics in the running environment. Returns `True` when everything
is ok, otherwise `False`.
"""
ret = True
- # Test log path exists before installing handler.
- if not os.path.isdir("/tmp"):
- logger.warning("could not find /tmp for logs")
- else:
- os.system("mkdir -p /tmp/topotests")
- # Log diagnostics to file so it can be examined later.
- fhandler = logging.FileHandler(filename="/tmp/topotests/diagnostics.txt")
- fhandler.setLevel(logging.DEBUG)
- fhandler.setFormatter(
- logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
- )
- logger.addHandler(fhandler)
-
- logger.info("Running environment diagnostics")
-
# Load configuration
config = configparser.ConfigParser(defaults=tgen_defaults)
pytestini_path = os.path.join(CWD, "../pytest.ini")
config.read(pytestini_path)
+ # Test log path exists before installing handler.
+ os.system("mkdir -p " + rundir)
+ # Log diagnostics to file so it can be examined later.
+ fhandler = logging.FileHandler(filename="{}/diagnostics.txt".format(rundir))
+ fhandler.setLevel(logging.DEBUG)
+ fhandler.setFormatter(logging.Formatter(fmt=topolog.FORMAT))
+ logger.addHandler(fhandler)
+
+ logger.info("Running environment diagnostics")
+
# Assert that we are running as root
if os.getuid() != 0:
logger.error("you must run topotest as root")
ret = False
# Assert that we have mininet
- if os.system("which mn >/dev/null 2>/dev/null") != 0:
- logger.error("could not find mininet binary (mininet is not installed)")
- ret = False
+ # if os.system("which mn >/dev/null 2>/dev/null") != 0:
+ # logger.error("could not find mininet binary (mininet is not installed)")
+ # ret = False
# Assert that we have iproute installed
if os.system("which ip >/dev/null 2>/dev/null") != 0:
if fname != "zebra":
continue
- os.system("{} -v 2>&1 >/tmp/topotests/frr_zebra.txt".format(path))
+ os.system("{} -v 2>&1 >{}/frr_zebra.txt".format(path, rundir))
# Test MPLS availability
krel = platform.release()
if not topotest.module_present("mpls-iptunnel", load=False) != 0:
logger.info("LDPd tests will not run (missing mpls-iptunnel kernel module)")
- # TODO remove me when we start supporting exabgp >= 4
- try:
- p = os.popen("exabgp -v")
- line = p.readlines()
- version = line[0].split()
- if topotest.version_cmp(version[2], "4") >= 0:
- logger.warning(
- "BGP topologies are still using exabgp version 3, expect failures"
- )
- p.close()
-
- # We want to catch all exceptions
- # pylint: disable=W0702
- except:
- logger.warning("failed to find exabgp or returned error")
+ if not get_exabgp_cmd():
+ logger.warning("Failed to find exabgp < 4")
- # After we logged the output to file, remove the handler.
logger.removeHandler(fhandler)
fhandler.close()
return True
-def diagnose_env():
+def diagnose_env(rundir):
if sys.platform.startswith("linux"):
- return diagnose_env_linux()
+ return diagnose_env_linux(rundir)
elif sys.platform.startswith("freebsd"):
return diagnose_env_freebsd()
# OF THIS SOFTWARE.
#
-from collections import OrderedDict
-from json import dumps as json_dumps
-from re import search as re_search
+import json
import ipaddress
-import pytest
-import ipaddr
+import os
+from collections import OrderedDict
from copy import deepcopy
+from re import search as re_search
+import pytest
-# Import topogen and topotest helpers
-from lib.topolog import logger
-
-# Required to instantiate the topology builder class.
+from lib.bgp import create_router_bgp
from lib.common_config import (
- number_to_row,
- number_to_column,
- load_config_to_router,
+ create_bgp_community_lists,
create_interfaces_cfg,
- create_static_routes,
create_prefix_lists,
create_route_maps,
- create_bgp_community_lists,
+ create_static_routes,
create_vrf_cfg,
+ load_config_to_routers,
+ start_topology,
+ topo_daemons,
+ number_to_column,
)
-
-from lib.pim import create_pim_config, create_igmp_config
-from lib.bgp import create_router_bgp
from lib.ospf import create_router_ospf, create_router_ospf6
-
-ROUTER_LIST = []
+from lib.pim import create_igmp_config, create_pim_config
+from lib.topolog import logger
-def build_topo_from_json(tgen, topo):
+def build_topo_from_json(tgen, topo=None):
"""
Reads configuration from JSON file. Adds routers, creates interface
names dynamically and link routers as defined in JSON to create
topology. Assigns IPs dynamically to all interfaces of each router.
* `tgen`: Topogen object
- * `topo`: json file data
+ * `topo`: json file data, or use tgen.json_topo if None
"""
+ if topo is None:
+ topo = tgen.json_topo
- ROUTER_LIST = sorted(
- topo["routers"].keys(), key=lambda x: int(re_search("\d+", x).group(0))
+ router_list = sorted(
+ topo["routers"].keys(), key=lambda x: int(re_search(r"\d+", x).group(0))
)
- SWITCH_LIST = []
+ switch_list = []
if "switches" in topo:
- SWITCH_LIST = sorted(
- topo["switches"].keys(), key=lambda x: int(re_search("\d+", x).group(0))
+ switch_list = sorted(
+ topo["switches"].keys(), key=lambda x: int(re_search(r"\d+", x).group(0))
)
- listRouters = sorted(ROUTER_LIST[:])
- listSwitches = sorted(SWITCH_LIST[:])
+ listRouters = sorted(router_list[:])
+ listSwitches = sorted(switch_list[:])
listAllRouters = deepcopy(listRouters)
dictSwitches = {}
- for routerN in ROUTER_LIST:
+ for routerN in router_list:
logger.info("Topo: Add router {}".format(routerN))
tgen.add_router(routerN)
- for switchN in SWITCH_LIST:
+ for switchN in switch_list:
logger.info("Topo: Add switch {}".format(switchN))
dictSwitches[switchN] = tgen.add_switch(switchN)
# Physical Interfaces
if "links" in topo["routers"][curRouter]:
for destRouterLink, data in sorted(
- topo["routers"][curRouter]["links"].iteritems()
+ topo["routers"][curRouter]["links"].items()
):
currRouter_lo_json = topo["routers"][curRouter]["links"][destRouterLink]
# Loopback interfaces
logger.debug(
"Generated link data for router: %s\n%s",
curRouter,
- json_dumps(
+ json.dumps(
topo["routers"][curRouter]["links"], indent=4, sort_keys=True
),
)
] = "{}/{}".format(
ipv6Next, topo["link_ip_start"]["v6mask"]
)
- ipv6Next = ipaddr.IPv6Address(int(ipv6Next) + ipv6Step)
+ ipv6Next = ipaddress.IPv6Address(int(ipv6Next) + ipv6Step)
logger.debug(
"Generated link data for router: %s\n%s",
curRouter,
- json_dumps(
+ json.dumps(
topo["routers"][curRouter]["links"], indent=4, sort_keys=True
),
)
-def linux_intf_config_from_json(tgen, topo):
+def linux_intf_config_from_json(tgen, topo=None):
"""Configure interfaces from linux based on topo."""
+ if topo is None:
+ topo = tgen.json_topo
+
routers = topo["routers"]
for rname in routers:
- router = tgen.gears[rname]
+ router = tgen.net[rname]
links = routers[rname]["links"]
for rrname in links:
link = links[rrname]
else:
lname = link["interface"]
if "ipv4" in link:
- router.run("ip addr add {} dev {}".format(link["ipv4"], lname))
+ router.cmd_raises("ip addr add {} dev {}".format(link["ipv4"], lname))
if "ipv6" in link:
- router.run("ip -6 addr add {} dev {}".format(link["ipv6"], lname))
+ router.cmd_raises(
+ "ip -6 addr add {} dev {}".format(link["ipv6"], lname)
+ )
-def build_config_from_json(tgen, topo, save_bkup=True):
+def build_config_from_json(tgen, topo=None, save_bkup=True):
"""
Reads initial configuraiton from JSON for each router, builds
configuration and loads its to router.
* `tgen`: Topogen object
- * `topo`: json file data
+ * `topo`: json file data, or use tgen.json_topo if None
"""
func_dict = OrderedDict(
]
)
+ if topo is None:
+ topo = tgen.json_topo
+
data = topo["routers"]
for func_type in func_dict.keys():
logger.info("Checking for {} configuration in input data".format(func_type))
func_dict.get(func_type)(tgen, data, build=True)
- for router in sorted(topo["routers"].keys()):
- logger.debug("Configuring router {}...".format(router))
+ routers = sorted(topo["routers"].keys())
+ result = load_config_to_routers(tgen, routers, save_bkup)
+ if not result:
+ logger.info("build_config_from_json: failed to configure topology")
+ pytest.exit(1)
+
+
+def create_tgen_from_json(testfile, json_file=None):
+ """Create a topogen object given a testfile.
+
+ - `testfile` : The path to the testfile.
+ - `json_file` : The path to the json config file. If None the pathname is derived
+ from the `testfile` first by trying to replace `.py` by `.json` and if that isn't
+ present then by removing `test_` prefix as well.
+ """
+ from lib.topogen import Topogen # Topogen imports this module too
+
+ thisdir = os.path.dirname(os.path.realpath(testfile))
+ basename = os.path.basename(testfile)
+ logger.debug("starting standard JSON based module setup for %s", basename)
+
+ assert basename.startswith("test_")
+ assert basename.endswith(".py")
+ json_file = os.path.join(thisdir, basename[:-3] + ".json")
+ if not os.path.exists(json_file):
+ json_file = os.path.join(thisdir, basename[5:-3] + ".json")
+ assert os.path.exists(json_file)
+ with open(json_file, "r") as topof:
+ topo = json.load(topof)
+
+ # Create topology
+ tgen = Topogen(lambda tgen: build_topo_from_json(tgen, topo), basename[:-3])
+ tgen.json_topo = topo
+ return tgen
+
+
+def setup_module_from_json(testfile, json_file=None):
+ """Do the standard module setup for JSON based test.
+
+ * `testfile` : The path to the testfile. The name is used to derive the json config
+ file name as well (removing `test_` prefix and replacing `.py` suffix with `.json`
+ """
+ # Create topology object
+ tgen = create_tgen_from_json(testfile, json_file)
+
+ # Start routers (and their daemons)
+ start_topology(tgen, topo_daemons(tgen))
+
+ # Configure routers
+ build_config_from_json(tgen)
+ assert not tgen.routers_have_failure()
- result = load_config_to_router(tgen, router, save_bkup)
- if not result:
- logger.info("Failed while configuring {}".format(router))
- pytest.exit(1)
+ return tgen
This file defines our logging abstraction.
"""
-import sys
import logging
+import os
+import subprocess
+import sys
+
+if sys.version_info[0] > 2:
+ pass
+else:
+ pass
+
+try:
+ from xdist import is_xdist_controller
+except ImportError:
+
+ def is_xdist_controller():
+ return False
+
+
+BASENAME = "topolog"
# Helper dictionary to convert Topogen logging levels to Python's logging.
DEBUG_TOPO2LOGGING = {
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
+FORMAT = "%(asctime)s.%(msecs)03d %(levelname)s: %(name)s: %(message)s"
+
+handlers = {}
+logger = logging.getLogger("topolog")
-class InfoFilter(logging.Filter):
- def filter(self, rec):
- return rec.levelno in (logging.DEBUG, logging.INFO)
+def set_handler(l, target=None):
+ if target is None:
+ h = logging.NullHandler()
+ else:
+ if isinstance(target, str):
+ h = logging.FileHandler(filename=target, mode="w")
+ else:
+ h = logging.StreamHandler(stream=target)
+ h.setFormatter(logging.Formatter(fmt=FORMAT))
+ # Don't filter anything at the handler level
+ h.setLevel(logging.DEBUG)
+ l.addHandler(h)
+ return h
-#
-# Logger class definition
-#
+def set_log_level(l, level):
+ "Set the logging level."
+ # Messages sent to this logger only are created if this level or above.
+ log_level = DEBUG_TOPO2LOGGING.get(level, level)
+ l.setLevel(log_level)
-class Logger(object):
- """
- Logger class that encapsulates logging functions, internaly it uses Python
- logging module with a separated instance instead of global.
+def get_logger(name, log_level=None, target=None):
+ l = logging.getLogger("{}.{}".format(BASENAME, name))
- Default logging level is 'info'.
- """
+ if log_level is not None:
+ set_log_level(l, log_level)
- def __init__(self):
- # Create default global logger
- self.log_level = logging.INFO
- self.logger = logging.Logger("topolog", level=self.log_level)
+ if target is not None:
+ set_handler(l, target)
- handler_stdout = logging.StreamHandler(sys.stdout)
- handler_stdout.setLevel(logging.DEBUG)
- handler_stdout.addFilter(InfoFilter())
- handler_stdout.setFormatter(
- logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
- )
- handler_stderr = logging.StreamHandler()
- handler_stderr.setLevel(logging.WARNING)
- handler_stderr.setFormatter(
- logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
- )
+ return l
- self.logger.addHandler(handler_stdout)
- self.logger.addHandler(handler_stderr)
- # Handle more loggers
- self.loggers = {"topolog": self.logger}
+# nodeid: all_protocol_startup/test_all_protocol_startup.py::test_router_running
- def set_log_level(self, level):
- "Set the logging level"
- self.log_level = DEBUG_TOPO2LOGGING.get(level)
- self.logger.setLevel(self.log_level)
- def get_logger(self, name="topolog", log_level=None, target=sys.stdout):
- """
- Get a new logger entry. Allows creating different loggers for formating,
- filtering or handling (file, stream or stdout/stderr).
- """
- if log_level is None:
- log_level = self.log_level
- if name in self.loggers:
- return self.loggers[name]
+def get_test_logdir(nodeid=None):
+ """Get log directory relative pathname."""
+ xdist_worker = os.getenv("PYTEST_XDIST_WORKER", "")
+ mode = os.getenv("PYTEST_XDIST_MODE", "no")
- nlogger = logging.Logger(name, level=log_level)
- if isinstance(target, str):
- handler = logging.FileHandler(filename=target)
- else:
- handler = logging.StreamHandler(stream=target)
+ if not nodeid:
+ nodeid = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
- handler.setFormatter(
- logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
+ cur_test = nodeid.replace("[", "_").replace("]", "_")
+ path, testname = cur_test.split("::")
+ path = path[:-3].replace("/", ".")
+
+ # We use different logdir paths based on how xdist is running.
+ if mode == "each":
+ return os.path.join(path, testname, xdist_worker)
+ elif mode == "load":
+ return os.path.join(path, testname)
+ else:
+ assert (
+ mode == "no" or mode == "loadfile" or mode == "loadscope"
+ ), "Unknown dist mode {}".format(mode)
+
+ return path
+
+
+def logstart(nodeid, location, rundir):
+ """Called from pytest before module setup."""
+
+ mode = os.getenv("PYTEST_XDIST_MODE", "no")
+ worker = os.getenv("PYTEST_TOPOTEST_WORKER", "")
+
+ # We only per-test log in the workers (or non-dist)
+ if not worker and mode != "no":
+ return
+
+ handler_id = nodeid + worker
+ assert handler_id not in handlers
+
+ rel_log_dir = get_test_logdir(nodeid)
+ exec_log_dir = os.path.join(rundir, rel_log_dir)
+ subprocess.check_call(
+ "mkdir -p {0} && chmod 1777 {0}".format(exec_log_dir), shell=True
+ )
+ exec_log_path = os.path.join(exec_log_dir, "exec.log")
+
+ # Add test based exec log handler
+ h = set_handler(logger, exec_log_path)
+ handlers[handler_id] = h
+
+ if worker:
+ logger.info(
+ "Logging on worker %s for %s into %s", worker, handler_id, exec_log_path
)
- nlogger.addHandler(handler)
- self.loggers[name] = nlogger
- return nlogger
+ else:
+ logger.info("Logging for %s into %s", handler_id, exec_log_path)
-#
-# Global variables
-#
+def logfinish(nodeid, location):
+ """Called from pytest after module teardown."""
+ # This function may not be called if pytest is interrupted.
+
+ worker = os.getenv("PYTEST_TOPOTEST_WORKER", "")
+ handler_id = nodeid + worker
+
+ if handler_id in handlers:
+ # Remove test based exec log handler
+ if worker:
+ logger.info("Closing logs for %s", handler_id)
+
+ h = handlers[handler_id]
+ logger.removeHandler(handlers[handler_id])
+ h.flush()
+ h.close()
+ del handlers[handler_id]
+
-logger_config = Logger()
-logger = logger_config.logger
+console_handler = set_handler(logger, None)
+set_log_level(logger, "debug")
# OF THIS SOFTWARE.
#
-import json
-import os
+import difflib
import errno
-import re
-import sys
import functools
import glob
+import json
+import os
+import pdb
+import platform
+import re
+import resource
+import signal
import subprocess
+import sys
import tempfile
-import platform
-import difflib
import time
-import signal
+from copy import deepcopy
+import lib.topolog as topolog
from lib.topolog import logger
-from copy import deepcopy
if sys.version_info[0] > 2:
import configparser
+ from collections.abc import Mapping
else:
import ConfigParser as configparser
+ from collections import Mapping
-from mininet.topo import Topo
-from mininet.net import Mininet
-from mininet.node import Node, OVSSwitch, Host
-from mininet.log import setLogLevel, info
-from mininet.cli import CLI
-from mininet.link import Intf
-from mininet.term import makeTerm
+from lib import micronet
+from lib.micronet_compat import Node
g_extra_config = {}
+def get_logs_path(rundir):
+ logspath = topolog.get_test_logdir()
+ return os.path.join(rundir, logspath)
+
+
def gdb_core(obj, daemon, corefiles):
gdbcmds = """
info threads
* `d2`: parsed JSON data structure
Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
- in d1, e.g. when d2 is a "subset" of d1 without honoring any order. Otherwise an
+ in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
error report is generated and wrapped in a 'json_cmp_result()'. There are special
parameters and notations explained below which can be used to cover rather unusual
cases:
return (False, result)
+def router_json_cmp_retry(router, cmd, data, exact=False, retry_timeout=10.0):
+ """
+ Runs `cmd` that returns JSON data (normally the command ends with 'json')
+ and compare with `data` contents. Retry by default for 10 seconds
+ """
+
+ def test_func():
+ return router_json_cmp(router, cmd, data, exact)
+
+ ok, _ = run_and_expect(test_func, None, int(retry_timeout), 1)
+ return ok
+
+
def int2dpid(dpid):
"Converting Integer to DPID"
"""
Generates a temporary file in '/tmp' with `content` and returns the file name.
"""
+ if isinstance(content, list) or isinstance(content, tuple):
+ content = "\n".join(content)
fde = tempfile.NamedTemporaryFile(mode="w", delete=False)
fname = fde.name
fde.write(content)
and (callingProc != "checkAddressSanitizerError")
and (callingProc != "checkRouterCores")
and (callingProc != "stopRouter")
- and (callingProc != "__stop_internal")
and (callingProc != "stop")
and (callingProc != "stop_topology")
and (callingProc != "checkRouterRunning")
return
addressSanitizerError = re.search(
- "(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
+ r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
)
if addressSanitizerError:
processAddressSanitizerError(addressSanitizerError, output, router, component)
with open(file, "r") as asanErrorFile:
asanError = asanErrorFile.read()
addressSanitizerError = re.search(
- "(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
+ r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
)
if addressSanitizerError:
processAddressSanitizerError(
return False
-def addRouter(topo, name):
- "Adding a FRRouter to Topology"
+def _sysctl_atleast(commander, variable, min_value):
+ if isinstance(min_value, tuple):
+ min_value = list(min_value)
+ is_list = isinstance(min_value, list)
- MyPrivateDirs = [
- "/etc/frr",
- "/var/run/frr",
- "/var/log",
- ]
- if sys.platform.startswith("linux"):
- return topo.addNode(name, cls=LinuxRouter, privateDirs=MyPrivateDirs)
- elif sys.platform.startswith("freebsd"):
- return topo.addNode(name, cls=FreeBSDRouter, privateDirs=MyPrivateDirs)
+ sval = commander.cmd_raises("sysctl -n " + variable).strip()
+ if is_list:
+ cur_val = [int(x) for x in sval.split()]
+ else:
+ cur_val = int(sval)
+
+ set_value = False
+ if is_list:
+ for i, v in enumerate(cur_val):
+ if v < min_value[i]:
+ set_value = True
+ else:
+ min_value[i] = v
+ else:
+ if cur_val < min_value:
+ set_value = True
+ if set_value:
+ if is_list:
+ valstr = " ".join([str(x) for x in min_value])
+ else:
+ valstr = str(min_value)
+ logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr)
+ commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
-def set_sysctl(node, sysctl, value):
- "Set a sysctl value and return None on success or an error string"
- valuestr = "{}".format(value)
- command = "sysctl {0}={1}".format(sysctl, valuestr)
- cmdret = node.cmd(command)
+def _sysctl_assure(commander, variable, value):
+ if isinstance(value, tuple):
+ value = list(value)
+ is_list = isinstance(value, list)
- matches = re.search(r"([^ ]+) = ([^\s]+)", cmdret)
- if matches is None:
- return cmdret
- if matches.group(1) != sysctl:
- return cmdret
- if matches.group(2) != valuestr:
- return cmdret
+ sval = commander.cmd_raises("sysctl -n " + variable).strip()
+ if is_list:
+ cur_val = [int(x) for x in sval.split()]
+ else:
+ cur_val = sval
- return None
+ set_value = False
+ if is_list:
+ for i, v in enumerate(cur_val):
+ if v != value[i]:
+ set_value = True
+ else:
+ value[i] = v
+ else:
+ if cur_val != str(value):
+ set_value = True
+ if set_value:
+ if is_list:
+ valstr = " ".join([str(x) for x in value])
+ else:
+ valstr = str(value)
+ logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr)
+ commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))
-def assert_sysctl(node, sysctl, value):
- "Set and assert that the sysctl is set with the specified value."
- assert set_sysctl(node, sysctl, value) is None
+
+def sysctl_atleast(commander, variable, min_value, raises=False):
+ try:
+ if commander is None:
+ commander = micronet.Commander("topotest")
+ return _sysctl_atleast(commander, variable, min_value)
+ except subprocess.CalledProcessError as error:
+ logger.warning(
+ "%s: Failed to assure sysctl min value %s = %s",
+ commander,
+ variable,
+ min_value,
+ )
+ if raises:
+ raise
+
+
+def sysctl_assure(commander, variable, value, raises=False):
+ try:
+ if commander is None:
+ commander = micronet.Commander("topotest")
+ return _sysctl_assure(commander, variable, value)
+ except subprocess.CalledProcessError as error:
+ logger.warning(
+ "%s: Failed to assure sysctl value %s = %s",
+ commander,
+ variable,
+ value,
+ exc_info=True,
+ )
+ if raises:
+ raise
+
+
+def rlimit_atleast(rname, min_value, raises=False):
+ try:
+ cval = resource.getrlimit(rname)
+ soft, hard = cval
+ if soft < min_value:
+ nval = (min_value, hard if min_value < hard else min_value)
+ logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
+ resource.setrlimit(rname, nval)
+ except subprocess.CalledProcessError as error:
+ logger.warning(
+ "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
+ )
+ if raises:
+ raise
+
+
+def fix_netns_limits(ns):
+
+ # Maximum read and write socket buffer sizes
+ sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
+ sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])
+
+ sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
+ sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
+ sysctl_assure(ns, "net.ipv4.conf.lo.rp_filter", 0)
+
+ sysctl_assure(ns, "net.ipv4.conf.all.forwarding", 1)
+ sysctl_assure(ns, "net.ipv4.conf.default.forwarding", 1)
+
+ # XXX if things fail look here as this wasn't done previously
+ sysctl_assure(ns, "net.ipv6.conf.all.forwarding", 1)
+ sysctl_assure(ns, "net.ipv6.conf.default.forwarding", 1)
+
+ # ARP
+ sysctl_assure(ns, "net.ipv4.conf.default.arp_announce", 2)
+ sysctl_assure(ns, "net.ipv4.conf.default.arp_notify", 1)
+ # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
+ sysctl_assure(ns, "net.ipv4.conf.default.arp_ignore", 0)
+ sysctl_assure(ns, "net.ipv4.conf.all.arp_announce", 2)
+ sysctl_assure(ns, "net.ipv4.conf.all.arp_notify", 1)
+ # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
+ sysctl_assure(ns, "net.ipv4.conf.all.arp_ignore", 0)
+
+ sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)
+
+ # Keep ipv6 permanent addresses on an admin down
+ sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
+ if version_cmp(platform.release(), "4.20") >= 0:
+ sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)
+
+ sysctl_assure(ns, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
+ sysctl_assure(ns, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)
+
+ # igmp
+ sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)
+
+ # Use neigh information on selection of nexthop for multipath hops
+ sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)
+
+
+def fix_host_limits():
+ """Increase system limits."""
+
+ rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
+ rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
+ sysctl_atleast(None, "fs.file-max", 16 * 1024)
+ sysctl_atleast(None, "kernel.pty.max", 16 * 1024)
+
+ # Enable coredumps
+ # Original on ubuntu 17.x, but apport won't save as in namespace
+ # |/usr/share/apport/apport %p %s %c %d %P
+ sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
+ sysctl_assure(None, "kernel.core_uses_pid", 1)
+ sysctl_assure(None, "fs.suid_dumpable", 1)
+
+ # Maximum connection backlog
+ sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)
+
+ # Maximum read and write socket buffer sizes
+ sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
+ sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)
+
+ # Garbage Collection Settings for ARP and Neighbors
+ sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
+ sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
+ sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
+ sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
+ # Hold entries for 10 minutes
+ sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
+ sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
+
+ # igmp
+ sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)
+
+ # MLD
+ sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)
+
+ # Increase routing table size to 128K
+ sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
+ sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)
+
+
+def setup_node_tmpdir(logdir, name):
+ # Cleanup old log, valgrind, and core files.
+ subprocess.check_call(
+ "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir, name), shell=True
+ )
+
+ # Setup the per node directory.
+ nodelogdir = "{}/{}".format(logdir, name)
+ subprocess.check_call(
+ "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True
+ )
+ logfile = "{0}/{1}.log".format(logdir, name)
+ return logfile
class Router(Node):
"A Node with IPv4/IPv6 forwarding enabled"
def __init__(self, name, **params):
- super(Router, self).__init__(name, **params)
- self.logdir = params.get("logdir")
# Backward compatibility:
# Load configuration defaults like topogen.
"memleak_path": "",
}
)
+
self.config_defaults.read(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
)
# If this topology is using old API and doesn't have logdir
# specified, then attempt to generate an unique logdir.
+ self.logdir = params.get("logdir")
if self.logdir is None:
- cur_test = os.environ["PYTEST_CURRENT_TEST"]
- self.logdir = "/tmp/topotests/" + cur_test[
- cur_test.find("/") + 1 : cur_test.find(".py")
- ].replace("/", ".")
-
- # If the logdir is not created, then create it and set the
- # appropriated permissions.
- if not os.path.isdir(self.logdir):
- os.system("mkdir -p " + self.logdir + "/" + name)
- os.system("chmod -R go+rw /tmp/topotests")
- # Erase logs of previous run
- os.system("rm -rf " + self.logdir + "/" + name)
+ self.logdir = get_logs_path(g_extra_config["rundir"])
+
+ if not params.get("logger"):
+ # If logger is present topogen has already set this up
+ logfile = setup_node_tmpdir(self.logdir, name)
+ l = topolog.get_logger(name, log_level="debug", target=logfile)
+ params["logger"] = l
+
+ super(Router, self).__init__(name, **params)
self.daemondir = None
self.hasmpls = False
self.reportCores = True
self.version = None
- self.ns_cmd = "sudo nsenter -m -n -t {} ".format(self.pid)
+ self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
try:
# Allow escaping from running inside docker
cgroup = open("/proc/1/cgroup").read()
def terminate(self):
# Stop running FRR daemons
self.stopRouter()
-
- # Disable forwarding
- set_sysctl(self, "net.ipv4.ip_forward", 0)
- set_sysctl(self, "net.ipv6.conf.all.forwarding", 0)
super(Router, self).terminate()
- os.system("chmod -R go+rw /tmp/topotests")
+ os.system("chmod -R go+rw " + self.logdir)
# Return count of running daemons
def listDaemons(self):
ret = []
- rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
- errors = ""
- if re.search(r"No such file or directory", rundaemons):
- return 0
- if rundaemons is not None:
- bet = rundaemons.split("\n")
- for d in bet[:-1]:
- daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
- if daemonpid.isdigit() and pid_exists(int(daemonpid)):
- ret.append(os.path.basename(d.rstrip().rsplit(".", 1)[0]))
-
+ rc, stdout, _ = self.cmd_status(
+ "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
+ )
+ if rc:
+ return ret
+ for d in stdout.strip().split("\n"):
+ pidfile = d.strip()
+ try:
+ pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
+ name = os.path.basename(pidfile[:-4])
+
+ # probably not compatible with bsd.
+ rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
+ if rc:
+ logger.warning(
+ "%s: %s exited leaving pidfile %s (%s)",
+ self.name,
+ name,
+ pidfile,
+ pid,
+ )
+ self.cmd("rm -- " + pidfile)
+ else:
+ ret.append((name, pid))
+ except (subprocess.CalledProcessError, ValueError):
+ pass
return ret
- def stopRouter(self, wait=True, assertOnError=True, minErrorVersion="5.1"):
+ def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
# Stop Running FRR Daemons
- rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
- errors = ""
- if re.search(r"No such file or directory", rundaemons):
- return errors
- if rundaemons is not None:
- dmns = rundaemons.split("\n")
- # Exclude empty string at end of list
- for d in dmns[:-1]:
- # Only check if daemonfilepath starts with /
- # Avoids hang on "-> Connection closed" in above self.cmd()
- if d[0] == '/':
- daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
- if daemonpid.isdigit() and pid_exists(int(daemonpid)):
- daemonname = os.path.basename(d.rstrip().rsplit(".", 1)[0])
- logger.info("{}: stopping {}".format(self.name, daemonname))
- try:
- os.kill(int(daemonpid), signal.SIGTERM)
- except OSError as err:
- if err.errno == errno.ESRCH:
- logger.error(
- "{}: {} left a dead pidfile (pid={})".format(
- self.name, daemonname, daemonpid
- )
- )
- else:
- logger.info(
- "{}: {} could not kill pid {}: {}".format(
- self.name, daemonname, daemonpid, str(err)
- )
- )
-
- if not wait:
- return errors
-
- running = self.listDaemons()
+ running = self.listDaemons()
+ if not running:
+ return ""
+
+ logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
+ for name, pid in running:
+ logger.info("{}: sending SIGTERM to {}".format(self.name, name))
+ try:
+ os.kill(pid, signal.SIGTERM)
+ except OSError as err:
+ logger.info(
+ "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
+ )
- if running:
+ running = self.listDaemons()
+ if running:
+ for _ in range(0, 5):
sleep(
- 0.1,
+ 0.5,
"{}: waiting for daemons stopping: {}".format(
- self.name, ", ".join(running)
+ self.name, ", ".join([x[0] for x in running])
),
)
running = self.listDaemons()
+ if not running:
+ break
- counter = 20
- while counter > 0 and running:
- sleep(
- 0.5,
- "{}: waiting for daemons stopping: {}".format(
- self.name, ", ".join(running)
- ),
- )
- running = self.listDaemons()
- counter -= 1
-
- if running:
- # 2nd round of kill if daemons didn't exit
- dmns = rundaemons.split("\n")
- # Exclude empty string at end of list
- for d in dmns[:-1]:
- daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
- if daemonpid.isdigit() and pid_exists(int(daemonpid)):
- logger.info(
- "{}: killing {}".format(
- self.name,
- os.path.basename(d.rstrip().rsplit(".", 1)[0]),
- )
- )
- self.cmd("kill -7 %s" % daemonpid)
- self.waitOutput()
- self.cmd("rm -- {}".format(d.rstrip()))
+ if not running:
+ return ""
- if not wait:
- return errors
+ logger.warning(
+ "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
+ )
+ for name, pid in running:
+ pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
+ logger.info("%s: killing %s", self.name, name)
+ self.cmd("kill -SIGBUS %d" % pid)
+ self.cmd("rm -- " + pidfile)
+
+ sleep(
+ 0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
+ )
errors = self.checkRouterCores(reportOnce=True)
if self.checkRouterVersion("<", minErrorVersion):
# ignore errors in old versions
errors = ""
- if assertOnError and errors is not None and len(errors) > 0:
+ if assertOnError and (errors is not None) and len(errors) > 0:
assert "Errors found - details follow:" == 0, errors
return errors
def removeIPs(self):
for interface in self.intfNames():
- self.cmd("ip address flush", interface)
+ try:
+ self.intf_ip_cmd(interface, "ip address flush " + interface)
+ except Exception as ex:
+ logger.error("%s can't remove IPs %s", self, str(ex))
+ # pdb.set_trace()
+ # assert False, "can't remove IPs %s" % str(ex)
def checkCapability(self, daemon, param):
if param is not None:
return True
def loadConf(self, daemon, source=None, param=None):
+ """Enabled and set config for a daemon.
+
+ Arranges for loading of daemon configuration from the specified source. Possible
+ `source` values are `None` for an empty config file, a path name which is used
+ directly, or a file name with no path components which is first looked for
+ directly and then looked for under a sub-directory named after router.
+ """
+
+ # Unfortunately this API allowsfor source to not exist for any and all routers.
+ if source:
+ head, tail = os.path.split(source)
+ if not head and not self.path_exists(tail):
+ script_dir = os.environ["PYTEST_TOPOTEST_SCRIPTDIR"]
+ router_relative = os.path.join(script_dir, self.name, tail)
+ if self.path_exists(router_relative):
+ source = router_relative
+ self.logger.info(
+ "using router relative configuration: {}".format(source)
+ )
+
# print "Daemons before:", self.daemons
if daemon in self.daemons.keys():
self.daemons[daemon] = 1
if param is not None:
self.daemons_options[daemon] = param
- if source is None:
- self.cmd("touch /etc/%s/%s.conf" % (self.routertype, daemon))
- self.waitOutput()
+ conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
+ if source is None or not os.path.exists(source):
+ self.cmd_raises("rm -f " + conf_file)
+ self.cmd_raises("touch " + conf_file)
else:
- self.cmd("cp %s /etc/%s/%s.conf" % (source, self.routertype, daemon))
- self.waitOutput()
- self.cmd("chmod 640 /etc/%s/%s.conf" % (self.routertype, daemon))
- self.waitOutput()
- self.cmd(
- "chown %s:%s /etc/%s/%s.conf"
- % (self.routertype, self.routertype, self.routertype, daemon)
- )
- self.waitOutput()
+ self.cmd_raises("cp {} {}".format(source, conf_file))
+ self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
+ self.cmd_raises("chmod 664 {}".format(conf_file))
if (daemon == "snmpd") and (self.routertype == "frr"):
+ # /etc/snmp is private mount now
self.cmd('echo "agentXSocket /etc/frr/agentx" > /etc/snmp/frr.conf')
+ self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')
+
if (daemon == "zebra") and (self.daemons["staticd"] == 0):
# Add staticd with zebra - if it exists
- staticd_path = os.path.join(self.daemondir, "staticd")
+ try:
+ staticd_path = os.path.join(self.daemondir, "staticd")
+ except:
+ pdb.set_trace()
+
if os.path.isfile(staticd_path):
self.daemons["staticd"] = 1
self.daemons_options["staticd"] = ""
logger.info("No daemon {} known".format(daemon))
# print "Daemons after:", self.daemons
- # Run a command in a new window (gnome-terminal, screen, tmux, xterm)
def runInWindow(self, cmd, title=None):
- topo_terminal = os.getenv("FRR_TOPO_TERMINAL")
- if topo_terminal or ("TMUX" not in os.environ and "STY" not in os.environ):
- term = topo_terminal if topo_terminal else "xterm"
- makeTerm(self, title=title if title else cmd, term=term, cmd=cmd)
- else:
- nscmd = self.ns_cmd + cmd
- if "TMUX" in os.environ:
- self.cmd("tmux select-layout main-horizontal")
- wcmd = "tmux split-window -h"
- cmd = "{} {}".format(wcmd, nscmd)
- elif "STY" in os.environ:
- if os.path.exists(
- "/run/screen/S-{}/{}".format(os.environ["USER"], os.environ["STY"])
- ):
- wcmd = "screen"
- else:
- wcmd = "sudo -u {} screen".format(os.environ["SUDO_USER"])
- cmd = "{} {}".format(wcmd, nscmd)
- self.cmd(cmd)
+ return self.run_in_window(cmd, title)
def startRouter(self, tgen=None):
# Disable integrated-vtysh-config
self.hasmpls = True
if self.hasmpls != True:
return "LDP/MPLS Tests need mpls kernel modules"
+
+ # Really want to use sysctl_atleast here, but only when MPLS is actually being
+ # used
self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
shell_routers = g_extra_config["shell"]
if "all" in shell_routers or self.name in shell_routers:
- self.runInWindow(os.getenv("SHELL", "bash"))
-
- vtysh_routers = g_extra_config["vtysh"]
- if "all" in vtysh_routers or self.name in vtysh_routers:
- self.runInWindow("vtysh")
+ self.run_in_window(os.getenv("SHELL", "bash"))
if self.daemons["eigrpd"] == 1:
eigrpd_path = os.path.join(self.daemondir, "eigrpd")
logger.info("BFD Test, but no bfdd compiled or installed")
return "BFD Test, but no bfdd compiled or installed"
- return self.startRouterDaemons(tgen=tgen)
+ status = self.startRouterDaemons(tgen=tgen)
+
+ vtysh_routers = g_extra_config["vtysh"]
+ if "all" in vtysh_routers or self.name in vtysh_routers:
+ self.run_in_window("vtysh")
+
+ return status
def getStdErr(self, daemon):
return self.getLog("err", daemon)
return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))
def startRouterDaemons(self, daemons=None, tgen=None):
- "Starts all FRR daemons for this router."
+ "Starts FRR daemons for this router."
asan_abort = g_extra_config["asan_abort"]
gdb_breakpoints = g_extra_config["gdb_breakpoints"]
valgrind_memleaks = g_extra_config["valgrind_memleaks"]
strace_daemons = g_extra_config["strace_daemons"]
- bundle_data = ""
-
- if os.path.exists("/etc/frr/support_bundle_commands.conf"):
- bundle_data = subprocess.check_output(
- ["cat /etc/frr/support_bundle_commands.conf"], shell=True
+ # Get global bundle data
+ if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
+ # Copy global value if was covered by namespace mount
+ bundle_data = ""
+ if os.path.exists("/etc/frr/support_bundle_commands.conf"):
+ with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
+ bundle_data = rf.read()
+ self.cmd_raises(
+ "cat > /etc/frr/support_bundle_commands.conf",
+ stdin=bundle_data,
)
- self.cmd(
- "echo '{}' > /etc/frr/support_bundle_commands.conf".format(bundle_data)
- )
# Starts actual daemons without init (ie restart)
# cd to per node directory
- self.cmd("install -d {}/{}".format(self.logdir, self.name))
- self.cmd("cd {}/{}".format(self.logdir, self.name))
+ self.cmd("install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name))
+ self.set_cwd("{}/{}".format(self.logdir, self.name))
self.cmd("umask 000")
# Re-enable to allow for report per run
cmdenv = "ASAN_OPTIONS="
if asan_abort:
cmdenv = "abort_on_error=1:"
- cmdenv += "log_path={0}/{1}.{2}.asan ".format(self.logdir, self.name, daemon)
+ cmdenv += "log_path={0}/{1}.{2}.asan ".format(
+ self.logdir, self.name, daemon
+ )
if valgrind_memleaks:
- this_dir = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
- supp_file = os.path.abspath(os.path.join(this_dir, "../../../tools/valgrind.supp"))
- cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(daemon, self.logdir, self.name, supp_file)
+ this_dir = os.path.dirname(
+ os.path.abspath(os.path.realpath(__file__))
+ )
+ supp_file = os.path.abspath(
+ os.path.join(this_dir, "../../../tools/valgrind.supp")
+ )
+ cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
+ daemon, self.logdir, self.name, supp_file
+ )
if valgrind_extra:
- cmdenv += "--gen-suppressions=all --expensive-definedness-checks=yes"
+ cmdenv += (
+ "--gen-suppressions=all --expensive-definedness-checks=yes"
+ )
elif daemon in strace_daemons or "all" in strace_daemons:
- cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(daemon, self.logdir, self.name)
+ cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
+ daemon, self.logdir, self.name
+ )
cmdopt = "{} --log file:{}.log --log-level debug".format(
daemon_opts, daemon
gdbcmd += " -ex 'b {}'".format(bp)
gdbcmd += " -ex 'run {}'".format(cmdopt)
- self.runInWindow(gdbcmd, daemon)
+ self.run_in_window(gdbcmd, daemon)
+
+ logger.info(
+ "%s: %s %s launched in gdb window", self, self.routertype, daemon
+ )
else:
if daemon != "snmpd":
cmdopt += " -d "
cmdopt += rediropt
- self.cmd(" ".join([cmdenv, binary, cmdopt]))
- logger.info("{}: {} {} started".format(self, self.routertype, daemon))
+
+ try:
+ self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
+ except subprocess.CalledProcessError as error:
+ self.logger.error(
+ '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
+ self,
+ daemon,
+ error.returncode,
+ error.cmd,
+ '\n:stdout: "{}"'.format(error.stdout.strip())
+ if error.stdout
+ else "",
+ '\n:stderr: "{}"'.format(error.stderr.strip())
+ if error.stderr
+ else "",
+ )
+ else:
+ logger.info("%s: %s %s started", self, self.routertype, daemon)
# Start Zebra first
if "zebra" in daemons_list:
daemons_list.remove("staticd")
if "snmpd" in daemons_list:
+ # Give zerbra a chance to configure interface addresses that snmpd daemon
+ # may then use.
+ time.sleep(2)
+
start_daemon("snmpd")
while "snmpd" in daemons_list:
daemons_list.remove("snmpd")
- # Fix Link-Local Addresses
- # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
- self.cmd(
- "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done"
- )
+ if daemons is None:
+ # Fix Link-Local Addresses on initial startup
+ # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
+ _, output, _ = self.cmd_status(
+ "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
+ stderr=subprocess.STDOUT,
+ )
+ logger.debug("Set MACs:\n%s", output)
# Now start all the other daemons
for daemon in daemons_list:
if re.search(r"No such file or directory", rundaemons):
return "Daemons are not running"
+ # Update the permissions on the log files
+ self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
+ self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))
+
return ""
def killRouterDaemons(
)
)
self.cmd("kill -9 %s" % daemonpid)
- self.waitOutput()
if pid_exists(int(daemonpid)):
numRunning += 1
if wait and numRunning > 0:
)
)
self.cmd("kill -9 %s" % daemonpid)
- self.waitOutput()
self.cmd("rm -- {}".format(d.rstrip()))
if wait:
errors = self.checkRouterCores(reportOnce=True)
leakfile.close()
-class LinuxRouter(Router):
- "A Linux Router Node with IPv4/IPv6 forwarding enabled."
-
- def __init__(self, name, **params):
- Router.__init__(self, name, **params)
-
- def config(self, **params):
- Router.config(self, **params)
- # Enable forwarding on the router
- assert_sysctl(self, "net.ipv4.ip_forward", 1)
- assert_sysctl(self, "net.ipv6.conf.all.forwarding", 1)
- # Enable coredumps
- assert_sysctl(self, "kernel.core_uses_pid", 1)
- assert_sysctl(self, "fs.suid_dumpable", 1)
- # this applies to the kernel not the namespace...
- # original on ubuntu 17.x, but apport won't save as in namespace
- # |/usr/share/apport/apport %p %s %c %d %P
- corefile = "%e_core-sig_%s-pid_%p.dmp"
- assert_sysctl(self, "kernel.core_pattern", corefile)
-
- def terminate(self):
- """
- Terminate generic LinuxRouter Mininet instance
- """
- set_sysctl(self, "net.ipv4.ip_forward", 0)
- set_sysctl(self, "net.ipv6.conf.all.forwarding", 0)
- Router.terminate(self)
-
-
-class FreeBSDRouter(Router):
- "A FreeBSD Router Node with IPv4/IPv6 forwarding enabled."
-
- def __init__(self, name, **params):
- Router.__init__(self, name, **params)
-
-
-class LegacySwitch(OVSSwitch):
- "A Legacy Switch without OpenFlow"
-
- def __init__(self, name, **params):
- OVSSwitch.__init__(self, name, failMode="standalone", **params)
- self.switchIP = None
-
-
def frr_unicode(s):
"""Convert string to unicode, depending on python version"""
if sys.version_info[0] > 2:
return s
else:
- return unicode(s)
+ return unicode(s) # pylint: disable=E0602
+
+
+def is_mapping(o):
+ return isinstance(o, Mapping)
import os
import sys
-import json
from functools import partial
import pytest
-import socket
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
+
+# Required to instantiate the topology builder class.
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
+from lib.pim import McastTesterHelper
pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd, pytest.mark.pimd]
-#
-# Test global variables:
-# They are used to handle communicating with external application.
-#
-APP_SOCK_PATH = '/tmp/topotests/apps.sock'
-HELPER_APP_PATH = os.path.join(CWD, "../lib/mcast-tester.py")
-app_listener = None
-app_clients = {}
-
-def listen_to_applications():
- "Start listening socket to connect with applications."
- # Remove old socket.
- try:
- os.unlink(APP_SOCK_PATH)
- except OSError:
- pass
-
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
- sock.bind(APP_SOCK_PATH)
- sock.listen(10)
- global app_listener
- app_listener = sock
-
-def accept_host(host):
- "Accept connection from application running in hosts."
- global app_listener, app_clients
- conn = app_listener.accept()
- app_clients[host] = {
- 'fd': conn[0],
- 'address': conn[1]
- }
-
-def close_applications():
- "Signal applications to stop and close all sockets."
- global app_listener, app_clients
-
- # Close listening socket.
- app_listener.close()
-
- # Remove old socket.
- try:
- os.unlink(APP_SOCK_PATH)
- except OSError:
- pass
-
- # Close all host connections.
- for host in ["h1", "h2"]:
- if app_clients.get(host) is None:
- continue
- app_clients["h1"]["fd"].close()
-
-
-class MSDPMeshTopo1(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # Create 3 routers
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
-
- # Create stub networks for multicast traffic.
- tgen.add_host("h1", "192.168.10.2/24", "192.168.10.1")
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["h1"])
-
- tgen.add_host("h2", "192.168.30.2/24", "192.168.30.1")
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["h2"])
+app_helper = McastTesterHelper()
+
+
+def build_topo(tgen):
+ "Build function"
+
+ # Create 3 routers
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+
+ # Create stub networks for multicast traffic.
+ tgen.add_host("h1", "192.168.10.2/24", "via 192.168.10.1")
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["h1"])
+
+ tgen.add_host("h2", "192.168.30.2/24", "via 192.168.30.1")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["h2"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(MSDPMeshTopo1, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
# Initialize all routers.
tgen.start_router()
- # Start applications socket.
- listen_to_applications()
+ app_helper.init(tgen)
def test_wait_ospf_convergence():
topotest.router_json_cmp,
tgen.gears[router],
"show {} route json".format(iptype),
- {route: [{"protocol": proto}]}
+ {route: [{"protocol": proto}]},
)
_, result = topotest.run_and_expect(test_func, None, count=40, wait=1)
assertmsg = '"{}" OSPF convergence failure'.format(router)
logger.info("test MSDP convergence")
- tgen.gears["h1"].run("{} --send='0.7' '{}' '{}' '{}' &".format(
- HELPER_APP_PATH, APP_SOCK_PATH, '229.0.1.10', 'h1-eth0'))
- accept_host("h1")
-
- tgen.gears["h2"].run("{} '{}' '{}' '{}' &".format(
- HELPER_APP_PATH, APP_SOCK_PATH, '229.0.1.10', 'h2-eth0'))
- accept_host("h2")
-
def expect_msdp_peer(router, peer, sa_count=0):
"Expect MSDP peer connection to be established with SA amount."
- logger.info("waiting MSDP connection from peer {} on router {}".format(peer, router))
+ logger.info(
+ "waiting MSDP connection from peer {} on router {}".format(peer, router)
+ )
test_func = partial(
topotest.router_json_cmp,
tgen.gears[router],
"show ip msdp peer json",
- {peer: {"state": "established", "saCount": sa_count}}
+ {peer: {"state": "established", "saCount": sa_count}},
)
_, result = topotest.run_and_expect(test_func, None, count=40, wait=2)
assertmsg = '"{}" MSDP connection failure'.format(router)
assert result is None, assertmsg
+ mcastaddr = "229.0.1.10"
+ logger.info("Starting helper1")
+ app_helper.run("h1", ["--send=0.7", mcastaddr, "h1-eth0"])
+
+ logger.info("Starting helper2")
+ app_helper.run("h2", [mcastaddr, "h2-eth0"])
+
# R1 peers.
expect_msdp_peer("r1", "10.254.254.2")
expect_msdp_peer("r1", "10.254.254.3")
topotest.router_json_cmp,
tgen.gears[router],
"show ip msdp sa json",
- {group: {source: {"local": local, "rp": rp, "sptSetup": spt_setup}}}
+ {group: {source: {"local": local, "rp": rp, "sptSetup": spt_setup}}},
)
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assertmsg = '"{}" MSDP SA failure'.format(router)
def teardown_module(_mod):
"Teardown the pytest environment"
tgen = get_topogen()
- close_applications()
+ app_helper.cleanup()
tgen.stop_topology()
import os
import sys
import json
-import socket
-import tempfile
from functools import partial
import pytest
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib import topotest
+
+# Required to instantiate the topology builder class.
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
+from lib.pim import McastTesterHelper
pytestmark = [pytest.mark.bgpd, pytest.mark.pimd]
-#
-# Test global variables:
-# They are used to handle communicating with external application.
-#
-APP_SOCK_PATH = '/tmp/topotests/apps.sock'
-HELPER_APP_PATH = os.path.join(CWD, "../lib/mcast-tester.py")
-app_listener = None
-app_clients = {}
-
-
-def listen_to_applications():
- "Start listening socket to connect with applications."
- # Remove old socket.
- try:
- os.unlink(APP_SOCK_PATH)
- except OSError:
- pass
-
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
- sock.bind(APP_SOCK_PATH)
- sock.listen(10)
- global app_listener
- app_listener = sock
-
-
-def accept_host(host):
- "Accept connection from application running in hosts."
- global app_listener, app_clients
- conn = app_listener.accept()
- app_clients[host] = {
- 'fd': conn[0],
- 'address': conn[1]
- }
-
-
-def close_applications():
- "Signal applications to stop and close all sockets."
- global app_listener, app_clients
-
- # Close listening socket.
- app_listener.close()
-
- # Remove old socket.
- try:
- os.unlink(APP_SOCK_PATH)
- except OSError:
- pass
+app_helper = McastTesterHelper()
- # Close all host connections.
- for host in ["h1", "h2"]:
- if app_clients.get(host) is None:
- continue
- app_clients[host]["fd"].close()
+def build_topo(tgen):
+ "Build function"
-class MSDPTopo1(Topo):
- "Test topology builder"
+ # Create 4 routers
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- # Create 4 routers
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s4")
+ # switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r4"])
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r4"])
- switch = tgen.add_switch("s4")
- #switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r4"])
+ # Create a host connected and direct at r4:
+ tgen.add_host("h1", "192.168.4.100/24", "via 192.168.4.1")
+ switch.add_link(tgen.gears["h1"])
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r4"])
-
- # Create a host connected and direct at r4:
- tgen.add_host("h1", "192.168.4.100/24", "192.168.4.1")
- switch.add_link(tgen.gears["h1"])
-
- # Create a host connected and direct at r1:
- switch = tgen.add_switch("s6")
- tgen.add_host("h2", "192.168.10.100/24", "192.168.10.1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["h2"])
+ # Create a host connected and direct at r1:
+ switch = tgen.add_switch("s6")
+ tgen.add_host("h2", "192.168.10.100/24", "via 192.168.10.1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["h2"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(MSDPTopo1, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
# Initialize all routers.
tgen.start_router()
- # Start applications socket.
- listen_to_applications()
+ app_helper.init(tgen)
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
- close_applications()
+ app_helper.cleanup()
tgen.stop_topology()
expect_loopback_route("r4", "ip", "10.254.254.3/32", "bgp")
-def test_mroute_install():
+def _test_mroute_install():
"Test that multicast routes propagated and installed"
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- tgen.gears["h1"].run("{} '{}' '{}' '{}' &".format(
- HELPER_APP_PATH, APP_SOCK_PATH, '229.1.2.3', 'h1-eth0'))
- accept_host("h1")
-
- tgen.gears["h2"].run("{} --send='0.7' '{}' '{}' '{}' &".format(
- HELPER_APP_PATH, APP_SOCK_PATH, '229.1.2.3', 'h2-eth0'))
- accept_host("h2")
-
#
# Test R1 mroute
#
expect_1 = {
- '229.1.2.3': {
- '192.168.10.100': {
- 'iif': 'r1-eth2',
- 'flags': 'SFT',
- 'oil': {
- 'r1-eth0': {
- 'source': '192.168.10.100',
- 'group': '229.1.2.3'
- },
- 'r1-eth1': None
- }
+ "229.1.2.3": {
+ "192.168.10.100": {
+ "iif": "r1-eth2",
+ "flags": "SFT",
+ "oil": {
+ "r1-eth0": {"source": "192.168.10.100", "group": "229.1.2.3"},
+ "r1-eth1": None,
+ },
}
}
}
# Create a deep copy of `expect_1`.
expect_2 = json.loads(json.dumps(expect_1))
# The route will be either via R2 or R3.
- expect_2['229.1.2.3']['192.168.10.100']['oil']['r1-eth0'] = None
- expect_2['229.1.2.3']['192.168.10.100']['oil']['r1-eth1'] = {
- 'source': '192.168.10.100',
- 'group': '229.1.2.3'
+ expect_2["229.1.2.3"]["192.168.10.100"]["oil"]["r1-eth0"] = None
+ expect_2["229.1.2.3"]["192.168.10.100"]["oil"]["r1-eth1"] = {
+ "source": "192.168.10.100",
+ "group": "229.1.2.3",
}
def test_r1_mroute():
"Test r1 multicast routing table function"
- out = tgen.gears['r1'].vtysh_cmd('show ip mroute json', isjson=True)
+ out = tgen.gears["r1"].vtysh_cmd("show ip mroute json", isjson=True)
if topotest.json_cmp(out, expect_1) is None:
return None
return topotest.json_cmp(out, expect_2)
- logger.info('Waiting for R1 multicast routes')
+ logger.info("Waiting for R1 multicast routes")
_, val = topotest.run_and_expect(test_r1_mroute, None, count=55, wait=2)
- assert val is None, 'multicast route convergence failure'
+ assert val is None, "multicast route convergence failure"
#
# Test routers 2 and 3.
"source": "192.168.10.100",
"group": "229.1.2.3",
}
- }
+ },
}
}
}
"source": "192.168.10.100",
"group": "229.1.2.3",
}
- }
+ },
}
}
}
def test_r2_r3_mroute():
"Test r2/r3 multicast routing table function"
- r2_out = tgen.gears['r2'].vtysh_cmd('show ip mroute json', isjson=True)
- r3_out = tgen.gears['r3'].vtysh_cmd('show ip mroute json', isjson=True)
+ r2_out = tgen.gears["r2"].vtysh_cmd("show ip mroute json", isjson=True)
+ r3_out = tgen.gears["r3"].vtysh_cmd("show ip mroute json", isjson=True)
if topotest.json_cmp(r2_out, expect_r2) is not None:
return topotest.json_cmp(r3_out, expect_r3)
return topotest.json_cmp(r2_out, expect_r2)
- logger.info('Waiting for R2 and R3 multicast routes')
+ logger.info("Waiting for R2 and R3 multicast routes")
_, val = topotest.run_and_expect(test_r2_r3_mroute, None, count=55, wait=2)
- assert val is None, 'multicast route convergence failure'
+ assert val is None, "multicast route convergence failure"
#
# Test router 4
"source": "*",
"group": "229.1.2.3",
"inboundInterface": "lo",
- "outboundInterface": "pimreg"
+ "outboundInterface": "pimreg",
},
"r4-eth2": {
"source": "*",
"group": "229.1.2.3",
"inboundInterface": "lo",
- "outboundInterface": "r4-eth2"
- }
- }
+ "outboundInterface": "r4-eth2",
+ },
+ },
},
"192.168.10.100": {
"iif": "r4-eth0",
"inboundInterface": "r4-eth0",
"outboundInterface": "r4-eth2",
}
- }
- }
+ },
+ },
}
}
test_func = partial(
topotest.router_json_cmp,
- tgen.gears['r4'], "show ip mroute json", expect_4,
+ tgen.gears["r4"],
+ "show ip mroute json",
+ expect_4,
)
- logger.info('Waiting for R4 multicast routes')
+ logger.info("Waiting for R4 multicast routes")
_, val = topotest.run_and_expect(test_func, None, count=55, wait=2)
- assert val is None, 'multicast route convergence failure'
+ assert val is None, "multicast route convergence failure"
+
+
+def test_mroute_install():
+ tgen = get_topogen()
+ # pytest.skip("FOO")
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ logger.info("Starting helper1")
+ mcastaddr = "229.1.2.3"
+ app_helper.run("h1", [mcastaddr, "h1-eth0"])
+
+ logger.info("Starting helper2")
+ app_helper.run("h2", ["--send=0.7", mcastaddr, "h2-eth0"])
+
+ _test_mroute_install()
def test_msdp():
"192.168.0.2": {
"peer": "192.168.0.2",
"local": "192.168.0.1",
- "state": "established"
+ "state": "established",
},
"192.168.1.2": {
"peer": "192.168.1.2",
"local": "192.168.1.1",
- "state": "established"
- }
+ "state": "established",
+ },
}
r1_sa_expect = {
"229.1.2.3": {
"group": "229.1.2.3",
"rp": "-",
"local": "yes",
- "sptSetup": "-"
+ "sptSetup": "-",
}
}
}
"192.168.0.1": {
"peer": "192.168.0.1",
"local": "192.168.0.2",
- "state": "established"
+ "state": "established",
},
"192.168.2.2": {
"peer": "192.168.2.2",
"local": "192.168.2.1",
- "state": "established"
- }
+ "state": "established",
+ },
}
# Only R2 or R3 will get this SA.
r2_r3_sa_expect = {
"192.168.1.1": {
"peer": "192.168.1.1",
"local": "192.168.1.2",
- "state": "established"
+ "state": "established",
},
- #"192.169.3.2": {
+ # "192.169.3.2": {
# "peer": "192.168.3.2",
# "local": "192.168.3.1",
# "state": "established"
- #}
+ # }
}
r4_expect = {
"192.168.2.1": {
"peer": "192.168.2.1",
"local": "192.168.2.2",
- "state": "established"
+ "state": "established",
},
- #"192.168.3.1": {
+ # "192.168.3.1": {
# "peer": "192.168.3.1",
# "local": "192.168.3.2",
# "state": "established"
- #}
+ # }
}
r4_sa_expect = {
"229.1.2.3": {
"group": "229.1.2.3",
"rp": "192.168.1.1",
"local": "no",
- "sptSetup": "yes"
+ "sptSetup": "yes",
}
}
}
- for router in [('r1', r1_expect, r1_sa_expect),
- ('r2', r2_expect, r2_r3_sa_expect),
- ('r3', r3_expect, r2_r3_sa_expect),
- ('r4', r4_expect, r4_sa_expect)]:
+ for router in [
+ ("r1", r1_expect, r1_sa_expect),
+ ("r2", r2_expect, r2_r3_sa_expect),
+ ("r3", r3_expect, r2_r3_sa_expect),
+ ("r4", r4_expect, r4_sa_expect),
+ ]:
test_func = partial(
topotest.router_json_cmp,
- tgen.gears[router[0]], "show ip msdp peer json", router[1]
+ tgen.gears[router[0]],
+ "show ip msdp peer json",
+ router[1],
)
- logger.info('Waiting for {} msdp peer data'.format(router[0]))
+ logger.info("Waiting for {} msdp peer data".format(router[0]))
_, val = topotest.run_and_expect(test_func, None, count=30, wait=1)
- assert val is None, 'multicast route convergence failure'
+ assert val is None, "multicast route convergence failure"
test_func = partial(
topotest.router_json_cmp,
- tgen.gears[router[0]], "show ip msdp sa json", router[2]
+ tgen.gears[router[0]],
+ "show ip msdp sa json",
+ router[2],
)
- logger.info('Waiting for {} msdp SA data'.format(router[0]))
+ logger.info("Waiting for {} msdp SA data".format(router[0]))
_, val = topotest.run_and_expect(test_func, None, count=30, wait=1)
- assert val is None, 'multicast route convergence failure'
+ assert val is None, "multicast route convergence failure"
def test_memory_leak():
import os
import sys
-import json
import time
import pytest
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
step,
addKernelRoute,
create_static_routes,
- iperfSendIGMPJoin,
stop_router,
start_router,
shutdown_bringup_interface,
reset_config_on_routers,
do_countdown,
apply_raw_config,
- kill_iperf,
run_frr_cmd,
required_linux_kernel_version,
topo_daemons,
clear_ip_mroute,
clear_ip_pim_interface_traffic,
verify_pim_interface_traffic,
+ McastTesterHelper,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.pimd, pytest.mark.staticd]
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/mcast_pim_bsmp_01.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
+pytestmark = [pytest.mark.pimd, pytest.mark.staticd]
TOPOLOGY = """
BSR2_ADDR = "10.2.1.1/32"
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/mcast_pim_bsmp_01.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
# Creating configuration from JSON
build_config_from_json(tgen, topo)
+ # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
tgen = get_topogen()
+ app_helper.cleanup()
+
# Stop toplogy and Remove tmp files
tgen.stop_topology()
result = create_static_routes(tgen, input_dict)
assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
- # Add kernal route for source
- group = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["pkt_dst"]
- bsr_interface = topo["routers"][bsr]["links"][fhr]["interface"]
- result = addKernelRoute(tgen, bsr, bsr_interface, group)
- assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
-
# RP Mapping
rp_mapping = topo["routers"][bsr]["bsm"]["bsr_packets"][packet]["rp_mapping"]
if int(mask) == 32:
group = group.split("/")[0]
- # Add kernal routes for sender
- s_interface = topo["routers"][sender]["links"][fhr]["interface"]
- result = addKernelRoute(tgen, sender, s_interface, group)
- assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
-
- # Add kernal routes for receiver
- r_interface = topo["routers"][receiver]["links"][lhr]["interface"]
- result = addKernelRoute(tgen, receiver, r_interface, group)
- assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
-
# Add static routes for RPs in FHR and LHR
next_hop_fhr = topo["routers"][rp]["links"][fhr]["ipv4"].split("/")[0]
next_hop_lhr = topo["routers"][rp]["links"][lhr]["ipv4"].split("/")[0]
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
reset_config_on_routers(tgen)
step("pre-configure BSM packet")
step("Configure cisco-1 as BSR1 1.1.2.7")
result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet9")
assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
do_countdown(5)
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
reset_config_on_routers(tgen)
step("pre-configure BSM packet")
step("Configure cisco-1 as BSR1 1.1.2.7")
state_before = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
state_before, dict
- ), "Testcase{} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase{} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, result
+ )
step("Sending BSR after Configure black hole address for BSR and candidate RP")
step("Send BSR packet from b1 to FHR")
state_after = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
state_after, dict
- ), "Testcase{} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase{} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, result
+ )
result = verify_state_incremented(state_before, state_after)
assert result is not True, "Testcase{} : Failed Error: {}".format(tc_name, result)
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
reset_config_on_routers(tgen)
result = pre_config_to_bsm(
bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
time.sleep(1)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify bsr state in FHR
stop_router(tgen, "i1")
start_router(tgen, "i1")
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify again if BSR is installed from bsm forwarded by f1
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
reset_config_on_routers(tgen)
result = pre_config_to_bsm(
bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
time.sleep(1)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Use scapy to send pre-defined packet from senser to receiver
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
reset_config_on_routers(tgen)
result = pre_config_to_bsm(
bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
time.sleep(1)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify bsr state in FHR
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
reset_config_on_routers(tgen)
result = pre_config_to_bsm(
bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
time.sleep(1)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify bsr state in FHR
assert (
rp_add1 == rp2[group]
), "Testcase {} :Failed \n Error : rp expected {} rp received {}".format(
- tc_name,
- rp_add1,
+ tc_name, rp_add1, rp2[group]
)
# Verify if that rp is installed
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Send IGMP join to LHR
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
do_countdown(5)
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
result = pre_config_to_bsm(
tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
)
assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
# Send IGMP join for group 225.1.1.1 from receiver
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify bsr state in FHR
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
reset_config_on_routers(tgen)
result = pre_config_to_bsm(
bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
time.sleep(1)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify bsr state in FHR
import os
import sys
-import json
import time
import pytest
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
step,
addKernelRoute,
create_static_routes,
- iperfSendIGMPJoin,
- stop_router,
- start_router,
- shutdown_bringup_interface,
- kill_router_daemons,
- start_router_daemons,
reset_config_on_routers,
- do_countdown,
- apply_raw_config,
- kill_iperf,
run_frr_cmd,
required_linux_kernel_version,
topo_daemons,
)
from lib.pim import (
- create_pim_config,
add_rp_interfaces_and_pim_config,
reconfig_interfaces,
scapy_send_bsr_raw_packet,
verify_upstream_iif,
verify_igmp_groups,
verify_ip_pim_upstream_rpf,
- enable_disable_pim_unicast_bsm,
- enable_disable_pim_bsm,
clear_ip_mroute,
clear_ip_pim_interface_traffic,
- verify_pim_interface_traffic,
+ McastTesterHelper,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.pimd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/mcast_pim_bsmp_02.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
TOPOLOGY = """
b1_____
BSR2_ADDR = "10.2.1.1/32"
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/mcast_pim_bsmp_02.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
# Creating configuration from JSON
build_config_from_json(tgen, topo)
+ # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
tgen = get_topogen()
+ app_helper.cleanup()
+
# Stop toplogy and Remove tmp files
tgen.stop_topology()
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
reset_config_on_routers(tgen)
result = pre_config_to_bsm(
bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
time.sleep(1)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify bsr state in FHR
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
reset_config_on_routers(tgen)
result = pre_config_to_bsm(
bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
time.sleep(1)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify bsr state in FHR
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
reset_config_on_routers(tgen)
result = pre_config_to_bsm(
bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
time.sleep(1)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify bsr state in FHR
assert (
rp_add1 == rp2[group]
), "Testcase {} :Failed \n Error : rp expected {} rp received {}".format(
- tc_name,
- rp_add1,
- rp2[group] if group in rp2 else None
+ tc_name, rp_add1, rp2[group] if group in rp2 else None
)
# Verify if that rp is installed
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
]
time.sleep(1)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify bsr state in FHR
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
- clear_ip_mroute(tgen)
- reset_config_on_routers(tgen)
- clear_ip_pim_interface_traffic(tgen, topo)
-
# Don"t run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
+ app_helper.stop_all_hosts()
+ clear_ip_mroute(tgen)
+ reset_config_on_routers(tgen)
+ clear_ip_pim_interface_traffic(tgen, topo)
+
reset_config_on_routers(tgen)
result = pre_config_to_bsm(
bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet1"]["bsr"].split("/")[0]
time.sleep(1)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
dut = "l1"
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
reset_config_on_routers(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
result = pre_config_to_bsm(
tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
)
result = scapy_send_bsr_raw_packet(tgen, topo, "b1", "f1", "packet2")
assert result is True, "Testcase {} :Failed \n Error {}".format(tc_name, result)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
# Verify bsr state in FHR
tc_name = request.node.name
write_test_header(tc_name)
- kill_iperf(tgen)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
step("pre-configure BSM packet")
result = pre_config_to_bsm(
tgen, topo, tc_name, "b1", "s1", "r1", "f1", "i1", "l1", "packet1"
bsr_ip = topo["routers"]["b1"]["bsm"]["bsr_packets"]["packet8"]["bsr"].split("/")[0]
time.sleep(1)
- result = iperfSendIGMPJoin(tgen, "r1", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r1", GROUP_ADDRESS, "l1")
assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result)
dut = "l1"
import os
import sys
-import json
import time
-import datetime
from time import sleep
import pytest
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
step,
- iperfSendIGMPJoin,
- addKernelRoute,
apply_raw_config,
reset_config_on_routers,
- iperfSendTraffic,
- kill_iperf,
shutdown_bringup_interface,
- kill_router_daemons,
- start_router,
- start_router_daemons,
- stop_router,
required_linux_kernel_version,
topo_daemons,
)
verify_ip_mroutes,
verify_pim_interface_traffic,
verify_upstream_iif,
- verify_pim_neighbors,
- verify_pim_state,
verify_ip_pim_join,
clear_ip_mroute,
clear_ip_pim_interface_traffic,
verify_igmp_config,
- clear_ip_mroute_verify,
+ McastTesterHelper,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.pimd]
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/multicast_pim_sm_topo1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
+pytestmark = [pytest.mark.pimd]
TOPOLOGY = """
Description:
i1, i2, i3. i4, i5, i6, i7, i8 - FRR running iperf to send IGMP
join and traffic
- l1 - LHR
- f1 - FHR
+ l1 - LHR (last hop router)
+ f1 - FHR (first hop router)
r2 - FRR router
c1 - FRR router
c2 - FRR router
IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"]
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
- tgen = Topogen(CreateTopo, mod.__name__)
+ testdir = os.path.dirname(os.path.realpath(__file__))
+ json_file = "{}/multicast_pim_sm_topo1.json".format(testdir)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
- daemons = topo_daemons(tgen, topo)
+ daemons = topo_daemons(tgen, tgen.json_topo)
# Starting topology, create tmp files which are loaded to routers
# to start deamons and then start routers
pytest.skip(tgen.errors)
# Creating configuration from JSON
- build_config_from_json(tgen, topo)
+ build_config_from_json(tgen, tgen.json_topo)
+
+ # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
logger.info("Running setup_module() done")
tgen = get_topogen()
+ app_helper.cleanup()
+
# Stop toplogy and Remove tmp files
tgen.stop_topology()
#####################################################
-def config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False
-):
- """
- API to do pre-configuration to send IGMP join and multicast
- traffic
-
- parameters:
- -----------
- * `tgen`: topogen object
- * `topo`: input json data
- * `tc_name`: caller test case name
- * `iperf`: router running iperf
- * `iperf_intf`: interface name router running iperf
- * `GROUP_RANGE`: group range
- * `join`: IGMP join, default False
- * `traffic`: multicast traffic, default False
- """
-
- if join:
- # Add route to kernal
- result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- if traffic:
- # Add route to kernal
- result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- router_list = tgen.routers()
- for router in router_list.keys():
- if router == iperf:
- continue
-
- rnode = router_list[router]
- rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
-
- return True
-
-
def verify_state_incremented(state_before, state_after):
"""
API to compare interface traffic state incrementing
"""
tgen = get_topogen()
+ topo = tgen.json_topo
tc_name = request.node.name
write_test_header(tc_name)
pytest.skip(tgen.errors)
step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
- intf_i1_l1 = topo["routers"]["i1"]["links"]["l1"]["interface"]
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i1", intf_i1_l1, GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- step("joinRx value before join sent")
+ step("get joinRx value before join")
intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"]
state_dict = {"r2": {intf_r2_l1: ["joinRx"]}}
state_before = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
state_before, dict
- ), "Testcase {} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, state_before
+ )
- result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1)
+ result = app_helper.run_join("i1", IGMP_JOIN, "l1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Send the IGMP join first and then start the traffic")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to 225.1.1.1 receiver")
- intf_i2_f1 = topo["routers"]["i2"]["links"]["f1"]["interface"]
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i2", intf_i2_f1, GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ result = app_helper.run_traffic("i2", IGMP_JOIN, "f1")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
state_after = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
state_after, dict
- ), "Testcase {} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, result
+ )
step(
"l1 sent PIM (*,G) join to r2 verify using"
"""
tgen = get_topogen()
+ topo = tgen.json_topo
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8")
input_dict = {
step("Start traffic first and then send the IGMP join")
step("Send multicast traffic from FRR3 to 225.1.1.1 receiver")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ result = app_helper.run_traffic("i2", IGMP_JOIN, "f1")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
step("joinRx value before join sent")
state_dict = {"r2": {"r2-l1-eth2": ["joinRx"]}}
state_before = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
state_before, dict
- ), "Testcase {} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, result
+ )
- result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1)
+ result = app_helper.run_join("i1", IGMP_JOIN, "l1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
# (41 * (2 + .5)) == 102.
for data in input_dict:
result = verify_ip_mroutes(
- tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"],
- retry_timeout=102
+ tgen,
+ data["dut"],
+ data["src_address"],
+ IGMP_JOIN,
+ data["iif"],
+ data["oil"],
+ retry_timeout=102,
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
state_after = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
state_after, dict
- ), "Testcase {} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase {} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, result
+ )
step(
"l1 sent PIM (*,G) join to r2 verify using"
"""
tgen = get_topogen()
+ topo = tgen.json_topo
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure static RP on c1 for group (225.1.1.1-5)")
input_dict = {
"c1": {
"Enable IGMP on FRR1 interface and send IGMP join 225.1.1.1 "
"to 225.1.1.5 from different interfaces"
)
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join("i1", IGMP_JOIN_RANGE_1, "l1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3, wait for SPT switchover")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE_1, traffic=True
+ result = app_helper.run_traffic("i2", IGMP_JOIN_RANGE_1, "f1")
+ assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+
+ step(
+ "Verify clear ip mroute (*,g) entries are populated by using "
+ "'show ip mroute' cli"
)
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- result = iperfSendTraffic(tgen, "i2", IGMP_JOIN_RANGE_1, 32, 2500)
- assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
+ input_dict = [
+ {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"}
+ ]
- step("Clear the mroute on l1, wait for 5 sec")
- result = clear_ip_mroute_verify(tgen, "l1")
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
+ )
+ assert result is True, "Testcase{} : Failed Error: {}".format(tc_name, result)
+
+ step("Clear mroutes on l1")
+ clear_ip_mroute(tgen, "l1")
step(
"After clear ip mroute (*,g) entries are re-populated again"
" 'show ip pim upstream' "
)
- source = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
- input_dict = [
- {"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"}
- ]
-
for data in input_dict:
result = verify_ip_mroutes(
tgen, data["dut"], data["src_address"], IGMP_JOIN, data["iif"], data["oil"]
"""
tgen = get_topogen()
+ topo = tgen.json_topo
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8")
input_dict = {
step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1) to R1")
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}},
- "r2": {"igmp": {"interfaces": {"r2-i3-eth1": {"igmp": {"version": "2"}}}}},
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ },
+ "r2": {
+ "igmp": {
+ "interfaces": {
+ "r2-i3-eth1": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ },
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0", "i3": "i3-r2-eth0"}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from R3 to 225.1.1.1 receiver")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ result = app_helper.run_traffic("i2", IGMP_JOIN, "f1")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("IGMP is received on FRR1 , FRR2 , FRR3, using " "'show ip igmp groups'")
igmp_groups = {"l1": "l1-i1-eth1", "r2": "r2-i3-eth1", "f1": "f1-i8-eth2"}
for dut, interface in igmp_groups.items():
- result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN)
+ result = verify_igmp_groups(tgen, dut, interface, IGMP_JOIN, retry_timeout=80)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("(*,G) present on all the node with correct OIL" " using 'show ip mroute'")
"""
tgen = get_topogen()
+ topo = tgen.json_topo
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure static RP for (226.1.1.1-5) and (232.1.1.1-5)" " in c1")
_GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
"for group (226.1.1.1-5, 232.1.1.1-5)"
)
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join("i1", _IGMP_JOIN_RANGE, "l1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i8", "i8-f1-eth0", _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i8", _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join("i8", _IGMP_JOIN_RANGE, "f1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
}
for src, src_intf in input_traffic.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ result = app_helper.run_traffic(src, _IGMP_JOIN_RANGE, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Verify (*,G) are created on FRR1 and FRR3 node " " 'show ip mroute' ")
data["oil"],
expected=False,
)
- assert result is not True, "Testcase {} : Failed \n mroutes are"
- " still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n mroutes are still present \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected Behavior: {}".format(result))
step(
"""
tgen = get_topogen()
+ topo = tgen.json_topo
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure static RP for (226.1.1.1-5) in c2")
input_dict = {
"c2": {
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-5) to FRR1")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join("i1", IGMP_JOIN_RANGE_1, "l1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to 225.1.1.1-5 receivers")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, "i2", IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic("i2", IGMP_JOIN_RANGE_1, "f1")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
# Stop r2 router to make r2 router disabled from topology
result = verify_ip_mroutes(
tgen, "c1", "*", IGMP_JOIN, "c1-c2-eth1", "c1-l1-eth0", expected=False
)
- assert result is not True, "Testcase {} : Failed \n mroutes are"
- " still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n mroutes are still present \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected Behavior: {}".format(result))
write_test_footer(tc_name)
"""
tgen = get_topogen()
+ topo = tgen.json_topo
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure RP on FRR2 (loopback interface) for " "the group range 225.0.0.0/8")
input_dict = {
step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1)
+ result = app_helper.run_join("i1", IGMP_JOIN, "l1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to 225.1.1.1 receiver")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ result = app_helper.run_traffic("i2", IGMP_JOIN, "f1")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Configure one IGMP interface on FRR3 node and send IGMP" " join (225.1.1.1)")
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i8", "i8-f1-eth0", GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i8", IGMP_JOIN, join_interval=1)
+ result = app_helper.run_join("i8", IGMP_JOIN, "f1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
# Verify mroutes are present in FRR3(f1)
result = verify_ip_mroutes(
tgen, "f1", "*", IGMP_JOIN, "f1-r2-eth3", "f1-i8-eth2", expected=False
)
- assert result is not True, "Testcase {} : Failed \n mroutes are"
- " still present \n Error: {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n mroutes are still present \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Expected Behavior: {}".format(result))
step("IGMP groups are present verify using 'show ip igmp group'")
"""
tgen = get_topogen()
+ topo = tgen.json_topo
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1)
+ result = app_helper.run_join("i1", IGMP_JOIN, "l1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure RP on R2 (loopback interface) for the" " group range 225.0.0.0/8")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to 225.1.1.1 receiver")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ result = app_helper.run_traffic("i2", IGMP_JOIN, "f1")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
"l1": {
"igmp": {
"interfaces": {
- "l1-i1-eth1": {"igmp": {"query": {"query-interval": 100}}}
+ "l1-i1-eth1": {"igmp": {"query": {"query-interval": 20}}}
}
}
}
"l1": {
"igmp": {
"interfaces": {
- "l1-i1-eth1": {"igmp": {"query": {"query-interval": 200}}}
+ "l1-i1-eth1": {"igmp": {"query": {"query-interval": 25}}}
}
}
}
"l1": {
"igmp": {
"interfaces": {
- "l1-i1-eth1": {"igmp": {"query": {"query-interval": 300}}}
+ "l1-i1-eth1": {"igmp": {"query": {"query-interval": 30}}}
}
}
}
"""
tgen = get_topogen()
+ topo = tgen.json_topo
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Enable IGMP on FRR1 interface and send IGMP join (225.1.1.1)")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i1", "i1-l1-eth0", GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i1", IGMP_JOIN, join_interval=1)
+ result = app_helper.run_join("i1", IGMP_JOIN, "l1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- step("Configure IGMP query response time to 10 sec on FRR1")
+ step("Configure IGMP query response time to 10 deci-sec on FRR1")
input_dict_1 = {
"l1": {
"igmp": {
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to 225.1.1.1 receiver")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i2", "i2-f1-eth0", GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, "i2", IGMP_JOIN, 32, 2500)
+ result = app_helper.run_traffic("i2", IGMP_JOIN, "f1")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Delete the PIM and IGMP on FRR1")
- raw_config = {
- "l1": {"raw_config": ["interface l1-i1-eth1", "no ip pim"]}
- }
+ raw_config = {"l1": {"raw_config": ["interface l1-i1-eth1", "no ip pim"]}}
result = apply_raw_config(tgen, raw_config)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
result = create_pim_config(tgen, topo["routers"])
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- step("Configure max query response timer 100sec on FRR1")
+ step("Configure max query response timer 100 decisec on FRR1")
input_dict_3 = {
"l1": {
"igmp": {
"l1-i1-eth1": {
"igmp": {
"version": "2",
- "query": {"query-max-response-time": 110},
+ "query": {"query-max-response-time": 105},
}
}
}
"l1-i1-eth1": {
"igmp": {
"version": "2",
- "query": {"query-max-response-time": 120},
+ "query": {"query-max-response-time": 110},
}
}
}
"l1-i1-eth1": {
"igmp": {
"version": "2",
- "query": {"query-max-response-time": 140},
+ "query": {"query-max-response-time": 115},
}
}
}
"l1-i1-eth1": {
"igmp": {
"version": "2",
- "query": {"query-max-response-time": 150},
+ "query": {"query-max-response-time": 120},
}
}
}
import os
import sys
-import json
import time
-import datetime
-from time import sleep
import pytest
pytestmark = pytest.mark.pimd
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
step,
- iperfSendIGMPJoin,
- addKernelRoute,
reset_config_on_routers,
- iperfSendTraffic,
- kill_iperf,
shutdown_bringup_interface,
kill_router_daemons,
start_router,
verify_upstream_iif,
verify_pim_neighbors,
verify_pim_state,
- verify_ip_pim_join,
clear_ip_mroute,
clear_ip_pim_interface_traffic,
- verify_igmp_config,
+ McastTesterHelper,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.pimd]
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/multicast_pim_sm_topo2.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
+pytestmark = [pytest.mark.pimd]
TOPOLOGY = """
IGMP_JOIN_RANGE_3 = ["227.1.1.1", "227.1.1.2", "227.1.1.3", "227.1.1.4", "227.1.1.5"]
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/multicast_pim_sm_topo2.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
# Creating configuration from JSON
build_config_from_json(tgen, topo)
+ # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
tgen = get_topogen()
+ app_helper.cleanup()
+
# Stop toplogy and Remove tmp files
tgen.stop_topology()
#####################################################
-def config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False
-):
- """
- API to do pre-configuration to send IGMP join and multicast
- traffic
-
- parameters:
- -----------
- * `tgen`: topogen object
- * `topo`: input json data
- * `tc_name`: caller test case name
- * `iperf`: router running iperf
- * `iperf_intf`: interface name router running iperf
- * `GROUP_RANGE`: group range
- * `join`: IGMP join, default False
- * `traffic`: multicast traffic, default False
- """
-
- if join:
- # Add route to kernal
- result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- if traffic:
- # Add route to kernal
- result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- router_list = tgen.routers()
- for router in router_list.keys():
- if router == iperf:
- continue
-
- rnode = router_list[router]
- rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
-
- return True
-
-
def verify_state_incremented(state_before, state_after):
"""
API to compare interface traffic state incrementing
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure static RP for (226.1.1.1-5) in c1")
step("Configure static RP for (232.1.1.1-5) in c2")
)
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join(recvr, _IGMP_JOIN_RANGE, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
input_src = {"i2": "i2-f1-eth0", "i5": "i5-c2-eth0"}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ result = app_helper.run_traffic(src, _IGMP_JOIN_RANGE, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
# Verifying mroutes before PIMd restart, fetching uptime
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure static RP for (226.1.1.1-5) in c1")
step("Configure static RP for (232.1.1.1-5) in c2")
)
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join(recvr, _IGMP_JOIN_RANGE, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
input_src = {"i2": "i2-f1-eth0", "i5": "i5-c2-eth0"}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ result = app_helper.run_traffic(src, _IGMP_JOIN_RANGE, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Verifying mroutes before FRR restart, fetching uptime")
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure static RP for (226.1.1.1-5) and " "(232.1.1.1-5) in c2")
_GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
"(226.1.1.1-5) and (232.1.1.1-5)"
)
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join("i1", _IGMP_JOIN_RANGE, "l1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to '226.1.1.1-5'" ", '232.1.1.1-5' receiver")
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i2", "i2-f1-eth0", _GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
step("registerRx and registerStopTx value before traffic sent")
state_dict = {"c2": {"c2-f1-eth1": ["registerRx", "registerStopTx"]}}
state_before = verify_pim_interface_traffic(tgen, state_dict)
tc_name, result
)
- result = iperfSendTraffic(tgen, "i2", _IGMP_JOIN_RANGE, 32, 2500)
+ result = app_helper.run_traffic("i2", _IGMP_JOIN_RANGE, "f1")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
step("Stop the traffic to all the receivers")
- kill_iperf(tgen, "i2", "remove_traffic")
+ app_helper.stop_host("i2")
step(
"Null register packet being send periodically from FRR3 to RP, "
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure static RP for (226.1.1.1-5) in c1")
step("Configure static RP for (232.1.1.1-5) in c2")
)
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join(recvr, _IGMP_JOIN_RANGE, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
input_src = {"i2": "i2-f1-eth0", "i5": "i5-c2-eth0"}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ result = app_helper.run_traffic(src, _IGMP_JOIN_RANGE, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
intf_l1_c1 = "l1-c1-eth0"
shutdown_bringup_interface(tgen, dut, intf_l1_c1, False)
- done_flag = False
- for retry in range(1, 11):
- result = verify_upstream_iif(
- tgen, "l1", "Unknown", source, IGMP_JOIN_RANGE_2, expected=False
- )
- if result is not True:
- done_flag = True
- else:
- continue
-
- if done_flag:
- logger.info("Expected Behavior: {}".format(result))
- break
-
- assert done_flag is True, (
+ result = verify_upstream_iif(
+ tgen, "l1", "Unknown", source, IGMP_JOIN_RANGE_2, expected=False
+ )
+ assert result is not True, (
"Testcase {} : Failed Error: \n "
"mroutes are still present, after waiting for 10 mins".format(tc_name)
)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Stop the traffic to all the receivers")
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
for data in input_dict:
result = verify_ip_mroutes(
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure static RP on c1 for group range " "(226.1.1.1-5) and (232.1.1.1-5)")
_GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
"Enable IGMP on FRR1 interface and send IGMP join"
" (226.1.1.1-5) and (232.1.1.1-5)"
)
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join("i1", _IGMP_JOIN_RANGE, "l1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
"Send multicast traffic from FRR3 to all the receivers "
"(226.1.1.1-5) and (232.1.1.1-5)"
)
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i2", "i2-f1-eth0", _GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, "i2", _IGMP_JOIN_RANGE, 32, 2500)
+ result = app_helper.run_traffic("i2", _IGMP_JOIN_RANGE, "f1")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
" join (226.1.1.1-5) and (232.1.1.1-5)"
)
input_dict = {
- "c2": {"igmp": {"interfaces": {"c2-i5-eth2": {"igmp": {"version": "2"}}}}}
+ "c2": {
+ "igmp": {
+ "interfaces": {
+ "c2-i5-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i5", "i5-c2-eth0", _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i5", _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join("i5", _IGMP_JOIN_RANGE, "c2")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("FRR1 has 10 (*.G) and 10 (S,G) verify using 'show ip mroute count'")
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure static RP for group range (226.1.1.1-5) and " "(232.1.1.1-5) on c1")
_GROUP_RANGE = GROUP_RANGE_2 + GROUP_RANGE_3
_IGMP_JOIN_RANGE = IGMP_JOIN_RANGE_2 + IGMP_JOIN_RANGE_3
"(226.1.1.1-5) and (232.1.1.1-5)"
)
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i1", "i1-l1-eth0", _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i2", "i2-f1-eth0", _GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
step("Send IGMP join (226.1.1.1-5, 232.1.1.1-5) to LHR(l1)")
- result = iperfSendIGMPJoin(tgen, "i1", _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join("i1", _IGMP_JOIN_RANGE, "l1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Send multicast traffic from FRR3 to '226.1.1.1-5'" ", '232.1.1.1-5' receiver")
- result = iperfSendTraffic(tgen, "i2", _IGMP_JOIN_RANGE, 32, 2500)
+ result = app_helper.run_traffic("i2", _IGMP_JOIN_RANGE, "f1")
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
step("Configure one IGMP interface on f1 node and send IGMP" " join (225.1.1.1)")
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, "i8", "i8-f1-eth0", _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, "i8", _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join("i8", _IGMP_JOIN_RANGE, "f1")
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step(
"l1 and f1 has 10 IGMP groups (226.1.1.1-5, 232.1.1.1-5),"
# Stop the multicast traffic
step("Stop the traffic to all the receivers")
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
step(
"After traffic stopped , verify (*,G) entries are not flushed"
{"dut": "f1", "src_address": "*", "iif": "f1-c2-eth0", "oil": "f1-i8-eth2"},
{"dut": "l1", "src_address": "*", "iif": "l1-c1-eth0", "oil": "l1-i1-eth1"},
]
-
- done_flag = False
- for retry in range(1, 11):
- for data in input_dict:
- result = verify_ip_mroutes(
- tgen,
- data["dut"],
- data["src_address"],
- _IGMP_JOIN_RANGE,
- data["iif"],
- data["oil"],
- )
-
- if result is True:
- done_flag = True
- else:
- continue
-
- if done_flag:
- break
-
- assert done_flag is True, (
- "Testcase {} : Failed Error: \n "
- "mroutes are still present, after waiting for 10 mins".format(tc_name)
- )
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ )
+ assert (
+ result is True
+ ), "Testcase {} : Failed Error mroutes were flushed.".format(tc_name)
step(
"After traffic stopped , verify (S,G) entries are flushed out"
{"dut": "f1", "src_address": source, "iif": "i2-f1-eth0", "oil": "f1-r2-eth3"},
]
- done_flag = False
- for retry in range(1, 11):
- for data in input_dict:
- result = verify_ip_mroutes(
- tgen,
- data["dut"],
- data["src_address"],
- _IGMP_JOIN_RANGE,
- data["iif"],
- data["oil"],
- expected=False,
- )
- if result is not True:
- done_flag = True
- else:
- continue
-
- if done_flag:
- logger.info("Expected Behavior: {}".format(result))
- break
-
- assert done_flag is True, (
- "Testcase {} : Failed Error: \n "
- "mroutes are still present, after waiting for 10 mins".format(tc_name)
- )
+ for data in input_dict:
+ result = verify_ip_mroutes(
+ tgen,
+ data["dut"],
+ data["src_address"],
+ _IGMP_JOIN_RANGE,
+ data["iif"],
+ data["oil"],
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error: \nmroutes are still present".format(tc_name)
write_test_footer(tc_name)
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure static RP for (226.1.1.1-5) in c1")
step("Configure static RP for (232.1.1.1-5) in c2")
"f1": {
"igmp": {
"interfaces": {
- "f1-i8-eth2": {"igmp": {"version": "2"}},
- "f1-i2-eth1": {"igmp": {"version": "2"}},
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ },
+ "f1-i2-eth1": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ },
+ }
+ }
+ },
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ "l1-i6-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
}
}
},
- "l1": {"igmp": {"interfaces": {"l1-i6-eth2": {"igmp": {"version": "2"}}}}},
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
- )
+ result = app_helper.run_join(recvr, _IGMP_JOIN_RANGE, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
- result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure one source in FRR2 , one in c1")
step(
"Send multicast traffic from both the sources to all the"
input_src = {"i3": "i3-r2-eth0"}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ result = app_helper.run_traffic(src, _IGMP_JOIN_RANGE, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
"After all the IGMP groups received with correct port using"
source = topo["routers"]["i3"]["links"]["r2"]["ipv4"].split("/")[0]
input_dict_all = [
- {"dut": "l1", "src_address": source, "iif": ["l1-r2-eth4", "l1-c1-eth0"],
- "oil": ["l1-i1-eth1", "l1-i6-eth2"]},
+ {
+ "dut": "l1",
+ "src_address": source,
+ "iif": ["l1-r2-eth4", "l1-c1-eth0"],
+ "oil": ["l1-i1-eth1", "l1-i6-eth2"],
+ },
{"dut": "f1", "src_address": source, "iif": "f1-r2-eth3", "oil": "f1-i8-eth2"},
]
for data in input_dict_all:
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure static RP for (226.1.1.1-5) in c1")
step("Configure static RP for (232.1.1.1-5) in c2")
)
input_dict = {
- "f1": {"igmp": {"interfaces": {"f1-i8-eth2": {"igmp": {"version": "2"}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ "f1-i8-eth2": {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
input_join = {"i1": "i1-l1-eth0", "i8": "i8-f1-eth0"}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join(recvr, _IGMP_JOIN_RANGE, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure 1 source in FRR1 , 1 in FRR3")
input_src = {"i6": "i6-l1-eth0", "i2": "i2-f1-eth0"}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ result = app_helper.run_traffic(src, _IGMP_JOIN_RANGE, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
import os
import re
import sys
-import json
import time
import datetime
import pytest
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
step,
- iperfSendIGMPJoin,
- addKernelRoute,
reset_config_on_routers,
- iperfSendTraffic,
- kill_iperf,
shutdown_bringup_interface,
- kill_router_daemons,
- start_router,
- start_router_daemons,
- stop_router,
apply_raw_config,
- add_interfaces_to_vlan,
- tcpdump_capture_start,
- tcpdump_capture_stop,
- LOGDIR,
check_router_status,
required_linux_kernel_version,
topo_daemons,
create_igmp_config,
verify_igmp_groups,
verify_ip_mroutes,
- clear_ip_mroute_verify,
clear_ip_mroute,
clear_ip_pim_interface_traffic,
verify_igmp_config,
- verify_pim_neighbors,
verify_pim_config,
verify_pim_interface,
verify_upstream_iif,
verify_multicast_traffic,
verify_pim_rp_info,
- get_refCount_for_mroute,
verify_multicast_flag_state,
+ McastTesterHelper,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/multicast_pim_sm_topo3.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
+CWD = os.path.dirname(os.path.realpath(__file__))
+pytestmark = pytest.mark.pimd
TOPOLOGY = """
SAME_VLAN_IP_2 = {"ip": "10.1.1.2", "subnet": "255.255.255.0", "cidr": "24"}
SAME_VLAN_IP_3 = {"ip": "10.1.1.3", "subnet": "255.255.255.0", "cidr": "24"}
SAME_VLAN_IP_4 = {"ip": "10.1.1.4", "subnet": "255.255.255.0", "cidr": "24"}
-TCPDUMP_FILE = "{}/{}".format(LOGDIR, "v2query.txt")
-
-
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
def setup_module(mod):
logger.info("Running setup_module to create topology")
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/multicast_pim_sm_topo3.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
# Creating configuration from JSON
build_config_from_json(tgen, topo)
+ # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
tgen = get_topogen()
+ app_helper.cleanup()
+
# Stop toplogy and Remove tmp files
tgen.stop_topology()
#####################################################
-def config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False
-):
- """
- API to do pre-configuration to send IGMP join and multicast
- traffic
-
- parameters:
- -----------
- * `tgen`: topogen object
- * `topo`: input json data
- * `tc_name`: caller test case name
- * `iperf`: router running iperf
- * `iperf_intf`: interface name router running iperf
- * `GROUP_RANGE`: group range
- * `join`: IGMP join, default False
- * `traffic`: multicast traffic, default False
- """
-
- if join:
- # Add route to kernal
- result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- if traffic:
- # Add route to kernal
- result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- router_list = tgen.routers()
- for router in router_list.keys():
- if router == iperf:
- continue
-
- rnode = router_list[router]
- rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
-
- for router in topo["routers"].keys():
- if "static_routes" in topo["routers"][router]:
- static_routes = topo["routers"][router]["static_routes"]
- for static_route in static_routes:
- network = static_route["network"]
- next_hop = static_route["next_hop"]
- if type(network) is not list:
- network = [network]
- for net in network:
- addKernelRoute(tgen, router, iperf_intf, net, next_hop)
- return True
-
-
def verify_mroute_repopulated(uptime_before, uptime_after):
"""
API to compare uptime for mroutes
* `state_after` : State dictionary for any particular instance
"""
- for router, state_data in state_before.items():
- for state, value in state_data.items():
- if state_before[router][state] >= state_after[router][state]:
- errormsg = (
- "[DUT: %s]: state %s value has not"
- " incremented, Initial value: %s, "
- "Current value: %s [FAILED!!]"
- % (
- router,
+ for ttype, v1 in state_before.items():
+ for intf, v2 in v1.items():
+ for state, value in v2.items():
+ if value >= state_after[ttype][intf][state]:
+ errormsg = "[DUT: %s]: state %s value has not incremented, Initial value: %s, Current value: %s [FAILED!!]" % (
+ intf,
state,
- state_before[router][state],
- state_after[router][state],
+ value,
+ state_after[ttype][intf][state],
)
+ return errormsg
+
+ logger.info(
+ "[DUT: %s]: State %s value is incremented, Initial value: %s, Current value: %s [PASSED!!]",
+ intf,
+ state,
+ value,
+ state_after[ttype][intf][state],
)
- return errormsg
-
- logger.info(
- "[DUT: %s]: State %s value is "
- "incremented, Initial value: %s, Current value: %s"
- " [PASSED!!]",
- router,
- state,
- state_before[router][state],
- state_after[router][state],
- )
return True
"""
- filepath = os.path.join(LOGDIR, tgen.modname, router, cap_file)
+ filepath = os.path.join(tgen.logdir, router, cap_file)
with open(filepath) as f:
if len(re.findall("{}".format(message), f.read())) < count:
errormsg = "[DUT: %s]: Verify Message: %s in tcpdump" " [FAILED!!]" % (
"""
- filepath = os.path.join(LOGDIR, tgen.modname, router, cap_file)
+ filepath = os.path.join(tgen.logdir, router, cap_file)
with open(filepath) as f:
if len(re.findall(message, f.read())) < 1:
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Enable the PIM on all the interfaces of FRR1, FRR2, FRR3")
step(
"Enable IGMP of FRR1 interface and send IGMP joins "
intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
input_dict = {
- "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ intf_f1_i8: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure static RP for (226.1.1.1-5) in R2")
input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Removing FRR3 to simulate topo " "FHR(FRR1)---LHR(FRR2)")
intf_l1_c1 = topo["routers"]["l1"]["links"]["c1"]["interface"]
intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
input_dict = {
- "r2": {"igmp": {"interfaces": {intf_r2_i3: {"igmp": {"version": "2"}}}}}
+ "r2": {
+ "igmp": {
+ "interfaces": {
+ intf_r2_i3: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure static RP for (226.1.1.1-5) in R2")
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers")
step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure RP on R2 (loopback interface) for " "the group range 225.0.0.0/8")
input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
" 'show ip pim upstream' 'show ip mroute' "
)
- done_flag = False
- for retry in range(1, 11):
- result = verify_upstream_iif(
- tgen, "l1", "Unknown", source_i2, IGMP_JOIN_RANGE_1, expected=False
- )
- if result is not True:
- done_flag = True
- else:
- continue
- if done_flag:
- logger.info("Expected Behavior: {}".format(result))
- break
-
- assert done_flag is True, (
- "Testcase {} : Failed Error: \n "
- "mroutes are still present, after waiting for 10 mins".format(tc_name)
+ result = verify_upstream_iif(
+ tgen, "l1", "Unknown", source_i2, IGMP_JOIN_RANGE_1, expected=False
)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error: \n mroutes are still present".format(tc_name)
step("No shut the Source interface just after the upstream is expired" " from FRR1")
shutdown_bringup_interface(tgen, "f1", intf_f1_i2, True)
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Enable the PIM on all the interfaces of FRR1, R2 and FRR3" " routers")
step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure RP on R2 (loopback interface) for " "the group range 225.0.0.0/8")
input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
" 'show ip pim upstream' 'show ip mroute' "
)
- done_flag = False
- for retry in range(1, 11):
- result = verify_upstream_iif(
- tgen, "l1", "Unknown", source_i2, IGMP_JOIN_RANGE_1, expected=False
- )
- if result is not True:
- done_flag = True
- else:
- continue
- if done_flag:
- logger.info("Expected Behavior: {}".format(result))
- break
-
- assert done_flag is True, (
- "Testcase {} : Failed Error: \n "
- "mroutes are still present, after waiting for 10 mins".format(tc_name)
+ result = verify_upstream_iif(
+ tgen, "l1", "Unknown", source_i2, IGMP_JOIN_RANGE_1, expected=False
)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed Error: \nmroutes are still present".format(tc_name)
step("No shut the Source interface just after the upstream is expired" " from FRR1")
shutdown_bringup_interface(tgen, "l1", intf_l1_i1, True)
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Enable PIM on all routers")
step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)")
input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Configure source on FRR1 and start the traffic for" " (225.1.1.1-225.1.1.10)")
input_src = {"i6": topo["routers"]["i6"]["links"]["l1"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
source_i6 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
)
input_dict_2 = {
- "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2"}}}}}
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ intf_l1_i1: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict_2)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
)
input_dict_2 = {
- "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2"}}}}}
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ intf_l1_i1: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict_2)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Enable PIM on all routers")
step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)")
input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Configure source on FRR1 and start the traffic for" " (225.1.1.1-225.1.1.10)")
input_src = {"i6": topo["routers"]["i6"]["links"]["l1"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
source_i6 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
intf_l1_i1 = topo["routers"]["l1"]["links"]["i1"]["interface"]
input_dict_1 = {
- "l1": {"igmp": {"interfaces": {intf_l1_i1: {"igmp": {"version": "2", "query": {"query-max-response-time": 40, "query-interval": 5}}}}}}
+ "l1": {
+ "igmp": {
+ "interfaces": {
+ intf_l1_i1: {
+ "igmp": {
+ "version": "2",
+ "query": {
+ "query-max-response-time": 40,
+ "query-interval": 5,
+ },
+ }
+ }
+ }
+ }
+ }
}
result = verify_igmp_config(tgen, input_dict_1)
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure 'ip pim' on receiver interface on FRR1")
step("Enable PIM on all routers")
step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)")
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure 'ip pim' on receiver interface on FRR1")
step("Enable PIM on all routers")
step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)")
input_src = {"i2": topo["routers"]["i2"]["links"]["f1"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
source_i2 = topo["routers"]["i2"]["links"]["f1"]["ipv4"].split("/")[0]
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step("Configure 'ip pim' on receiver interface on FRR1")
step("Enable PIM on all routers")
step("Enable IGMP on FRR1 interface and send IGMP join " "(225.1.1.1-225.1.1.10)")
input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in cisco-1(f1)")
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
step(
"Remove cisco connected link to simulate topo "
"LHR(FRR1(f1))----RP(cisco(f1)---FHR(FRR3(l1))"
intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
input_dict = {
- "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ intf_f1_i8: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
input_join = {"i8": topo["routers"]["i8"]["links"]["f1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure static RP for (225.1.1.1-5) as R2")
input_src = {"i6": topo["routers"]["i6"]["links"]["l1"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step("Send prune from receiver-1 (using ctrl+c) on iperf interface")
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
input_traffic = {"f1": {"traffic_sent": [intf_f1_i8]}}
step("Send IGMP joins again from LHR,check IGMP joins and starg received")
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
for data in input_dict_starg:
step("Send traffic from FHR and verify mroute upstream")
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
step(
"Remove cisco connected link to simulate topo "
"LHR(FRR1(f1))----RP(cisco(f1)---FHR(FRR3(l1))"
intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
input_dict = {
- "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ intf_f1_i8: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
input_join = {"i8": topo["routers"]["i8"]["links"]["f1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure static RP for (225.1.1.1-5) as R2")
}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
intf_r2_l1 = topo["routers"]["r2"]["links"]["l1"]["interface"]
shutdown_bringup_interface(tgen, "r2", intf_r2_l1, False)
- kill_iperf(tgen, dut="i2", action="remove_traffic")
+ app_helper.stop_host("i2")
step("Verify RP info after Shut the link from FHR to RP from RP node")
dut = "l1"
step("Verify PIM Nbrs after Shut the link from FHR to RP from FHR node")
- kill_iperf(tgen, dut="i6", action="remove_traffic")
+ app_helper.stop_host("i6")
step("Verify RP info after Shut the link from FHR to RP from FHR node")
dut = "l1"
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
step(
"Remove cisco connected link to simulate topo "
"LHR(FRR1(f1))----RP(cisco(f1)---FHR(FRR3(l1))"
intf_f1_i8 = topo["routers"]["f1"]["links"]["i8"]["interface"]
input_dict = {
- "f1": {"igmp": {"interfaces": {intf_f1_i8: {"igmp": {"version": "2"}}}}}
+ "f1": {
+ "igmp": {
+ "interfaces": {
+ intf_f1_i8: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
input_join = {"i8": topo["routers"]["i8"]["links"]["f1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure static RP for (225.1.1.1-5) as R2")
}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
source_i2 = topo["routers"]["i6"]["links"]["l1"]["ipv4"].split("/")[0]
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step(
"Remove FRR3 to cisco connected link to simulate topo "
"FHR(FRR3(l1))---LHR(FRR1(r2)----RP(FRR2(f1))"
intf_r2_i3 = topo["routers"]["r2"]["links"]["i3"]["interface"]
input_dict = {
- "r2": {"igmp": {"interfaces": {intf_r2_i3: {"igmp": {"version": "2"}}}}}
+ "r2": {
+ "igmp": {
+ "interfaces": {
+ intf_r2_i3: {
+ "igmp": {"version": "2", "query": {"query-interval": 15}}
+ }
+ }
+ }
+ }
}
result = create_igmp_config(tgen, topo, input_dict)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
input_join = {"i3": topo["routers"]["i3"]["links"]["r2"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join(recvr, _IGMP_JOIN_RANGE, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure RP for (226.1.1.1-5) and (232.1.1.1-5) in (f1)")
input_src = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ result = app_helper.run_traffic(src, _IGMP_JOIN_RANGE, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
check_router_status(tgen)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
step(
"Remove FRR3 to FRR2 connected link to simulate topo "
"FHR(FRR3)---LHR(FRR1)----RP(FFR2)"
input_join = {"i1": topo["routers"]["i1"]["links"]["l1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, _GROUP_RANGE, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, _IGMP_JOIN_RANGE, join_interval=1)
+ result = app_helper.run_join(recvr, _IGMP_JOIN_RANGE, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure RP for (225.1.1.1-5) in (f1)")
input_src = {"i3": topo["routers"]["i3"]["links"]["r2"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, _GROUP_RANGE, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, _IGMP_JOIN_RANGE, 32, 2500)
+ result = app_helper.run_traffic(src, _IGMP_JOIN_RANGE, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
"""
import os
-import re
import sys
-import json
import time
-import datetime
-from time import sleep
import pytest
pytestmark = pytest.mark.pimd
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
step,
- iperfSendIGMPJoin,
- addKernelRoute,
reset_config_on_routers,
- iperfSendTraffic,
- kill_iperf,
shutdown_bringup_interface,
- start_router,
- stop_router,
apply_raw_config,
create_static_routes,
required_linux_kernel_version,
from lib.pim import (
create_pim_config,
create_igmp_config,
- verify_igmp_groups,
verify_ip_mroutes,
clear_ip_pim_interface_traffic,
- verify_igmp_config,
- verify_pim_neighbors,
- verify_pim_config,
- verify_pim_interface,
verify_upstream_iif,
clear_ip_mroute,
- verify_multicast_traffic,
verify_pim_rp_info,
verify_pim_interface_traffic,
- verify_igmp_interface,
+ McastTesterHelper,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/multicast_pim_sm_topo4.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
+from lib.topojson import build_config_from_json
TOPOLOGY = """
NEW_ADDRESS_2_SUBNET = "192.168.20.2/24"
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
logger.info("Running setup_module to create topology")
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/multicast_pim_sm_topo4.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
# Creating configuration from JSON
build_config_from_json(tgen, topo)
+ # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
tgen = get_topogen()
+ app_helper.cleanup()
+
# Stop toplogy and Remove tmp files
tgen.stop_topology()
#####################################################
-def config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, iperf, iperf_intf, GROUP_RANGE, join=False, traffic=False
-):
- """
- API to do pre-configuration to send IGMP join and multicast
- traffic
-
- parameters:
- -----------
- * `tgen`: topogen object
- * `topo`: input json data
- * `tc_name`: caller test case name
- * `iperf`: router running iperf
- * `iperf_intf`: interface name router running iperf
- * `GROUP_RANGE`: group range
- * `join`: IGMP join, default False
- * `traffic`: multicast traffic, default False
- """
-
- if join:
- # Add route to kernal
- result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- if traffic:
- # Add route to kernal
- result = addKernelRoute(tgen, iperf, iperf_intf, GROUP_RANGE)
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- router_list = tgen.routers()
- for router in router_list.keys():
- if router == iperf:
- continue
-
- rnode = router_list[router]
- rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
-
- for router in topo["routers"].keys():
- if "static_routes" in topo["routers"][router]:
- static_routes = topo["routers"][router]["static_routes"]
- for static_route in static_routes:
- network = static_route["network"]
- next_hop = static_route["next_hop"]
- if type(network) is not list:
- network = [network]
-
- return True
-
-
def verify_state_incremented(state_before, state_after):
"""
API to compare interface traffic state incrementing
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
step(
"Remove c1-c2 connected link to simulate topo "
"c1(FHR)---l1(RP)----r2---f1-----c2(LHR)"
input_join = {"i5": topo["routers"]["i5"]["links"]["c2"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure static RP for (225.1.1.1-5) as R2")
input_src = {"i4": topo["routers"]["i4"]["links"]["c1"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
source_i4 = topo["routers"]["i4"]["links"]["c1"]["ipv4"].split("/")[0]
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
step(
"Remove c1-c2 connected link to simulate topo "
"c1(LHR)---l1(RP)----r2---f1-----c2(FHR)"
input_join = {"i4": topo["routers"]["i4"]["links"]["c1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure static RP for (225.1.1.1-5) as R2")
input_src = {"i5": topo["routers"]["i5"]["links"]["c2"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
source_i5 = topo["routers"]["i5"]["links"]["c2"]["ipv4"].split("/")[0]
tc_name = request.node.name
write_test_header(tc_name)
+ # Don"t run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
# Creating configuration from JSON
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
reset_config_on_routers(tgen)
clear_ip_pim_interface_traffic(tgen, topo)
- # Don"t run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
step(
"Remove c1-c2 connected link to simulate topo "
"c1(LHR)---l1(RP)----r2---f1-----c2(FHR)"
input_join = {"i4": topo["routers"]["i4"]["links"]["c1"]["interface"]}
for recvr, recvr_intf in input_join.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, recvr, recvr_intf, GROUP_RANGE_1, join=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendIGMPJoin(tgen, recvr, IGMP_JOIN_RANGE_1, join_interval=1)
+ result = app_helper.run_join(recvr, IGMP_JOIN_RANGE_1, join_intf=recvr_intf)
assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
step("Configure static RP for (225.1.1.1-5) as R2")
input_src = {"i5": topo["routers"]["i5"]["links"]["c2"]["interface"]}
for src, src_intf in input_src.items():
- result = config_to_send_igmp_join_and_traffic(
- tgen, topo, tc_name, src, src_intf, GROUP_RANGE_1, traffic=True
- )
- assert result is True, "Testcase {}: Failed Error: {}".format(tc_name, result)
-
- result = iperfSendTraffic(tgen, src, IGMP_JOIN_RANGE_1, 32, 2500)
+ result = app_helper.run_traffic(src, IGMP_JOIN_RANGE_1, bind_intf=src_intf)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
source_i5 = topo["routers"]["i5"]["links"]["c2"]["ipv4"].split("/")[0]
c1_state_before = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
c1_state_before, dict
- ), "Testcase{} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase{} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, result
+ )
step("Flap PIM nbr while doing interface c1-l1 interface shut from f1 side")
shutdown_bringup_interface(tgen, "c1", intf_c1_l1, False)
c1_state_after = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
c1_state_after, dict
- ), "Testcase{} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase{} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, result
+ )
step("verify stats not increamented on c1")
result = verify_state_incremented(c1_state_before, c1_state_after)
l1_state_before = verify_pim_interface_traffic(tgen, l1_state_dict)
assert isinstance(
l1_state_before, dict
- ), "Testcase{} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase{} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, result
+ )
step("Flap PIM nbr while doing interface r2-c1 shut from r2 side")
shutdown_bringup_interface(tgen, "l1", intf_l1_c1, False)
l1_state_after = verify_pim_interface_traffic(tgen, l1_state_dict)
assert isinstance(
l1_state_after, dict
- ), "Testcase{} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase{} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, result
+ )
step("verify stats not increamented on l1")
result = verify_state_incremented(l1_state_before, l1_state_after)
c1_state_before = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
c1_state_before, dict
- ), "Testcase{} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase{} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, result
+ )
step("Flap c1-r2 pim nbr while changing ip address from c1 side")
c1_l1_ip_subnet = topo["routers"]["c1"]["links"]["l1"]["ipv4"]
c1_state_after = verify_pim_interface_traffic(tgen, state_dict)
assert isinstance(
c1_state_after, dict
- ), "Testcase{} : Failed \n state_before is not dictionary \n "
- "Error: {}".format(tc_name, result)
+ ), "Testcase{} : Failed \n state_before is not dictionary \n Error: {}".format(
+ tc_name, result
+ )
step("verify stats not increamented on c1")
result = verify_state_incremented(c1_state_before, c1_state_after)
import os
import sys
-import json
import time
from time import sleep
import datetime
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
from lib.topolog import logger
write_test_footer,
reset_config_on_routers,
step,
- iperfSendIGMPJoin,
- iperfSendTraffic,
- addKernelRoute,
shutdown_bringup_interface,
kill_router_daemons,
start_router_daemons,
create_static_routes,
- kill_iperf,
topo_daemons,
)
from lib.pim import (
clear_ip_pim_interfaces,
clear_ip_mroute,
clear_ip_mroute_verify,
+ McastTesterHelper,
)
pytestmark = [pytest.mark.pimd, pytest.mark.staticd]
-# Reading the data from JSON File for topology and configuration creation
-jsonFile = "{}/multicast_pim_static_rp.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- TOPO = json.load(topoJson)
-except IOError:
- logger.info("Could not read file: %s", jsonFile)
-
# Global variables
GROUP_RANGE_ALL = "224.0.0.0/4"
GROUP_RANGE = "225.1.1.1/32"
SOURCE = "Static"
-class CreateTopo(Topo):
- """
- Test BasicTopo - topology 1
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function"""
- tgen = get_topogen(self)
+def build_topo(tgen):
+ """Build function"""
- # Building topology from json file
- build_topo_from_json(tgen, TOPO)
-
- def dumdum(self):
- """ Dummy """
- print("%s", self.name)
+ # Building topology from json file
+ build_topo_from_json(tgen, TOPO)
def setup_module(mod):
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/multicast_pim_static_rp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global TOPO
+ TOPO = tgen.json_topo
# ... and here it calls Mininet initialization functions.
result = verify_pim_neighbors(tgen, TOPO)
assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
+ # XXX Replace this using "with McastTesterHelper()... " in each test if possible.
+ global app_helper
+ app_helper = McastTesterHelper(tgen)
+
logger.info("Running setup_module() done")
tgen = get_topogen()
+ app_helper.cleanup()
+
# Stop toplogy and Remove tmp files
tgen.stop_topology()
#####################################################
-def config_to_send_igmp_join_and_traffic(tgen, tc_name):
- """
- API to do pre-configuration to send IGMP join and multicast
- traffic
-
- parameters:
- -----------
- * `tgen`: topogen object
- * `tc_name`: caller test case name
- """
-
- step("r0: Add route to kernal")
- result = addKernelRoute(tgen, "r0", "r0-r1-eth0", GROUP_RANGE_ALL)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
- step("r5: Add route to kernal")
- result = addKernelRoute(tgen, "r5", "r5-r3-eth0", GROUP_RANGE_ALL)
- assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
-
- rnode = tgen.routers()["r1"]
- rnode.run("ip route add 10.0.6.0/24 via 10.0.2.2")
- rnode = tgen.routers()["r2"]
- rnode.run("ip route add 10.0.6.0/24 via 10.0.4.2")
- rnode = tgen.routers()["r4"]
- rnode.run("ip route add 10.0.6.0/24 via 10.0.5.1")
-
- router_list = tgen.routers()
- for router in router_list.keys():
- rnode = router_list[router]
- rnode.run("echo 2 > /proc/sys/net/ipv4/conf/all/rp_filter")
-
- return True
-
-
def verify_mroute_repopulated(uptime_before, uptime_after):
"""
API to compare uptime for mroutes
pytest.skip(tgen.errors)
step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
step("Enable IGMP on r1 interface and send IGMP " "join (225.1.1.1) to r1")
step("Configure r2 loopback interface as RP")
)
step("r0 : Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
)
step("r1: Verify upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream join state is up and join timer is running \n Error: {}".format(
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
dut = "r1"
intf = "r1-r3-eth2"
shutdown_bringup_interface(tgen, dut, intf, False)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r5: Send multicast traffic for group 225.1.1.1")
- result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
dut = "r1"
intf = "r1-r3-eth2"
shutdown_bringup_interface(tgen, dut, intf, False)
assert isinstance(
state_before, dict
), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
- tc_name, result
+ tc_name, state_before
)
step("Enable IGMP on r1 interface and send IGMP " "join (225.1.1.1) to r1")
step("Enable PIM between r1 and r2")
step("r0 : Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1 : Verify rp info")
"r1: join state should not be joined and join timer should stop,"
"verify using show ip pim upstream"
)
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r1: join state is joined and timer is not stopped \n Error: {}".format(
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on R1 interface")
step("Configure r2 loopback interface as RP")
step("Enable PIM between r1 and r2")
)
step("r0 : Send IGMP join (225.1.1.1) to r1, when rp is not configured" "in r1")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: IGMP group is received on R1 verify using show ip igmp groups")
step("r1: Verify upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream join state is joined and timer is running \n Error: {}".format(
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface and send IGMP " "join (225.1.1.1) to r1")
step("Configure r2 loopback interface as RP")
step("Enable PIM between r1 and r2")
assert isinstance(
state_before, dict
), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
- tc_name, result
+ tc_name, state_before
)
step("r1: Make RP un-reachable")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1 : Send IGMP join for 225.1.1.1")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1 : Verify IGMP groups")
)
step("r1 : Verify upstream join state and join timer")
- result = verify_join_state_and_timer(tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False)
+ result = verify_join_state_and_timer(
+ tgen, dut, iif, STAR, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r1: upstream join state is joined and timer is running\n Error: {}".format(
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (loopback interface) for the group range " "224.0.0.0/4")
step("Configure RP on r4 (loopback interface) for the group range " "225.1.1.1/32")
shutdown_bringup_interface(tgen, dut, intf, False)
dut = "r1"
- intf = "r1-r3-eth1"
+ intf = "r1-r3-eth2"
shutdown_bringup_interface(tgen, dut, intf, False)
step("r1 : Verify joinTx count before sending join")
assert isinstance(
state_before, dict
), "Testcase{} : Failed \n state_before is not dictionary \n " "Error: {}".format(
- tc_name, result
+ tc_name, state_before
)
step("r0 : Send IGMP join for 225.1.1.1")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1 : Verify IGMP groups")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface")
step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4")
step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r5: Send multicast traffic for group 225.1.1.1")
- result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface")
step("Configure RP on r1 (loopback interface) for the group range" " 224.0.0.0/4")
step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r5: Send multicast traffic for group 225.1.1.1")
- result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24")
step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Verify IGMP groups")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r5: Send multicast traffic for group 225.1.1.1")
- result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (loopback interface) for the group range" " 225.1.1.0/24")
step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r5: Send multicast traffic for group 225.1.1.1")
- result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Verify IGMP groups")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1")
step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r5: Send multicast traffic for group 225.1.1.1")
- result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r5: Send multicast traffic for group 225.1.1.1")
- result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to R1")
step("Configure RP on r3 (loopback interface) for the group range" " 224.0.0.0/4")
step("Enable the PIM on all the interfaces of r1, r2, r3 and r4 routers")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r5: Send multicast traffic for group 225.1.1.1")
- result = iperfSendTraffic(tgen, "r5", GROUP_ADDRESS, 32, 2500)
+ result = app_helper.run_traffic("r5", GROUP_ADDRESS, "r3")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
step("r3: Verify (S, G) upstream join state and join timer")
result = verify_join_state_and_timer(
- tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False)
+ tgen, dut, iif, SOURCE_ADDRESS, GROUP_ADDRESS, expected=False
+ )
assert result is not True, (
"Testcase {} : Failed \n "
"r3: (S,G) upstream state is joined and join timer is running\n Error: {}".format(
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface and send IGMP join (225.1.1.1) to r1")
step("Configure RP on r2 (loopback interface) for the group range" "225.1.1.0/24")
step("Enable the PIM on all the interfaces of r1-r2-r3")
group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
step("r0: Send IGMP join for 10 groups")
- result = iperfSendIGMPJoin(tgen, "r0", group_address_list, join_interval=1)
+ result = app_helper.run_join("r0", group_address_list, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r5: Send multicast traffic for group 225.1.1.1")
- result = iperfSendTraffic(tgen, "r5", group_address_list, 32, 2500)
+ result = app_helper.run_traffic("r5", group_address_list, "r3")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Delete existing RP configuration")
input_dict = {
"r2": {
group_address_list = GROUP_ADDRESS_LIST_1 + GROUP_ADDRESS_LIST_2
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", group_address_list, join_interval=1)
+ result = app_helper.run_join("r0", group_address_list, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r5: Send multicast traffic for group 225.1.1.1")
- result = iperfSendTraffic(tgen, "r5", group_address_list, 32, 2500)
+ result = app_helper.run_traffic("r5", group_address_list, "r3")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify (*, G) upstream IIF interface")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
# Steps to execute
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (loopback interface) for the group range" " 224.0.0.0/4")
step("r1: Delete the RP config")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
step("Creating configuration from JSON")
reset_config_on_routers(tgen)
- kill_iperf(tgen)
+ app_helper.stop_all_hosts()
clear_ip_mroute(tgen)
clear_ip_pim_interface_traffic(tgen, TOPO)
- step("pre-configuration to send IGMP join and multicast traffic")
- result = config_to_send_igmp_join_and_traffic(tgen, tc_name)
- assert result is True, "Testcase{}: Failed Error: {}".format(tc_name, result)
-
step("Enable IGMP on r1 interface")
step("Configure RP on r2 (lo) for the group range" " 224.0.0.0/4")
step("r2: Delete the RP configuration")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r0: Send IGMP join")
- result = iperfSendIGMPJoin(tgen, "r0", GROUP_ADDRESS, join_interval=1)
+ result = app_helper.run_join("r0", GROUP_ADDRESS, "r1")
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("r1: Verify IGMP groups")
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.nhrpd]
-class NHRPTopo(Topo):
- "Test topology builder"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+def build_topo(tgen):
+ "Build function"
- # Create 3 routers.
- for routern in range(1, 4):
- tgen.add_router('r{}'.format(routern))
+ # Create 3 routers.
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r3'])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['r2'])
- switch = tgen.add_switch('s4')
- switch.add_link(tgen.gears['r1'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
def _populate_iface():
tgen = get_topogen()
- cmds_tot_hub = ['ip tunnel add {0}-gre0 mode gre ttl 64 key 42 dev {0}-eth0 local 10.2.1.{1} remote 0.0.0.0',
- 'ip link set dev {0}-gre0 up',
- 'echo 0 > /proc/sys/net/ipv4/ip_forward_use_pmtu',
- 'echo 1 > /proc/sys/net/ipv6/conf/{0}-eth0/disable_ipv6',
- 'echo 1 > /proc/sys/net/ipv6/conf/{0}-gre0/disable_ipv6']
-
- cmds_tot = ['ip tunnel add {0}-gre0 mode gre ttl 64 key 42 dev {0}-eth0 local 10.1.1.{1} remote 0.0.0.0',
- 'ip link set dev {0}-gre0 up',
- 'echo 0 > /proc/sys/net/ipv4/ip_forward_use_pmtu',
- 'echo 1 > /proc/sys/net/ipv6/conf/{0}-eth0/disable_ipv6',
- 'echo 1 > /proc/sys/net/ipv6/conf/{0}-gre0/disable_ipv6']
+ cmds_tot_hub = [
+ "ip tunnel add {0}-gre0 mode gre ttl 64 key 42 dev {0}-eth0 local 10.2.1.{1} remote 0.0.0.0",
+ "ip link set dev {0}-gre0 up",
+ "echo 0 > /proc/sys/net/ipv4/ip_forward_use_pmtu",
+ "echo 1 > /proc/sys/net/ipv6/conf/{0}-eth0/disable_ipv6",
+ "echo 1 > /proc/sys/net/ipv6/conf/{0}-gre0/disable_ipv6",
+ ]
+
+ cmds_tot = [
+ "ip tunnel add {0}-gre0 mode gre ttl 64 key 42 dev {0}-eth0 local 10.1.1.{1} remote 0.0.0.0",
+ "ip link set dev {0}-gre0 up",
+ "echo 0 > /proc/sys/net/ipv4/ip_forward_use_pmtu",
+ "echo 1 > /proc/sys/net/ipv6/conf/{0}-eth0/disable_ipv6",
+ "echo 1 > /proc/sys/net/ipv6/conf/{0}-gre0/disable_ipv6",
+ ]
for cmd in cmds_tot_hub:
- input = cmd.format('r2', '2')
- logger.info('input: '+cmd)
- output = tgen.net['r2'].cmd(cmd.format('r2', '2'))
- logger.info('output: '+output);
+ input = cmd.format("r2", "2")
+ logger.info("input: " + cmd)
+ output = tgen.net["r2"].cmd(cmd.format("r2", "2"))
+ logger.info("output: " + output)
for cmd in cmds_tot:
- input = cmd.format('r1', '1')
- logger.info('input: '+cmd)
- output = tgen.net['r1'].cmd(cmd.format('r1', '1'))
- logger.info('output: '+output);
+ input = cmd.format("r1", "1")
+ logger.info("input: " + cmd)
+ output = tgen.net["r1"].cmd(cmd.format("r1", "1"))
+ logger.info("output: " + output)
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(NHRPTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
_populate_iface()
-
- for rname, router in router_list.iteritems():
+
+ for rname, router in router_list.items():
router.load_config(
TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname)),
+ os.path.join(CWD, "{}/zebra.conf".format(rname)),
)
- if rname in ('r1', 'r2'):
+ if rname in ("r1", "r2"):
router.load_config(
- TopoRouter.RD_NHRP,
- os.path.join(CWD, '{}/nhrpd.conf'.format(rname))
+ TopoRouter.RD_NHRP, os.path.join(CWD, "{}/nhrpd.conf".format(rname))
)
# Initialize all routers.
- logger.info('Launching NHRP')
+ logger.info("Launching NHRP")
for name in router_list:
router = tgen.gears[name]
router.start()
logger.info("Checking NHRP cache and IPv4 routes for convergence")
router_list = tgen.routers()
- for rname, router in router_list.iteritems():
- if rname == 'r3':
+ for rname, router in router_list.items():
+ if rname == "r3":
continue
- json_file = '{}/{}/nhrp4_cache.json'.format(CWD, router.name)
+ json_file = "{}/{}/nhrp4_cache.json".format(CWD, router.name)
if not os.path.isfile(json_file):
- logger.info('skipping file {}'.format(json_file))
+ logger.info("skipping file {}".format(json_file))
continue
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip nhrp cache json', expected)
- _, result = topotest.run_and_expect(test_func, None, count=40,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip nhrp cache json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5)
- output = router.vtysh_cmd('show ip nhrp cache')
+ output = router.vtysh_cmd("show ip nhrp cache")
logger.info(output)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
- for rname, router in router_list.iteritems():
- if rname == 'r3':
+ for rname, router in router_list.items():
+ if rname == "r3":
continue
- json_file = '{}/{}/nhrp_route4.json'.format(CWD, router.name)
+ json_file = "{}/{}/nhrp_route4.json".format(CWD, router.name)
if not os.path.isfile(json_file):
- logger.info('skipping file {}'.format(json_file))
+ logger.info("skipping file {}".format(json_file))
continue
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip route nhrp json', expected)
- _, result = topotest.run_and_expect(test_func, None, count=40,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip route nhrp json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5)
- output = router.vtysh_cmd('show ip route nhrp')
+ output = router.vtysh_cmd("show ip route nhrp")
logger.info(output)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
- for rname, router in router_list.iteritems():
- if rname == 'r3':
+ for rname, router in router_list.items():
+ if rname == "r3":
continue
- logger.info('Dump neighbor information on {}-gre0'.format(rname))
- output = router.run('ip neigh show')
+ logger.info("Dump neighbor information on {}-gre0".format(rname))
+ output = router.run("ip neigh show")
logger.info(output)
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- pingrouter = tgen.gears['r1']
- logger.info('Check Ping IPv4 from R1 to R2 = 10.255.255.2)')
- output = pingrouter.run('ping 10.255.255.2 -f -c 1000')
+ pingrouter = tgen.gears["r1"]
+ logger.info("Check Ping IPv4 from R1 to R2 = 10.255.255.2)")
+ output = pingrouter.run("ping 10.255.255.2 -f -c 1000")
logger.info(output)
- if '1000 packets transmitted, 1000 received' not in output:
- assertmsg = 'expected ping IPv4 from R1 to R2 should be ok'
+ if "1000 packets transmitted, 1000 received" not in output:
+ assertmsg = "expected ping IPv4 from R1 to R2 should be ok"
assert 0, assertmsg
else:
- logger.info('Check Ping IPv4 from R1 to R2 OK')
+ logger.info("Check Ping IPv4 from R1 to R2 OK")
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import re
import sys
import pytest
-from time import sleep
from functools import partial
-from mininet.topo import Topo
# Save the Current Working Directory to find configuration files later.
CWD = os.path.dirname(os.path.realpath(__file__))
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-import platform
-
-pytestmark = [pytest.mark.ospfd]
-
-
-#####################################################
-##
-## Network Topology Definition
-##
-#####################################################
-class NetworkTopo(Topo):
- "OSPFv3 (IPv6) Test Topology 1"
-
- def build(self, **_opts):
- "Build function"
+pytestmark = [pytest.mark.ospfd]
- tgen = get_topogen(self)
- # Create 4 routers
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
+def build_topo(tgen):
+ # Create 4 routers
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
- #
- # Wire up the switches and routers
- # Note that we specify the link names so we match the config files
- #
+ #
+ # Wire up the switches and routers
+ # Note that we specify the link names so we match the config files
+ #
- # Create a empty network for router 1
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"], nodeif="r1-stubnet")
+ # Create a empty network for router 1
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"], nodeif="r1-stubnet")
- # Create a empty network for router 2
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"], nodeif="r2-stubnet")
+ # Create a empty network for router 2
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"], nodeif="r2-stubnet")
- # Create a empty network for router 3
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r3"], nodeif="r3-stubnet")
+ # Create a empty network for router 3
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r3"], nodeif="r3-stubnet")
- # Create a empty network for router 4
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r4"], nodeif="r4-stubnet")
+ # Create a empty network for router 4
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r4"], nodeif="r4-stubnet")
- # Interconnect routers 1, 2, and 3
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r1"], nodeif="r1-sw5")
- switch.add_link(tgen.gears["r2"], nodeif="r2-sw5")
- switch.add_link(tgen.gears["r3"], nodeif="r3-sw5")
+ # Interconnect routers 1, 2, and 3
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"], nodeif="r1-sw5")
+ switch.add_link(tgen.gears["r2"], nodeif="r2-sw5")
+ switch.add_link(tgen.gears["r3"], nodeif="r3-sw5")
- # Interconnect routers 3 and 4
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["r3"], nodeif="r3-sw6")
- switch.add_link(tgen.gears["r4"], nodeif="r4-sw6")
+ # Interconnect routers 3 and 4
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r3"], nodeif="r3-sw6")
+ switch.add_link(tgen.gears["r4"], nodeif="r4-sw6")
#####################################################
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(NetworkTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
logger.info("** %s: Setup Topology" % mod.__name__)
import re
import sys
import pytest
-import platform
-from time import sleep
from functools import partial
-from mininet.topo import Topo
# Save the Current Working Directory to find configuration files later.
CWD = os.path.dirname(os.path.realpath(__file__))
from lib.topotest import iproute2_is_vrf_capable
from lib.common_config import required_linux_kernel_version
-pytestmark = [pytest.mark.ospfd]
-
-#####################################################
-##
-## Network Topology Definition
-##
-#####################################################
-
-
-class NetworkTopo(Topo):
- "OSPFv3 (IPv6) Test Topology 1"
+pytestmark = [pytest.mark.ospfd]
- def build(self, **_opts):
- "Build function"
- tgen = get_topogen(self)
+def build_topo(tgen):
+ "Build function"
- # Create 4 routers
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
+ # Create 4 routers
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
- #
- # Wire up the switches and routers
- # Note that we specify the link names so we match the config files
- #
+ #
+ # Wire up the switches and routers
+ # Note that we specify the link names so we match the config files
+ #
- # Create a empty network for router 1
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"], nodeif="r1-stubnet")
+ # Create a empty network for router 1
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"], nodeif="r1-stubnet")
- # Create a empty network for router 2
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"], nodeif="r2-stubnet")
+ # Create a empty network for router 2
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"], nodeif="r2-stubnet")
- # Create a empty network for router 3
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r3"], nodeif="r3-stubnet")
+ # Create a empty network for router 3
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r3"], nodeif="r3-stubnet")
- # Create a empty network for router 4
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r4"], nodeif="r4-stubnet")
+ # Create a empty network for router 4
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r4"], nodeif="r4-stubnet")
- # Interconnect routers 1, 2, and 3
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r1"], nodeif="r1-sw5")
- switch.add_link(tgen.gears["r2"], nodeif="r2-sw5")
- switch.add_link(tgen.gears["r3"], nodeif="r3-sw5")
+ # Interconnect routers 1, 2, and 3
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"], nodeif="r1-sw5")
+ switch.add_link(tgen.gears["r2"], nodeif="r2-sw5")
+ switch.add_link(tgen.gears["r3"], nodeif="r3-sw5")
- # Interconnect routers 3 and 4
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["r3"], nodeif="r3-sw6")
- switch.add_link(tgen.gears["r4"], nodeif="r4-sw6")
+ # Interconnect routers 3 and 4
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r3"], nodeif="r3-sw6")
+ switch.add_link(tgen.gears["r4"], nodeif="r4-sw6")
#####################################################
if result is not True:
pytest.skip("Kernel requirements are not met")
- tgen = Topogen(NetworkTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
logger.info("** %s: Setup Topology" % mod.__name__)
# For debugging, uncomment the next line
# tgen.mininet_cli()
# Verify OSPFv3 Routing Table
- for router, rnode in tgen.routers().iteritems():
+ for router, rnode in tgen.routers().items():
logger.info('Waiting for router "%s" convergence', router)
# Load expected results from the command
r1.vtysh_cmd("clear ipv6 ospf interface r1-sw5")
# Verify OSPFv3 Routing Table
- for router, rnode in tgen.routers().iteritems():
+ for router, rnode in tgen.routers().items():
logger.info('Waiting for router "%s" convergence', router)
# Load expected results from the command
+debug ospf6 lsa router
+debug ospf6 lsa router originate
+debug ospf6 lsa router examine
+debug ospf6 lsa router flooding
+debug ospf6 lsa as-external
+debug ospf6 lsa as-external originate
+debug ospf6 lsa as-external examine
+debug ospf6 lsa as-external flooding
+debug ospf6 lsa intra-prefix
+debug ospf6 lsa intra-prefix originate
+debug ospf6 lsa intra-prefix examine
+debug ospf6 lsa intra-prefix flooding
+debug ospf6 border-routers
+debug ospf6 zebra
+debug ospf6 interface
+debug ospf6 neighbor
+debug ospf6 flooding
+debug ospf6 gr helper
+debug ospf6 spf process
+debug ospf6 route intra-area
+debug ospf6 route inter-area
+debug ospf6 abr
+debug ospf6 asbr
+debug ospf6 nssa
+!
interface r1-eth0
ipv6 ospf6 hello-interval 2
ipv6 ospf6 dead-interval 10
+debug ospf6 lsa router
+debug ospf6 lsa router originate
+debug ospf6 lsa router examine
+debug ospf6 lsa router flooding
+debug ospf6 lsa as-external
+debug ospf6 lsa as-external originate
+debug ospf6 lsa as-external examine
+debug ospf6 lsa as-external flooding
+debug ospf6 lsa intra-prefix
+debug ospf6 lsa intra-prefix originate
+debug ospf6 lsa intra-prefix examine
+debug ospf6 lsa intra-prefix flooding
+debug ospf6 border-routers
+debug ospf6 zebra
+debug ospf6 interface
+debug ospf6 neighbor
+debug ospf6 flooding
+debug ospf6 gr helper
+debug ospf6 spf process
+debug ospf6 route intra-area
+debug ospf6 route inter-area
+debug ospf6 abr
+debug ospf6 asbr
+debug ospf6 nssa
+!
interface r2-eth0
ipv6 ospf6 hello-interval 2
ipv6 ospf6 dead-interval 10
+debug ospf6 lsa router
+debug ospf6 lsa router originate
+debug ospf6 lsa router examine
+debug ospf6 lsa router flooding
+debug ospf6 lsa as-external
+debug ospf6 lsa as-external originate
+debug ospf6 lsa as-external examine
+debug ospf6 lsa as-external flooding
+debug ospf6 lsa intra-prefix
+debug ospf6 lsa intra-prefix originate
+debug ospf6 lsa intra-prefix examine
+debug ospf6 lsa intra-prefix flooding
+debug ospf6 border-routers
+debug ospf6 zebra
+debug ospf6 interface
+debug ospf6 neighbor
+debug ospf6 flooding
+debug ospf6 gr helper
+debug ospf6 spf process
+debug ospf6 route intra-area
+debug ospf6 route inter-area
+debug ospf6 abr
+debug ospf6 asbr
+debug ospf6 nssa
+!
interface r3-eth0
ipv6 ospf6 hello-interval 2
ipv6 ospf6 dead-interval 10
+debug ospf6 lsa router
+debug ospf6 lsa router originate
+debug ospf6 lsa router examine
+debug ospf6 lsa router flooding
+debug ospf6 lsa as-external
+debug ospf6 lsa as-external originate
+debug ospf6 lsa as-external examine
+debug ospf6 lsa as-external flooding
+debug ospf6 lsa intra-prefix
+debug ospf6 lsa intra-prefix originate
+debug ospf6 lsa intra-prefix examine
+debug ospf6 lsa intra-prefix flooding
+debug ospf6 border-routers
+debug ospf6 zebra
+debug ospf6 interface
+debug ospf6 neighbor
+debug ospf6 flooding
+debug ospf6 gr helper
+debug ospf6 spf process
+debug ospf6 route intra-area
+debug ospf6 route inter-area
+debug ospf6 abr
+debug ospf6 asbr
+debug ospf6 nssa
+!
interface r4-eth0
ipv6 ospf6 hello-interval 2
ipv6 ospf6 dead-interval 10
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ospf6d]
assert result is None, assertmsg
-def expect_ospfv3_routes(router, routes, wait=5):
+def expect_ospfv3_routes(router, routes, wait=5, type=None, detail=False):
"Run command `ipv6 ospf6 route` and expect route with type."
tgen = get_topogen()
+ if detail == False:
+ if type == None:
+ cmd = "show ipv6 ospf6 route json"
+ else:
+ cmd = "show ipv6 ospf6 route {} json".format(type)
+ else:
+ if type == None:
+ cmd = "show ipv6 ospf6 route detail json"
+ else:
+ cmd = "show ipv6 ospf6 route {} detail json".format(type)
+
logger.info("waiting OSPFv3 router '{}' route".format(router))
test_func = partial(
- topotest.router_json_cmp,
- tgen.gears[router],
- "show ipv6 ospf6 route json",
- {"routes": routes}
+ topotest.router_json_cmp, tgen.gears[router], cmd, {"routes": routes}
)
_, result = topotest.run_and_expect(test_func, None, count=wait, wait=1)
assertmsg = '"{}" convergence failure'.format(router)
assert result is None, assertmsg
-class OSPFv3Topo2(Topo):
- "Test topology builder"
+def dont_expect_route(router, unexpected_route, type=None):
+ "Specialized test function to expect route go missing"
+ tgen = get_topogen()
+
+ if type == None:
+ cmd = "show ipv6 ospf6 route json"
+ else:
+ cmd = "show ipv6 ospf6 route {} json".format(type)
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ output = tgen.gears[router].vtysh_cmd(cmd, isjson=True)
+ if unexpected_route in output["routes"]:
+ return output["routes"][unexpected_route]
+ return None
- # Create 4 routers
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+def build_topo(tgen):
+ "Build function"
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ # Create 4 routers
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r4"])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(OSPFv3Topo2, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
{
"numberOfIntraAreaRoutes": 1,
"numberOfInterAreaRoutes": 2,
- "numberOfExternal1Routes": 0,
+ "numberOfExternal1Routes": 4,
"numberOfExternal2Routes": 0,
},
)
expect_route("r1", "::/0", metric + 10)
+def test_redistribute_metrics():
+ """
+ Test that the configured metrics are honored when a static route is
+ redistributed.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ # Add new static route on r3.
+ config = """
+ configure terminal
+ ipv6 route 2001:db8:500::/64 Null0
+ """
+ tgen.gears["r3"].vtysh_cmd(config)
+
+ route = {
+ "2001:db8:500::/64": {
+ "metricType": 2,
+ "metricCost": 10,
+ }
+ }
+ logger.info(
+ "Expecting AS-external route 2001:db8:500::/64 to show up with default metrics"
+ )
+ expect_ospfv3_routes("r2", route, wait=30, detail=True)
+
+ # Change the metric of redistributed routes of the static type on r3.
+ config = """
+ configure terminal
+ router ospf6
+ redistribute static metric 50 metric-type 1
+ """
+ tgen.gears["r3"].vtysh_cmd(config)
+
+ # Check if r3 reinstalled 2001:db8:500::/64 using the new metric type and value.
+ route = {
+ "2001:db8:500::/64": {
+ "metricType": 1,
+ "metricCost": 60,
+ }
+ }
+ logger.info(
+ "Expecting AS-external route 2001:db8:500::/64 to show up with updated metric type and value"
+ )
+ expect_ospfv3_routes("r2", route, wait=30, detail=True)
+
+
def test_nssa_lsa_type7():
"""
Test that static route gets announced as external route when redistributed
route = {
"2001:db8:100::/64": {
"pathType": "E1",
- "nextHops": [
- {"nextHop": "::", "interfaceName": "r4-eth0"}
- ]
+ "nextHops": [{"nextHop": "::", "interfaceName": "r4-eth0"}],
}
}
def dont_expect_lsa(unexpected_lsa):
"Specialized test function to expect LSA go missing"
- output = tgen.gears["r4"].vtysh_cmd("show ipv6 ospf6 database type-7 detail json", isjson=True)
- for lsa in output['areaScopedLinkStateDb'][0]['lsa']:
+ output = tgen.gears["r4"].vtysh_cmd(
+ "show ipv6 ospf6 database type-7 detail json", isjson=True
+ )
+ for lsa in output["areaScopedLinkStateDb"][0]["lsa"]:
if lsa["prefix"] == unexpected_lsa["prefix"]:
if lsa["forwardingAddress"] == unexpected_lsa["forwardingAddress"]:
return lsa
return None
- def dont_expect_route(unexpected_route):
- "Specialized test function to expect route go missing"
- output = tgen.gears["r4"].vtysh_cmd("show ipv6 ospf6 route json", isjson=True)
- if output["routes"].has_key(unexpected_route):
- return output["routes"][unexpected_route]
- return None
-
-
logger.info("Expecting LSA type-7 and OSPFv3 route 2001:db8:100::/64 to go away")
# Test that LSA doesn't exist.
assert result is None, assertmsg
# Test that route doesn't exist.
- test_func = partial(dont_expect_route, "2001:db8:100::/64")
+ test_func = partial(dont_expect_route, "r4", "2001:db8:100::/64")
_, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
assertmsg = '"{}" route still exists'.format("r4")
assert result is None, assertmsg
+def test_nssa_no_summary():
+ """
+ Test the following:
+ * Type-3 inter-area routes should be removed when the NSSA no-summary option
+ is configured;
+ * A type-3 inter-area default route should be originated into the NSSA area
+ when the no-summary option is configured;
+ * Once the no-summary option is unconfigured, all previously existing
+ Type-3 inter-area routes should be re-added, and the inter-area default
+ route removed.
+ """
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ #
+ # Configure area 1 as a NSSA totally stub area.
+ #
+ config = """
+ configure terminal
+ router ospf6
+ area 2 nssa no-summary
+ """
+ tgen.gears["r2"].vtysh_cmd(config)
+
+ logger.info("Expecting inter-area routes to be removed")
+ for route in ["2001:db8:1::/64", "2001:db8:2::/64"]:
+ test_func = partial(dont_expect_route, "r4", route, type="inter-area")
+ _, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
+ assertmsg = "{}'s {} inter-area route still exists".format("r4", route)
+ assert result is None, assertmsg
+
+ logger.info("Expecting inter-area default-route to be added")
+ routes = {"::/0": {}}
+ expect_ospfv3_routes("r4", routes, wait=30, type="inter-area")
+
+ #
+ # Configure area 1 as a regular NSSA area.
+ #
+ config = """
+ configure terminal
+ router ospf6
+ area 2 nssa
+ """
+ tgen.gears["r2"].vtysh_cmd(config)
+
+ logger.info("Expecting inter-area routes to be re-added")
+ routes = {"2001:db8:1::/64": {}, "2001:db8:2::/64": {}}
+ expect_ospfv3_routes("r4", routes, wait=30, type="inter-area")
+
+ logger.info("Expecting inter-area default route to be removed")
+ test_func = partial(dont_expect_route, "r4", "::/0", type="inter-area")
+ _, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
+ assertmsg = "{}'s inter-area default route still exists".format("r4")
+ assert result is None, assertmsg
+
+
def teardown_module(_mod):
"Teardown the pytest environment"
tgen = get_topogen()
import sys
import time
import pytest
-import json
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
import ipaddress
from time import sleep
create_interfaces_cfg,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf_neighbor,
clear_ospf,
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospf_asbr_summary_topo1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
NETWORK = {
"ipv4": [
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_asbr_summary_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
dut = "r1"
- result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ result = verify_ospf_rib(tgen, dut, input_dict_summary, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
import sys
import time
import pytest
-import json
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
-import ipaddress
from time import sleep
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
start_topology,
write_test_header,
- kill_router_daemons,
write_test_footer,
reset_config_on_routers,
- stop_router,
- start_router,
verify_rib,
create_static_routes,
step,
- start_router_daemons,
- create_route_maps,
- shutdown_bringup_interface,
topo_daemons,
- create_prefix_lists,
- create_route_maps,
- create_interfaces_cfg,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf_neighbor,
- clear_ospf,
verify_ospf_rib,
create_router_ospf,
verify_ospf_summary,
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospf_asbr_summary_type7_lsa.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
NETWORK = {
"ipv4": [
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_asbr_summary_type7_lsa.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
import pytest
from time import sleep
from copy import deepcopy
-import json
from lib.topotest import frr_unicode
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
topo_daemons,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import verify_ospf_neighbor, config_ospf_interface, clear_ospf
from ipaddress import IPv4Address
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospf_authentication.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
"""
TOPOOLOGY =
Please view in a fixed-width font such as Courier.
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_authentication.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
import sys
import time
import pytest
-from copy import deepcopy
-import json
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
write_test_footer,
reset_config_on_routers,
step,
- shutdown_bringup_interface,
topo_daemons,
verify_rib,
stop_router,
from lib.ospf import verify_ospf_neighbor, verify_ospf_rib, create_router_ospf
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
-from ipaddress import IPv4Address
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
3. Verify ospf functionality when staticd is restarted.
"""
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospf_chaos.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
-
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
def setup_module(mod):
"""
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_chaos.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
def test_ospf_chaos_tc32_p1(request):
- """Verify ospf functionality after restart FRR service. """
+ """Verify ospf functionality after restart FRR service."""
tc_name = request.node.name
write_test_header(tc_name)
tgen = get_topogen()
import sys
import time
import pytest
-import json
from time import sleep
-from copy import deepcopy
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
-from ipaddress import IPv4Address
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
verify_rib,
create_static_routes,
step,
- create_route_maps,
shutdown_bringup_interface,
- create_interfaces_cfg,
topo_daemons,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf_neighbor,
config_ospf_interface,
- clear_ospf,
verify_ospf_rib,
- create_router_ospf,
- verify_ospf_interface,
redistribute_ospf,
)
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospf_ecmp.json".format(CWD)
-
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
# Global variables
NETWORK = {
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_ecmp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
import sys
import time
import pytest
-import json
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
start_topology,
write_test_header,
- create_interfaces_cfg,
write_test_footer,
reset_config_on_routers,
verify_rib,
create_static_routes,
- check_address_types,
step,
- create_route_maps,
- shutdown_bringup_interface,
- stop_router,
- start_router,
topo_daemons,
)
-from lib.bgp import verify_bgp_convergence, create_router_bgp
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf_neighbor,
- config_ospf_interface,
clear_ospf,
verify_ospf_rib,
- create_router_ospf,
- verify_ospf_interface,
redistribute_ospf,
)
-from ipaddress import IPv4Address
pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
# Global variables
topo = None
# Reading the data from JSON File for topology creation
-
-jsonFile = "{}/ospf_ecmp_lan.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
NETWORK = {
"ipv4": [
"11.0.20.1/32",
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_ecmp_lan.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
import sys
import time
import pytest
-import json
from copy import deepcopy
-import ipaddress
from lib.topotest import frr_unicode
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
create_interfaces_cfg,
write_test_footer,
reset_config_on_routers,
- verify_rib,
- create_static_routes,
- check_address_types,
step,
- create_route_maps,
shutdown_bringup_interface,
stop_router,
start_router,
topo_daemons,
)
-from lib.bgp import verify_bgp_convergence, create_router_bgp
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf_neighbor,
- config_ospf_interface,
clear_ospf,
- verify_ospf_rib,
create_router_ospf,
verify_ospf_interface,
)
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospf_lan.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
NETWORK = {
"ipv4": [
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_lan.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
import ipaddress
from lib.ospf import (
verify_ospf_neighbor,
- config_ospf_interface,
- clear_ospf,
verify_ospf_rib,
create_router_ospf,
- verify_ospf_interface,
redistribute_ospf,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.topolog import logger
from lib.common_config import (
start_topology,
verify_rib,
create_static_routes,
step,
- create_route_maps,
- shutdown_bringup_interface,
- create_interfaces_cfg,
topo_daemons,
)
-from ipaddress import IPv4Address
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
import os
import sys
import time
import pytest
-import json
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospf_nssa.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
NETWORK = {
"ipv4": [
"11.0.20.1/32",
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_nssa.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
import sys
import time
import pytest
-import json
from copy import deepcopy
from ipaddress import IPv4Address
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
-import ipaddress
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
write_test_header,
write_test_footer,
reset_config_on_routers,
- verify_rib,
- create_static_routes,
step,
- create_route_maps,
- shutdown_bringup_interface,
create_interfaces_cfg,
topo_daemons,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
+from lib.topotest import frr_unicode
from lib.ospf import (
- verify_ospf_neighbor,
- config_ospf_interface,
- clear_ospf,
- verify_ospf_rib,
- create_router_ospf,
verify_ospf_interface,
- verify_ospf_database,
)
-pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
+pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospf_p2mp.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
"""
TOPOOLOGY =
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_p2mp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
topo_modify_change_ip = deepcopy(topo)
intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"]
topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] = str(
- IPv4Address(unicode(intf_ip.split("/")[0])) + 3
+ IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3
) + "/{}".format(intf_ip.split("/")[1])
build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False)
topo_modify_change_ip = deepcopy(topo)
intf_ip = topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"]
topo_modify_change_ip["routers"]["r0"]["links"]["r3"]["ipv4"] = str(
- IPv4Address(unicode(intf_ip.split("/")[0])) + 3
+ IPv4Address(frr_unicode(intf_ip.split("/")[0])) + 3
) + "/{}".format(int(intf_ip.split("/")[1]) + 1)
build_config_from_json(tgen, topo_modify_change_ip, save_bkup=False)
import sys
import time
import pytest
-import json
-from copy import deepcopy
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
create_prefix_lists,
verify_rib,
create_static_routes,
- check_address_types,
step,
create_route_maps,
verify_prefix_lists,
topo_daemons,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf_neighbor,
- clear_ospf,
verify_ospf_rib,
create_router_ospf,
- verify_ospf_database,
redistribute_ospf,
)
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospf_routemaps.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
NETWORK = {
"ipv4": [
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_routemaps.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
)
result = verify_rib(
- tgen, "ipv4", dut, input_dict, protocol=protocol, retry_timeout=4, expected=False
+ tgen,
+ "ipv4",
+ dut,
+ input_dict,
+ protocol=protocol,
+ retry_timeout=4,
+ expected=False,
)
assert (
result is not True
write_test_footer(tc_name)
+def test_ospf_routemaps_functionality_tc25_p0(request):
+ """
+ OSPF route map support functionality.
+
+ Verify OSPF route map support functionality
+ when route map actions are toggled.
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config as per the topology")
+
+ reset_config_on_routers(tgen)
+
+ step(
+ "Create static routes(10.0.20.1/32) in R1 and redistribute "
+ "to OSPF using route map."
+ )
+
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": 5,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_red_r0 = {
+ "r0": {
+ "ospf": {
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv4"}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step("Configure route map with permit rule")
+ # Create route map
+ routemaps = {"r0": {"route_maps": {"rmap_ipv4": [{"action": "permit"}]}}}
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that route is advertised to R1.")
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ step("Configure route map with deny rule")
+ # Create route map
+ routemaps = {
+ "r0": {"route_maps": {"rmap_ipv4": [{"seq_id": 10, "action": "deny"}]}}
+ }
+ result = create_route_maps(tgen, routemaps)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Api call verify whether OSPF is converged
+ ospf_covergence = verify_ospf_neighbor(tgen, topo)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ step("Verify that route is not advertised to R1.")
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
+def test_ospf_routemaps_functionality_tc22_p0(request):
+ """
+ OSPF Route map - Multiple sequence numbers.
+
+ Verify OSPF route map support functionality with multiple sequence
+ numbers in a single route-map for different match/set clauses.
+
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+ global topo
+ step("Bring up the base config as per the topology")
+
+ reset_config_on_routers(tgen)
+
+ step(
+ "Configure route map with seq number 10 to with ip prefix"
+ " permitting route 10.0.20.1/32 in R1"
+ )
+ step(
+ "Configure route map with seq number 20 to with ip prefix"
+ " permitting route 10.0.20.2/32 in R1"
+ )
+
+ # Create route map
+ input_dict_3 = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv4": {"prefix_lists": "pf_list_1_ipv4"}},
+ },
+ {
+ "action": "permit",
+ "seq_id": "20",
+ "match": {"ipv4": {"prefix_lists": "pf_list_2_ipv4"}},
+ },
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": NETWORK["ipv4"][0], "action": "permit"}
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ # Create ip prefix list
+ input_dict_2 = {
+ "r0": {
+ "prefix_lists": {
+ "ipv4": {
+ "pf_list_2_ipv4": [
+ {"seqid": 10, "network": NETWORK["ipv4"][1], "action": "permit"}
+ ]
+ }
+ }
+ }
+ }
+ result = create_prefix_lists(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure static routes 10.0.20.1/32 and 10.0.20.2 in R1")
+ # Create Static routes
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": 5,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure redistribute static route with route map.")
+ ospf_red_r0 = {
+ "r0": {
+ "ospf": {
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv4"}]
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_red_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {
+ "r0": {
+ "static_routes": [
+ {
+ "network": NETWORK["ipv4"][0],
+ "no_of_ip": 2,
+ "next_hop": "Null0",
+ }
+ ]
+ }
+ }
+ result = create_static_routes(tgen, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that both routes are learned in R1 and R2")
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ dut = "r2"
+ protocol = "ospf"
+ result = verify_ospf_rib(tgen, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Change route map with seq number 20 to deny.")
+ # Create route map
+ input_dict_3 = {
+ "r0": {
+ "route_maps": {
+ "rmap_ipv4": [
+ {
+ "action": "deny",
+ "seq_id": "20",
+ "match": {"ipv4": {"prefix_lists": "pf_list_2_ipv4"}},
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step(
+ "Verify the route 10.0.20.2/32 is withdrawn and not present "
+ "in the routing table of R0 and R1."
+ )
+
+ input_dict = {
+ "r0": {"static_routes": [{"network": NETWORK["ipv4"][1], "next_hop": "Null0"}]}
+ }
+
+ dut = "r1"
+ protocol = "ospf"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ dut = "r2"
+ protocol = "ospf"
+ result = verify_ospf_rib(tgen, dut, input_dict, expected=False)
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ write_test_footer(tc_name)
+
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import time
import pytest
import ipaddress
-import json
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
)
from lib.bgp import verify_bgp_convergence, create_router_bgp
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf_neighbor,
clear_ospf,
verify_ospf_rib,
- create_router_ospf,
redistribute_ospf,
config_ospf_interface,
verify_ospf_interface,
# number of retries.
nretry = 5
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospf_rte_calc.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
NETWORK = {
"ipv4": [
"11.0.20.1/32",
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_rte_calc.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
import sys
import time
import pytest
-import json
from copy import deepcopy
from ipaddress import IPv4Address
from lib.topotest import frr_unicode
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
import ipaddress
write_test_header,
write_test_footer,
reset_config_on_routers,
- verify_rib,
- create_static_routes,
step,
- create_route_maps,
- shutdown_bringup_interface,
create_interfaces_cfg,
topo_daemons,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf_neighbor,
config_ospf_interface,
clear_ospf,
verify_ospf_rib,
- create_router_ospf,
verify_ospf_interface,
- verify_ospf_database,
)
pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospf_single_area.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
"""
TOPOOLOGY =
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospf_single_area.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
)
r1_ospf_mtu = {
- "r1": {"links": {"r0": {"ospf": {"mtu_ignore": True, "delete": True}}}}
+ "r1": {"links": {"r0": {"ospf": {"mtu_ignore": True, "del_action": True}}}}
}
result = config_ospf_interface(tgen, topo, r1_ospf_mtu)
assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
import sys
import time
import pytest
-import json
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
sys.path.append(os.path.join(CWD, "../lib/"))
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
from lib.common_config import (
write_test_header,
write_test_footer,
reset_config_on_routers,
- stop_router,
- start_router,
- verify_rib,
- create_static_routes,
step,
- start_router_daemons,
- shutdown_bringup_interface,
topo_daemons,
- create_prefix_lists,
- create_interfaces_cfg,
- run_frr_cmd,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf_neighbor,
verify_ospf6_neighbor,
- create_router_ospf,
- create_router_ospf6,
- verify_ospf_summary,
- redistribute_ospf,
- verify_ospf_database,
)
pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/test_ospf_dual_stack.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
-
-class CreateTopo(Topo):
- """Test topology builder."""
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
def setup_module(mod):
"""Sets up the pytest environment."""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/test_ospf_dual_stack.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
--- /dev/null
+{
+ "ipv4base": "10.0.0.0",
+ "ipv4mask": 24,
+ "link_ip_start": {
+ "ipv4": "10.0.0.0",
+ "v4mask": 24
+ },
+ "lo_prefix": {
+ "ipv4": "1.0.",
+ "v4mask": 32
+ },
+ "switches": {
+ "s1": {
+ "links": {
+ "r0": {
+ "ipv4": "17.1.1.2/24",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 40,
+ "priority": 98
+ }
+ },
+ "r1": {
+ "ipv4": "17.1.1.1/24",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 40,
+ "priority": 99
+ }
+ },
+ "r2": {
+ "ipv4": "17.1.1.3/24",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 40,
+ "priority": 0
+ }
+ },
+ "r3": {
+ "ipv4": "17.1.1.4/24",
+ "ospf": {
+ "area": "0.0.0.0",
+ "hello_interval": 1,
+ "dead_interval": 40,
+ "priority": 0
+ }
+ }
+ }
+ }
+ },
+ "routers": {
+ "r0": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "type": "loopback"
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.0",
+ "neighbors": {
+ "r1": {},
+ "r2": {},
+ "r3": {}
+ }
+ }
+ },
+ "r1": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "type": "loopback"
+ }
+ },
+ "ospf": {
+ "router_id": "1.1.1.1",
+ "neighbors": {
+ "r0": {},
+ "r2": {},
+ "r3": {}
+ }
+ },
+ "opq_lsa_hex": "01005e00000570708bd051ef080045c0005cc18b0000015904f711010101e00000050204004801010101000000001e8d0000000000000000000000000001000102090300000001010101800000013bd1002c000100040000070800020001010000000003000411010101"
+ },
+ "r2": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "type": "loopback"
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.2",
+ "neighbors": {
+ "r1": {},
+ "r0": {}
+ }
+ }
+ },
+ "r3": {
+ "links": {
+ "lo": {
+ "ipv4": "auto",
+ "type": "loopback"
+ }
+ },
+ "ospf": {
+ "router_id": "100.1.1.3",
+ "neighbors": {
+ "r0": {},
+ "r1": {}
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+#!/usr/bin/python
+
+#
+# Copyright (c) 2021 by VMware, Inc. ("VMware")
+# Used Copyright (c) 2018 by Network Device Education Foundation, Inc.
+# ("NetDEF") in this file.
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+
+"""OSPF Basic Functionality Automation."""
+import os
+import sys
+import time
+import pytest
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib.topogen import Topogen, get_topogen
+
+# Import topoJson from lib, to create topology and initial configuration
+from lib.common_config import (
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ step,
+ create_interfaces_cfg,
+ topo_daemons,
+ scapy_send_raw_packet,
+)
+
+from lib.topolog import logger
+from lib.topojson import build_config_from_json
+
+from lib.ospf import (
+ verify_ospf_neighbor,
+ clear_ospf,
+ verify_ospf_gr_helper,
+ create_router_ospf,
+)
+
+# Global variables
+topo = None
+Iters = 5
+sw_name = None
+intf = None
+intf1 = None
+pkt = None
+
+"""
+Topology:
+
+ Please view in a fixed-width font such as Courier.
+ Topo : Broadcast Networks
+ DUT - HR RR
+ +---+ +---+ +---+ +---+
+ |R0 + +R1 + +R2 + +R3 |
+ +-+-+ +-+-+ +-+-+ +-+-+
+ | | | |
+ | | | |
+ --+-----------+--------------+---------------+-----
+ Ethernet Segment
+
+Testcases:
+
+TC1. Verify by default helper support is disabled for FRR ospf
+TC2. OSPF GR on Broadcast : Verify DUT enters Helper mode when neighbor
+ sends grace lsa, helps RR to restart gracefully (RR = DR)
+TC3. OSPF GR on Broadcast : Verify DUT enters Helper mode when neighbor
+ sends grace lsa, helps RR to restart gracefully (RR = BDR)
+TC4. OSPF GR on Broadcast : Verify DUT enters Helper mode when neighbor
+ sends grace lsa, helps RR to restart gracefully (RR = DRother)
+TC5. OSPF GR on P2P : Verify DUT enters Helper mode when neighbor sends
+ grace lsa, helps RR to restart gracefully.
+TC6. Verify all the show commands newly introducted as part of ospf
+ helper support - Json Key verification wrt to show commands.
+TC7. Verify helper when grace lsa is received with different configured
+ value in process level (higher, lower, grace lsa timer above 1800)
+TC8. Verify helper functionality when dut is helping RR and new grace lsa
+ is received from RR.
+"""
+
+
+def setup_module(mod):
+ """
+ Sets up the pytest environment
+
+ * `mod`: module name
+ """
+ global topo, intf, intf1, sw_name, pkt
+ testsuite_run_time = time.asctime(time.localtime(time.time()))
+ logger.info("Testsuite start time: {}".format(testsuite_run_time))
+ logger.info("=" * 40)
+
+ logger.info("Running setup_module to create topology")
+
+ # This function initiates the topology build with Topogen...
+ json_file = "{}/ospf_gr_helper.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
+ # ... and here it calls Mininet initialization functions.
+
+ # get list of daemons needs to be started for this suite.
+ daemons = topo_daemons(tgen, topo)
+
+ # Starting topology, create tmp files which are loaded to routers
+ # to start deamons and then start routers
+ start_topology(tgen, daemons)
+
+ # Creating configuration from JSON
+ build_config_from_json(tgen, topo)
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
+
+ sw_name = "s1"
+ intf = topo["routers"]["r0"]["links"][sw_name]["interface"]
+ intf1 = topo["routers"]["r1"]["links"][sw_name]["interface"]
+ pkt = topo["routers"]["r1"]["opq_lsa_hex"]
+
+ logger.info("Running setup_module() done")
+
+
+def teardown_module():
+ """Teardown the pytest environment"""
+
+ logger.info("Running teardown_module to delete topology")
+
+ tgen = get_topogen()
+
+ try:
+ # Stop toplogy and Remove tmp files
+ tgen.stop_topology
+
+ except OSError:
+ # OSError exception is raised when mininet tries to stop switch
+ # though switch is stopped once but mininet tries to stop same
+ # switch again, where it ended up with exception
+ pass
+
+
+def delete_ospf():
+ """delete ospf process after each test"""
+ tgen = get_topogen()
+ step("Delete ospf process")
+ for rtr in topo["routers"]:
+ ospf_del = {rtr: {"ospf": {"delete": True}}}
+ result = create_router_ospf(tgen, topo, ospf_del)
+ assert result is True, "Testcase: Failed \n Error: {}".format(result)
+
+
+# ##################################
+# Test cases start here.
+# ##################################
+
+
+def test_ospf_gr_helper_tc1_p0(request):
+ """Verify by default helper support is disabled for FRR ospf"""
+
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo, intf, intf1, pkt
+
+ step("Bring up the base config as per the topology")
+ reset_config_on_routers(tgen)
+ ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
+ assert (
+ ospf_covergence is True
+ ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
+
+ step("Verify that GR helper route is disabled by default to the in" "the DUT.")
+ input_dict = {
+ "helperSupport": "Disabled",
+ "strictLsaCheck": "Enabled",
+ "restartSupoort": "Planned and Unplanned Restarts",
+ "supportedGracePeriod": 1800,
+ }
+ dut = "r0"
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that DUT does not enter helper mode upon receiving the " "grace lsa.")
+
+ # send grace lsa
+ scapy_send_raw_packet(tgen, topo, "r1", intf1, pkt)
+
+ input_dict = {"activeRestarterCnt": 1}
+ dut = "r0"
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed. DUT entered helper role " " \n Error: {}".format(
+ tc_name, result
+ )
+
+ step("Configure graceful restart in the DUT")
+ ospf_gr_r0 = {
+ "r0": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that GR helper route is enabled in the DUT.")
+ input_dict = {
+ "helperSupport": "Enabled",
+ "strictLsaCheck": "Enabled",
+ "restartSupoort": "Planned and Unplanned Restarts",
+ "supportedGracePeriod": 1800,
+ }
+ dut = "r0"
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_gr_r1 = {
+ "r1": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Perform GR in RR.")
+ step("Verify that DUT does enter helper mode upon receiving" " the grace lsa.")
+ input_dict = {"activeRestarterCnt": 1}
+ gracelsa_sent = False
+ repeat = 0
+ dut = "r0"
+ while not gracelsa_sent and repeat < Iters:
+ gracelsa_sent = scapy_send_raw_packet(tgen, topo, "r1", intf1, pkt)
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ if isinstance(result, str):
+ repeat += 1
+ gracelsa_sent = False
+
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Unconfigure the GR helper command.")
+ ospf_gr_r0 = {
+ "r0": {
+ "ospf": {
+ "graceful-restart": {"helper-only": [], "opaque": True, "delete": True}
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {"helperSupport": "Disabled"}
+ dut = "r0"
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure gr helper using the router id")
+ ospf_gr_r0 = {
+ "r0": {
+ "ospf": {"graceful-restart": {"helper-only": ["1.1.1.1"], "opaque": True}}
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that DUT does enter helper mode upon receiving" " the grace lsa.")
+ input_dict = {"activeRestarterCnt": 1}
+ gracelsa_sent = False
+ repeat = 0
+ dut = "r0"
+ while not gracelsa_sent and repeat < Iters:
+ gracelsa_sent = scapy_send_raw_packet(tgen, topo, "r1", intf1, pkt)
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ if isinstance(result, str):
+ repeat += 1
+ gracelsa_sent = False
+
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Un Configure gr helper using the router id")
+ ospf_gr_r0 = {
+ "r0": {
+ "ospf": {
+ "graceful-restart": {
+ "helper-only": ["1.1.1.1"],
+ "opaque": True,
+ "delete": True,
+ }
+ }
+ }
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that GR helper router is disabled in the DUT for" " router id x.x.x.x")
+ input_dict = {"enabledRouterIds": [{"routerId": "1.1.1.1"}]}
+ dut = "r0"
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed, Helper role enabled for RR\n Error: {}".format(
+ tc_name, result
+ )
+ delete_ospf()
+ write_test_footer(tc_name)
+
+
+def test_ospf_gr_helper_tc2_p0(request):
+ """
+ OSPF GR on Broadcast : Verify DUT enters Helper mode when neighbor
+ sends grace lsa, helps RR to restart gracefully (RR = DR)
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo, intf, intf1, pkt
+
+ step("Bring up the base config as per the topology")
+ step(
+ "Configure DR priority as 99 in RR , DUT dr priority = 98 "
+ "& reset ospf process in all the routers"
+ )
+ reset_config_on_routers(tgen)
+ ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
+ assert (
+ ospf_covergence is True
+ ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
+ ospf_gr_r0 = {
+ "r0": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_gr_r1 = {
+ "r1": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that DUT enters into helper mode.")
+
+ input_dict = {"activeRestarterCnt": 1}
+ gracelsa_sent = False
+ repeat = 0
+ dut = "r0"
+ while not gracelsa_sent and repeat < Iters:
+ gracelsa_sent = scapy_send_raw_packet(tgen, topo, "r1", intf1, pkt)
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ if isinstance(result, str):
+ repeat += 1
+ gracelsa_sent = False
+
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ delete_ospf()
+ write_test_footer(tc_name)
+
+
+def test_ospf_gr_helper_tc3_p1(request):
+ """
+ OSPF GR on Broadcast : Verify DUT enters Helper mode when neighbor
+ sends grace lsa, helps RR to restart gracefully (RR = BDR)
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo, intf, intf1, pkt
+
+ step("Bring up the base config as per the topology")
+ step(
+ "Configure DR priority as 99 in RR , DUT dr priority = 98 "
+ "& reset ospf process in all the routers"
+ )
+ reset_config_on_routers(tgen)
+ ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
+ assert (
+ ospf_covergence is True
+ ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
+ step(
+ "Configure DR pririty 100 on R0 and clear ospf neighbors " "on all the routers."
+ )
+
+ input_dict = {
+ "r0": {
+ "links": {
+ sw_name: {
+ "interface": topo["routers"]["r0"]["links"][sw_name]["interface"],
+ "ospf": {"priority": 100},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Clear ospf neighbours in all routers")
+ for rtr in topo["routers"]:
+ clear_ospf(tgen, rtr)
+
+ step("Verify that DR election is triggered and R0 is elected as DR")
+ input_dict = {
+ "r0": {
+ "ospf": {
+ "neighbors": {
+ "r1": {"state": "Full", "role": "Backup"},
+ "r2": {"state": "Full", "role": "DROther"},
+ "r3": {"state": "Full", "role": "DROther"},
+ }
+ }
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_gr_r0 = {
+ "r0": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_gr_r1 = {
+ "r1": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that DUT enters into helper mode.")
+
+ input_dict = {"activeRestarterCnt": 1}
+ gracelsa_sent = False
+ repeat = 0
+ dut = "r0"
+ while not gracelsa_sent and repeat < Iters:
+ gracelsa_sent = scapy_send_raw_packet(tgen, topo, "r1", intf1, pkt)
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ if isinstance(result, str):
+ repeat += 1
+ gracelsa_sent = False
+
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ delete_ospf()
+ write_test_footer(tc_name)
+
+
+def test_ospf_gr_helper_tc4_p1(request):
+ """
+ OSPF GR on Broadcast : Verify DUT enters Helper mode when neighbor
+ sends grace lsa, helps RR to restart gracefully (RR = DRother)
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo, intf, intf1, pkt
+
+ step("Bring up the base config as per the topology")
+ step(
+ "Configure DR priority as 99 in RR , DUT dr priority = 98 "
+ "& reset ospf process in all the routers"
+ )
+ reset_config_on_routers(tgen)
+ ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
+ assert (
+ ospf_covergence is True
+ ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
+ step(
+ "Configure DR pririty 100 on R0 and clear ospf neighbors " "on all the routers."
+ )
+
+ input_dict = {
+ "r0": {
+ "links": {
+ sw_name: {
+ "interface": topo["routers"]["r0"]["links"][sw_name]["interface"],
+ "ospf": {"priority": 0},
+ }
+ }
+ }
+ }
+
+ result = create_interfaces_cfg(tgen, input_dict)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
+
+ step("Clear ospf neighbours in all routers")
+ for rtr in topo["routers"]:
+ clear_ospf(tgen, rtr)
+
+ step("Verify that DR election is triggered and R0 is elected as 2-Way")
+ input_dict = {
+ "r0": {
+ "ospf": {
+ "neighbors": {
+ "r1": {"state": "Full", "role": "DR"},
+ "r2": {"state": "2-Way", "role": "DROther"},
+ "r3": {"state": "2-Way", "role": "DROther"},
+ }
+ }
+ }
+ }
+ dut = "r0"
+ result = verify_ospf_neighbor(tgen, topo, dut, input_dict, lan=True)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_gr_r0 = {
+ "r0": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_gr_r1 = {
+ "r1": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that DUT enters into helper mode.")
+
+ input_dict = {"activeRestarterCnt": 1}
+ gracelsa_sent = False
+ repeat = 0
+ dut = "r0"
+ while not gracelsa_sent and repeat < Iters:
+ gracelsa_sent = scapy_send_raw_packet(tgen, topo, "r1", intf1, pkt)
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ if isinstance(result, str):
+ repeat += 1
+ gracelsa_sent = False
+
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ delete_ospf()
+
+ write_test_footer(tc_name)
+
+
+def test_ospf_gr_helper_tc7_p1(request):
+ """
+ Test ospf gr helper
+ Verify helper when grace lsa is received with different configured
+ value in process level (higher, lower, grace lsa timer above 1800)
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo, intf, intf1, pkt
+
+ step("Bring up the base config as per the topology")
+ step(
+ "Configure DR priority as 99 in RR , DUT dr priority = 98 "
+ "& reset ospf process in all the routers"
+ )
+ step(
+ "Enable GR on RR and DUT with grace period on RR = 333"
+ "and grace period on DUT = 300"
+ )
+ reset_config_on_routers(tgen)
+ ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
+ assert (
+ ospf_covergence is True
+ ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
+ ospf_gr_r0 = {
+ "r0": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_gr_r1 = {
+ "r1": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {"supportedGracePeriod": 1800}
+ dut = "r0"
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Configure grace period = 1801 on RR and restart ospf .")
+ grace_period_1801 = "01005e00000570708bd051ef080045c0005cbeb10000015907d111010101e00000050204004801010101000000009714000000000000000000000000000100010209030000000101010180000001c8e9002c000100040000016800020001010000000003000411010101"
+ gracelsa_sent = scapy_send_raw_packet(tgen, topo, "r1", intf1, grace_period_1801)
+
+ step("Verify R0 does not enter helper mode.")
+ input_dict = {"activeRestarterCnt": 1}
+ dut = "r0"
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict, expected=False)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed. DUT entered helper role " " \n Error: {}".format(
+ tc_name, result
+ )
+
+ delete_ospf()
+
+ write_test_footer(tc_name)
+
+
+def test_ospf_gr_helper_tc8_p1(request):
+ """
+ Test ospf gr helper
+
+ Verify helper functionality when dut is helping RR and new grace lsa
+ is received from RR.
+ """
+ tc_name = request.node.name
+ write_test_header(tc_name)
+ tgen = get_topogen()
+
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ global topo, intf, intf1, pkt
+
+ step("Bring up the base config as per the topology")
+ step("Enable GR")
+ reset_config_on_routers(tgen)
+ ospf_covergence = verify_ospf_neighbor(tgen, topo, lan=True)
+ assert (
+ ospf_covergence is True
+ ), "OSPF is not after reset config \n Error:" " {}".format(ospf_covergence)
+ ospf_gr_r0 = {
+ "r0": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r0)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ ospf_gr_r1 = {
+ "r1": {"ospf": {"graceful-restart": {"helper-only": [], "opaque": True}}}
+ }
+ result = create_router_ospf(tgen, topo, ospf_gr_r1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ input_dict = {"supportedGracePeriod": 1800}
+ dut = "r0"
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Verify that DUT enters into helper mode.")
+
+ input_dict = {"activeRestarterCnt": 1}
+ gracelsa_sent = False
+ repeat = 0
+ dut = "r0"
+ while not gracelsa_sent and repeat < Iters:
+ gracelsa_sent = scapy_send_raw_packet(tgen, topo, "r1", intf1, pkt)
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ if isinstance(result, str):
+ repeat += 1
+ gracelsa_sent = False
+
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ step("Send the Grace LSA again to DUT when RR is in GR.")
+ input_dict = {"activeRestarterCnt": 1}
+ gracelsa_sent = False
+ repeat = 0
+ dut = "r0"
+ while not gracelsa_sent and repeat < Iters:
+ gracelsa_sent = scapy_send_raw_packet(tgen, topo, "r1", intf1, pkt)
+ result = verify_ospf_gr_helper(tgen, topo, dut, input_dict)
+ if isinstance(result, str):
+ repeat += 1
+ gracelsa_sent = False
+
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ delete_ospf()
+
+ write_test_footer(tc_name)
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
import sys
import pytest
import json
-import re
-import tempfile
from time import sleep
from functools import partial
)
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ospfd]
outputs = {}
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]:
+ tgen.add_router(router)
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "rt7"]:
- tgen.add_router(router)
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt1"], nodeif="stub1")
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt3")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt2")
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt1"], nodeif="stub1")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt4")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt3")
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt3")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt2")
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt3")
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt4")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt3")
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt3")
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt7")
+ switch.add_link(tgen.gears["rt7"], nodeif="eth-rt6")
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
-
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt7")
- switch.add_link(tgen.gears["rt7"], nodeif="eth-rt6")
-
- switch = tgen.add_switch("s8")
- switch.add_link(tgen.gears["rt7"], nodeif="stub1")
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["rt7"], nodeif="stub1")
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import sys
import pytest
import json
-import re
from time import sleep
from functools import partial
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd, pytest.mark.ospfd, pytest.mark.pathd]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "dst"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6", "dst"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
+ # switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
- #switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
+ # switch = tgen.add_switch("s3")
+ # switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
+ # switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
- #switch = tgen.add_switch("s3")
- #switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
- #switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
- switch = tgen.add_switch("s8")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
-
- switch = tgen.add_switch("s9")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-dst")
- switch.add_link(tgen.gears["dst"], nodeif="eth-rt6")
+ switch = tgen.add_switch("s9")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-dst")
+ switch.add_link(tgen.gears["dst"], nodeif="eth-rt6")
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
frrdir = tgen.config.get(tgen.CONFIG_SECTION, "frrdir")
if not os.path.isfile(os.path.join(frrdir, "pathd")):
- pytest.skip("pathd daemon wasn't built in:"+frrdir)
+ pytest.skip("pathd daemon wasn't built in:" + frrdir)
tgen.start_topology()
candidate_output = router.vtysh_cmd("show mpls table json")
candidate_output_json = json.loads(candidate_output)
for item in candidate_output_json.items():
- # logger.info('item "%s"', item)
- if item[0] == candidate_key:
- matched_key = True
- if positive:
- break
+ # logger.info('item "%s"', item)
+ if item[0] == candidate_key:
+ matched_key = True
+ if positive:
+ break
if positive:
if matched_key:
matched = True
assertmsg = "{} don't has entry {} but is was expected".format(
- router.name, candidate_key)
+ router.name, candidate_key
+ )
else:
if not matched_key:
matched = True
assertmsg = "{} has entry {} but is wans't expected".format(
- router.name, candidate_key)
+ router.name, candidate_key
+ )
if matched:
logger.info('Success "%s" in "%s"', router.name, fn_name)
return
for rname, endpoint in [("rt1", "6.6.6.6"), ("rt6", "1.1.1.1")]:
add_candidate_path(rname, endpoint, 100, "default")
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_candidate_path(rname, endpoint, 100)
check_bsid(rname, bsid, test_srte_init_step1.__name__, False)
create_sr_policy(rname, endpoint, bsid)
add_candidate_path(rname, endpoint, 100, "default")
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_candidate_path(rname, endpoint, 100)
add_candidate_path(rname, endpoint, 100, "default")
# now change the segment list name
add_candidate_path(rname, endpoint, 100, "default", "test")
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_segment(rname, "test", 10)
delete_segment(rname, "test", 20)
delete_segment(rname, "test", 30)
add_segment_adj(rname, "test", 20, "10.0.6.5", "10.0.6.4")
add_segment_adj(rname, "test", 30, "10.0.2.4", "10.0.2.2")
add_segment_adj(rname, "test", 40, "10.0.1.2", "10.0.1.1")
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_candidate_path(rname, endpoint, 100)
add_candidate_path(rname, endpoint, 100, "default")
# now change the segment list name
add_candidate_path(rname, endpoint, 200, "test", "test")
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_segment(rname, "test", 10)
delete_segment(rname, "test", 20)
delete_segment(rname, "test", 30)
add_segment_adj(rname, "test", 30, "10.0.2.99", "10.0.2.99")
add_segment_adj(rname, "test", 40, "10.0.1.99", "10.0.1.99")
# So policy sticks with default sl even higher prio
- check_bsid(rname, "1111" if rname == "rt1" else "6666", test_srte_init_step1.__name__, True)
+ check_bsid(
+ rname,
+ "1111" if rname == "rt1" else "6666",
+ test_srte_init_step1.__name__,
+ True,
+ )
delete_candidate_path(rname, endpoint, 100)
import sys
import pytest
import json
-import re
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ospfd]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5", "rt6"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1")
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1")
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1")
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2")
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
+ switch = tgen.add_switch("s7")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
- switch = tgen.add_switch("s7")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4")
-
- switch = tgen.add_switch("s8")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
- switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
+ switch = tgen.add_switch("s8")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6")
+ switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5")
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
from lib.topogen import Topogen, TopoRouter, get_topogen
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ospfd]
-class NetworkTopo(Topo):
- "OSPF topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
+ # Create routers
+ for router in range(1, 4):
+ tgen.add_router("r{}".format(router))
- tgen = get_topogen(self)
+ # R1-R2 backbone area
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- # Create routers
- for router in range(1, 4):
- tgen.add_router("r{}".format(router))
-
- # R1-R2 backbone area
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
- # R2-R3 NSSA area
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ # R2-R3 NSSA area
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(NetworkTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
# This is a sample of configuration loading.
# pylint: disable=C0413
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
# Import topogen and topotest helpers
from lib import topotest
pytestmark = [pytest.mark.ospfd]
-class OspfTeTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self):
- "Build function"
- tgen = get_topogen(self)
+ # Create 4 routers
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
- # Create 4 routers
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
+ # Interconect router 1 and 2 with 2 links
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- # Interconect router 1 and 2 with 2 links
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ # Interconect router 3 and 2
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r2"])
- # Interconect router 3 and 2
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r2"])
+ # Interconect router 4 and 2
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r4"])
+ switch.add_link(tgen.gears["r2"])
- # Interconect router 4 and 2
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r4"])
- switch.add_link(tgen.gears["r2"])
-
- # Interconnect router 3 with next AS
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r3"])
+ # Interconnect router 3 with next AS
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
logger.info("\n\n---- Starting OSPF TE tests ----\n")
- tgen = Topogen(OspfTeTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
import sys
import pytest
import json
-import re
-from time import sleep
from functools import partial
# Save the Current Working Directory to find configuration files.
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ospfd]
-class TemplateTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ #
+ # Define FRR Routers
+ #
+ for router in ["rt1", "rt2", "rt3", "rt4", "rt5"]:
+ tgen.add_router(router)
- #
- # Define FRR Routers
- #
- for router in ["rt1", "rt2", "rt3", "rt4", "rt5"]:
- tgen.add_router(router)
+ #
+ # Define connections
+ #
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
- #
- # Define connections
- #
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt2")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt1")
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1")
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["rt1"], nodeif="eth-rt3")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt1")
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2")
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2")
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3")
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3")
-
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
- switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5")
+ switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4")
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ospfd]
-class OSPFTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ # Create 4 routers
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
- # Create 4 routers
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
+ # Create a empty network for router 1
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
- # Create a empty network for router 1
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
+ # Create a empty network for router 2
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
- # Create a empty network for router 2
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
+ # Interconect router 1, 2 and 3
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- # Interconect router 1, 2 and 3
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
+ # Create empty netowrk for router3
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r3"])
- # Create empty netowrk for router3
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r3"])
+ # Interconect router 3 and 4
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
- # Interconect router 3 and 4
- switch = tgen.add_switch("s5")
- switch.add_link(tgen.gears["r3"])
- switch.add_link(tgen.gears["r4"])
-
- # Create a empty network for router 4
- switch = tgen.add_switch("s6")
- switch.add_link(tgen.gears["r4"])
+ # Create a empty network for router 4
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r4"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(OSPFTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
ospf6_config = "ospf6d.conf"
"""
import os
-import re
import sys
from functools import partial
import pytest
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ospfd]
-class OSPFTopo(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ # Create 3 routers
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
- # Create 3 routers
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
+ # Create a empty network for router 1
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
- # Create a empty network for router 1
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
+ # Create a empty network for router 2
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
- # Create a empty network for router 2
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
+ # Interconect router 1, 2 and 3
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- # Interconect router 1, 2 and 3
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
-
- # Create empty netowrk for router3
- switch = tgen.add_switch("s4")
- switch.add_link(tgen.gears["r3"])
+ # Create empty netowrk for router3
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(OSPFTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
logger.info("Testing with VRF Namespace support")
- cmds = [
- "if [ -e /var/run/netns/{0}-ospf-cust1 ] ; then ip netns del {0}-ospf-cust1 ; fi",
- "ip netns add {0}-ospf-cust1",
- "ip link set dev {0}-eth0 netns {0}-ospf-cust1",
- "ip netns exec {0}-ospf-cust1 ip link set {0}-eth0 up",
- "ip link set dev {0}-eth1 netns {0}-ospf-cust1",
- "ip netns exec {0}-ospf-cust1 ip link set {0}-eth1 up",
- ]
-
for rname, router in router_list.items():
-
- # create VRF rx-ospf-cust1 and link rx-eth0 to rx-ospf-cust1
- for cmd in cmds:
- output = tgen.net[rname].cmd(cmd.format(rname))
+ # create VRF rx-ospf-cust1 and link rx-eth{0,1} to rx-ospf-cust1
+ ns = "{}-ospf-cust1".format(rname)
+ router.net.add_netns(ns)
+ router.net.set_intf_netns(rname + "-eth0", ns, up=True)
+ router.net.set_intf_netns(rname + "-eth1", ns, up=True)
router.load_config(
TopoRouter.RD_ZEBRA,
"Teardown the pytest environment"
tgen = get_topogen()
- # move back rx-eth0 to default VRF
- # delete rx-vrf
- cmds = [
- "ip netns exec {0}-ospf-cust1 ip link set {0}-eth0 netns 1",
- "ip netns exec {0}-ospf-cust1 ip link set {0}-eth1 netns 1",
- "ip netns delete {0}-ospf-cust1",
- ]
-
+ # Move interfaces out of vrf namespace and delete the namespace
router_list = tgen.routers()
for rname, router in router_list.items():
- for cmd in cmds:
- tgen.net[rname].cmd(cmd.format(rname))
+ tgen.net[rname].reset_intf_netns(rname + "-eth0")
+ tgen.net[rname].reset_intf_netns(rname + "-eth1")
+ tgen.net[rname].delete_netns(rname + "-ospf-cust1")
tgen.stop_topology()
"""
import os
-import re
import sys
from functools import partial
import pytest
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ospfd]
-class OSPFTopo(Topo):
- "Test topology builder"
+CWD = os.path.dirname(os.path.realpath(__file__))
+
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+def build_topo(tgen):
+ "Build function"
- # Create 4 routers
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
+ # Create 4 routers
+ for routern in range(1, 3):
+ tgen.add_router("r{}".format(routern))
- # Create a empty network for router 1
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
+ # Create a empty network for router 1
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
- # Create a empty network for router 2
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
+ # Create a empty network for router 2
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
- # Interconect router 1, 2
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ # Interconect router 1, 2
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(OSPFTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
# the rp_filter. Setting it to '0' allows the OS to pass
# up the mcast packet not destined for the local routers
# network.
- topotest.set_sysctl(tgen.net["r1"], "net.ipv4.conf.r1-eth1.rp_filter", 0)
- topotest.set_sysctl(tgen.net["r1"], "net.ipv4.conf.all.rp_filter", 0)
- topotest.set_sysctl(tgen.net["r2"], "net.ipv4.conf.r2-eth1.rp_filter", 0)
- topotest.set_sysctl(tgen.net["r2"], "net.ipv4.conf.all.rp_filter", 0)
+ topotest.sysctl_assure(tgen.net["r1"], "net.ipv4.conf.r1-eth1.rp_filter", 0)
+ topotest.sysctl_assure(tgen.net["r1"], "net.ipv4.conf.all.rp_filter", 0)
+ topotest.sysctl_assure(tgen.net["r2"], "net.ipv4.conf.r2-eth1.rp_filter", 0)
+ topotest.sysctl_assure(tgen.net["r2"], "net.ipv4.conf.all.rp_filter", 0)
# Initialize all routers.
tgen.start_router()
import sys
import time
import pytest
-import json
-from copy import deepcopy
-from ipaddress import IPv4Address
-from lib.topotest import frr_unicode
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
import ipaddress
from time import sleep
shutdown_bringup_interface,
create_prefix_lists,
create_route_maps,
- create_interfaces_cfg,
topo_daemons,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf6_neighbor,
clear_ospf,
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospfv3_asbr_summary_topo1.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
NETWORK = {
"ipv4": [
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospfv3_asbr_summary_topo1.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
# Test cases start here.
# ##################################
+
def test_ospfv3_type5_summary_tc42_p0(request):
"""OSPF summarisation functionality."""
tc_name = request.node.name
step("Bring up the base config as per the topology")
reset_config_on_routers(tgen)
- protocol = 'ospf'
+ protocol = "ospf"
step(
"Configure 5 static routes from the same network on R0"
- "5 static routes from different networks and redistribute in R0")
+ "5 static routes from different networks and redistribute in R0"
+ )
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- },
- {
- "network": NETWORK2["ipv6"],
- "next_hop": "blackhole"
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- dut = 'r0'
+ dut = "r0"
red_static(dut)
step("Verify that routes are learnt on R1.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_static_rtes, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step(
"Configure External Route summary in R0 to summarise 5"
- " routes to one route. with aggregate timer as 6 sec")
+ " routes to one route. with aggregate timer as 6 sec"
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }],
- "aggr_timer": 6
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ],
+ "aggr_timer": 6,
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+ step("Verify that originally advertised routes are withdraw from there" " peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
step("Delete the configured summary")
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "del_aggr_timer": True,
- "delete": True
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "del_aggr_timer": True,
+ "delete": True,
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Summary Route still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
+ tc_name
+ )
step("show ip ospf summary should not have any summary address.")
input_dict = {
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6', expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Summary still present in DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(
+ tgen, topo, dut, input_dict, ospf="ospf6", expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
- dut = 'r1'
+ dut = "r1"
step("All 5 routes are advertised after deletion of configured summary.")
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_static_rtes, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("configure the summary again and delete static routes .")
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
input_dict = {
SUMMARY["ipv6"][0]: {
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
input_dict = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole",
- "delete": True
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole", "delete": True}
]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
step("Verify that summary route is withdrawn from R1.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
step("Add back static routes.")
input_dict_static_rtes = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary"
- " address on R0 and only one route is sent to R1.")
- dut = 'r1'
+ " address on R0 and only one route is sent to R1."
+ )
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
result = verify_rib(
- tgen, "ipv6", dut, input_dict_static_rtes,
- protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show configure summaries.")
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step("Configure new static route which is matching configured summary.")
input_dict_static_rtes = {
"r0": {
- "static_routes": [
- {
- "network": NETWORK_11["ipv6"],
- "next_hop": "blackhole"
- }
- ]
+ "static_routes": [{"network": NETWORK_11["ipv6"], "next_hop": "blackhole"}]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# step("verify that summary lsa is not refreshed.")
# show ip ospf database command is not working, waiting for DEV fix.
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK_11["ipv6"],
- "next_hop": "blackhole",
- "delete": True
- }
+ {"network": NETWORK_11["ipv6"], "next_hop": "blackhole", "delete": True}
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# step("verify that summary lsa is not refreshed.")
# show ip ospf database command is not working, waiting for DEV fix.
step(
"Configure redistribute connected and configure ospf external"
- " summary address to summarise the connected routes.")
+ " summary address to summarise the connected routes."
+ )
- dut = 'r0'
+ dut = "r0"
red_connected(dut)
- clear_ospf(tgen, dut, ospf='ospf6')
+ clear_ospf(tgen, dut, ospf="ospf6")
- ip = topo['routers']['r0']['links']['r3']['ipv6']
+ ip = topo["routers"]["r0"]["links"]["r3"]["ipv6"]
- ip_net = str(ipaddress.ip_interface(u'{}'.format(ip)).network)
+ ip_net = str(ipaddress.ip_interface(u"{}".format(ip)).network)
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": ip_net.split('/')[0],
- "mask": "8"
- }]
+ "summary-address": [{"prefix": ip_net.split("/")[0], "mask": "8"}]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured "
- "summary address on R0 and only one route is sent to R1.")
+ "summary address on R0 and only one route is sent to R1."
+ )
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": "fd00::/64"}]
- }
- }
- dut = 'r1'
+ input_dict_summary = {"r0": {"static_routes": [{"network": "fd00::/64"}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Shut one of the interface")
- intf = topo['routers']['r0']['links']['r3-link0']['interface']
+ intf = topo["routers"]["r0"]["links"]["r3-link0"]["interface"]
shutdown_bringup_interface(tgen, dut, intf, False)
# step("verify that summary lsa is not refreshed.")
# show ip ospf database command is not working, waiting for DEV fix.
step("Delete OSPF process.")
- ospf_del = {
- "r0": {
- "ospf6": {
- "delete": True
- }
- }
- }
+ ospf_del = {"r0": {"ospf6": {"delete": True}}}
result = create_router_ospf(tgen, topo, ospf_del)
assert result is True, "Testcase : Failed \n Error: {}".format(result)
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- },
- {
- "network": NETWORK2["ipv6"],
- "next_hop": "blackhole"
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- dut = 'r0'
+ dut = "r0"
red_static(dut)
red_connected(dut)
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary "
- "address on R0 and only one route is sent to R1.")
+ "address on R0 and only one route is sent to R1."
+ )
input_dict = {
SUMMARY["ipv6"][0]: {
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
-
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# step("verify that summary lsa is not refreshed.")
# show ip ospf database command is not working, waiting for DEV fix.
step("Delete the redistribute command in ospf.")
- dut = 'r0'
+ dut = "r0"
red_connected(dut, config=False)
red_static(dut, config=False)
step("Verify that summary route is withdrawn from the peer.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "metric": "1234"
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "metric": "1234",
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
step("Configure OSPF on all the routers of the topology.")
reset_config_on_routers(tgen)
- protocol = 'ospf'
+ protocol = "ospf"
step(
"Configure 5 static routes from the same network on R0"
- "5 static routes from different networks and redistribute in R0")
+ "5 static routes from different networks and redistribute in R0"
+ )
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- },
- {
- "network": NETWORK2["ipv6"],
- "next_hop": "blackhole"
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- dut = 'r0'
+ dut = "r0"
red_static(dut)
step("Verify that routes are learnt on R1.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_static_rtes, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step(
"Configure External Route summary in R0 to summarise 5"
- " routes to one route with no advertise option.")
+ " routes to one route with no advertise option."
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "advertise": False
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "advertise": False,
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary"
" address on R0 and summary route is not advertised to neighbor as"
- " no advertise is configured..")
+ " no advertise is configured.."
+ )
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict_summary,
- protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
- step(
- "Verify that show ip ospf summary should show the "
- "configured summaries.")
+ step("Verify that show ip ospf summary should show the " "configured summaries.")
input_dict = {
SUMMARY["ipv6"][0]: {
"Summary address": SUMMARY["ipv6"][0],
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step("Delete the configured summary")
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "delete": True
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "delete": True,
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Summary has 5 sec delay timer, sleep 5 secs...")
sleep(5)
step("Verify that summary lsa is withdrawn from R1 and deleted from R0.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Summary Route still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary Route still present in RIB".format(
+ tc_name
+ )
step("show ip ospf summary should not have any summary address.")
input_dict = {
"Metric-type": "E2",
"Metric": 20,
"Tag": 1234,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6', expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Summary still present in DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(
+ tgen, topo, dut, input_dict, ospf="ospf6", expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Summary still present in DB".format(tc_name)
step("Reconfigure summary with no advertise.")
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "advertise": False
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "advertise": False,
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary"
" address on R0 and summary route is not advertised to neighbor as"
- " no advertise is configured..")
+ " no advertise is configured.."
+ )
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict_summary,
- protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
- step(
- "Verify that show ip ospf summary should show the "
- "configured summaries.")
+ step("Verify that show ip ospf summary should show the " "configured summaries.")
input_dict = {
SUMMARY["ipv6"][0]: {
"Summary address": SUMMARY["ipv6"][0],
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step(
"Change summary address from no advertise to advertise "
- "(summary-address 10.0.0.0 255.255.0.0)")
+ "(summary-address 10.0.0.0 255.255.0.0)"
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "advertise": False
- }]
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "advertise": False,
+ }
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+ step("Verify that originally advertised routes are withdraw from there" " peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes is present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes is present in RIB".format(tc_name)
write_test_footer(tc_name)
step("Bring up the base config as per the topology")
reset_config_on_routers(tgen)
- protocol = 'ospf'
+ protocol = "ospf"
step(
"Configure 5 static routes from the same network on R0"
- "5 static routes from different networks and redistribute in R0")
+ "5 static routes from different networks and redistribute in R0"
+ )
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- },
- {
- "network": NETWORK2["ipv6"],
- "next_hop": "blackhole"
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- dut = 'r0'
+ dut = "r0"
red_static(dut)
step("Verify that routes are learnt on R1.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_static_rtes, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step(
- "Configure External Route summary in R0 to summarise 5"
- " routes to one route.")
+ "Configure External Route summary in R0 to summarise 5" " routes to one route."
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+ step("Verify that originally advertised routes are withdraw from there" " peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
step(
"Configure route map and & rule to permit configured summary address,"
- " redistribute static & connected routes with the route map.")
+ " redistribute static & connected routes with the route map."
+ )
step("Configure prefixlist to permit the static routes, add to route map.")
# Create ip prefix list
pfx_list = {
"prefix_lists": {
"ipv6": {
"pf_list_1_ipv6": [
- {
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }
+ {"seqid": 10, "network": "any", "action": "permit"}
]
}
}
}
}
result = create_prefix_lists(tgen, pfx_list)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
-
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
routemaps = {
- "r0": {
- "route_maps": {
- "rmap_ipv6": [{
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
"action": "permit",
- "seq_id": '1',
- "match": {
- "ipv6": {
- "prefix_lists":
- "pf_list_1_ipv6"
- }
- }
- }]
- }
+ "seq_id": "1",
+ "match": {"ipv6": {"prefix_lists": "pf_list_1_ipv6"}},
+ }
+ ]
}
+ }
}
result = create_route_maps(tgen, routemaps)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
ospf_red_r1 = {
"r0": {
"ospf6": {
- "redistribute": [{
- "redist_type": "static",
- "route_map": "rmap_ipv6"
- }]
+ "redistribute": [{"redist_type": "static", "route_map": "rmap_ipv6"}]
}
}
}
result = create_router_ospf(tgen, topo, ospf_red_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured"
"summary address on R0 and only one route is sent to R1. Verify that "
- "show ip ospf summary should show the configure summaries.")
+ "show ip ospf summary should show the configure summaries."
+ )
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
input_dict = {
SUMMARY["ipv6"][0]: {
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step("Configure metric type as 1 in route map.")
-
routemaps = {
- "r0": {
- "route_maps": {
- "rmap_ipv6": [{
- "seq_id": '1',
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
+ "seq_id": "1",
"action": "permit",
- "set":{
- "metric-type": "type-1"
- }
- }]
- }
+ "set": {"metric-type": "type-1"},
+ }
+ ]
}
+ }
}
result = create_route_maps(tgen, routemaps)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes(static / connected) are summarised"
- " to configured summary address with metric type 2.")
+ " to configured summary address with metric type 2."
+ )
input_dict = {
SUMMARY["ipv6"][0]: {
"Summary address": SUMMARY["ipv6"][0],
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step("Un configure metric type from route map.")
routemaps = {
- "r0": {
- "route_maps": {
- "rmap_ipv6": [{
+ "r0": {
+ "route_maps": {
+ "rmap_ipv6": [
+ {
"action": "permit",
- "seq_id": '1',
- "set":{
- "metric-type": "type-1",
- "delete": True
- }
- }]
- }
+ "seq_id": "1",
+ "set": {"metric-type": "type-1", "delete": True},
+ }
+ ]
}
+ }
}
result = create_route_maps(tgen, routemaps)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes(static / connected) are summarised"
- " to configured summary address with metric type 2.")
+ " to configured summary address with metric type 2."
+ )
input_dict = {
SUMMARY["ipv6"][0]: {
"Summary address": SUMMARY["ipv6"][0],
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
step("Change rule from permit to deny in prefix list.")
pfx_list = {
"prefix_lists": {
"ipv6": {
"pf_list_1_ipv6": [
- {
- "seqid": 10,
- "network": "any",
- "action": "deny"
- }
+ {"seqid": 10, "network": "any", "action": "deny"}
]
}
}
}
}
result = create_prefix_lists(tgen, pfx_list)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that previously originated summary lsa "
- "is withdrawn from the neighbor.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "is withdrawn from the neighbor."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
step("summary route has delay of 5 secs, wait for 5 secs")
sleep(5)
result = verify_ospf6_rib(tgen, dut, input_dict_summary, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict_summary, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
write_test_footer(tc_name)
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32",
- "tag": 4294967295
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "16",
- "advertise": True
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False
- },
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "32",
+ "tag": 4294967295,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "16",
+ "advertise": True,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
-
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure and re configure all the commands 10 times in a loop.")
- for itrate in range(0,10):
+ for itrate in range(0, 10):
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "8",
- "tag": 4294967295
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "16",
- "advertise": True
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False
- },
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "8",
+ "tag": 4294967295,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "16",
+ "advertise": True,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ },
]
}
- }
+ }
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "8",
- "tag": 4294967295,
- "delete": True
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "16",
- "advertise": True,
- "delete": True
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False,
- "delete": True
- },
- {
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "24",
- "advertise": False,
- "delete": True
- },
+ "summary-address": [
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "8",
+ "tag": 4294967295,
+ "delete": True,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "16",
+ "advertise": True,
+ "delete": True,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ "delete": True,
+ },
+ {
+ "prefix": SUMMARY["ipv6"][0].split("/")[0],
+ "mask": "24",
+ "advertise": False,
+ "delete": True,
+ },
]
}
+ }
}
- }
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify the show commands")
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 0
+ "External route count": 0,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
write_test_footer(tc_name)
step("Bring up the base config as per the topology")
reset_config_on_routers(tgen)
- protocol = 'ospf'
+ protocol = "ospf"
step(
"Configure 5 static routes from the same network on R0"
- "5 static routes from different networks and redistribute in R0")
+ "5 static routes from different networks and redistribute in R0"
+ )
input_dict_static_rtes = {
"r0": {
"static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- },
- {
- "network": NETWORK2["ipv6"],
- "next_hop": "blackhole"
- }
+ {"network": NETWORK["ipv6"], "next_hop": "blackhole"},
+ {"network": NETWORK2["ipv6"], "next_hop": "blackhole"},
]
}
}
result = create_static_routes(tgen, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- dut = 'r0'
+ dut = "r0"
red_static(dut)
step("Verify that routes are learnt on R1.")
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_static_rtes)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_static_rtes, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_static_rtes, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step(
- "Configure External Route summary in R0 to summarise 5"
- " routes to one route.")
+ "Configure External Route summary in R0 to summarise 5" " routes to one route."
+ )
ospf_summ_r1 = {
"r0": {
"ospf6": {
- "summary-address": [{
- "prefix": SUMMARY["ipv6"][0].split('/')[0],
- "mask": "32"
- }]
+ "summary-address": [
+ {"prefix": SUMMARY["ipv6"][0].split("/")[0], "mask": "32"}
+ ]
}
}
}
result = create_router_ospf(tgen, topo, ospf_summ_r1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+ step("Verify that originally advertised routes are withdraw from there" " peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
- step('Reload the FRR router')
+ step("Reload the FRR router")
# stop/start -> restart FRR router and verify
- stop_router(tgen, 'r0')
- start_router(tgen, 'r0')
+ stop_router(tgen, "r0")
+ start_router(tgen, "r0")
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+ step("Verify that originally advertised routes are withdraw from there" " peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
step("Kill OSPF6d daemon on R0.")
kill_router_daemons(tgen, "r0", ["ospf6d"])
step("Verify OSPF neighbors are up after bringing back ospf6d in R0")
# Api call verify whether OSPF is converged
ospf_covergence = verify_ospf6_neighbor(tgen, topo)
- assert ospf_covergence is True, ("setup_module :Failed \n Error:"
- " {}".format(ospf_covergence))
+ assert ospf_covergence is True, "setup_module :Failed \n Error:" " {}".format(
+ ospf_covergence
+ )
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+ step("Verify that originally advertised routes are withdraw from there" " peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
step("restart zebrad")
kill_router_daemons(tgen, "r0", ["zebra"])
step(
"Verify that external routes are summarised to configured summary "
"address on R0 after 5 secs of delay timer expiry and only one "
- "route is sent to R1.")
- input_dict_summary = {
- "r0": {
- "static_routes": [{"network": SUMMARY["ipv6"][0]}]
- }
- }
- dut = 'r1'
+ "route is sent to R1."
+ )
+ input_dict_summary = {"r0": {"static_routes": [{"network": SUMMARY["ipv6"][0]}]}}
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict_summary)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- result = verify_rib(tgen, "ipv6", dut,
- input_dict_summary, protocol=protocol)
- assert result is True, "Testcase {} : Failed" \
- "Error: Routes is missing in RIB".format(tc_name)
+ result = verify_rib(tgen, "ipv6", dut, input_dict_summary, protocol=protocol)
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Routes is missing in RIB".format(tc_name)
step("Verify that show ip ospf summary should show the summaries.")
input_dict = {
"Metric-type": "E2",
"Metric": 20,
"Tag": 0,
- "External route count": 5
+ "External route count": 5,
}
}
- dut = 'r0'
- result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf='ospf6')
- assert result is True, "Testcase {} : Failed" \
- "Error: Summary missing in OSPF DB".format(tc_name)
+ dut = "r0"
+ result = verify_ospf_summary(tgen, topo, dut, input_dict, ospf="ospf6")
+ assert (
+ result is True
+ ), "Testcase {} : Failed" "Error: Summary missing in OSPF DB".format(tc_name)
- step(
- "Verify that originally advertised routes are withdraw from there"
- " peer.")
+ step("Verify that originally advertised routes are withdraw from there" " peer.")
input_dict = {
- "r0": {
- "static_routes": [
- {
- "network": NETWORK["ipv6"],
- "next_hop": "blackhole"
- }
- ]
- }
+ "r0": {"static_routes": [{"network": NETWORK["ipv6"], "next_hop": "blackhole"}]}
}
- dut = 'r1'
+ dut = "r1"
result = verify_ospf6_rib(tgen, dut, input_dict, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
- "Routes still present in OSPF RIB {}".format(tc_name, result)
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: " "Routes still present in OSPF RIB {}".format(
+ tc_name, result
+ )
- result = verify_rib(tgen, "ipv6", dut, input_dict, protocol=protocol,
- expected=False)
- assert result is not True, "Testcase {} : Failed" \
- "Error: Routes still present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv6", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed" "Error: Routes still present in RIB".format(tc_name)
write_test_footer(tc_name)
import sys
import time
import pytest
-import json
-from copy import deepcopy
-from ipaddress import IPv4Address
-from lib.topotest import frr_unicode
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
-import ipaddress
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
verify_rib,
create_static_routes,
step,
- create_route_maps,
shutdown_bringup_interface,
- create_interfaces_cfg,
topo_daemons,
get_frr_ipv6_linklocal,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf6_neighbor,
- config_ospf_interface,
- clear_ospf,
verify_ospf6_rib,
create_router_ospf,
- verify_ospf6_interface,
- verify_ospf6_database,
config_ospf6_interface,
)
-from ipaddress import IPv6Address
pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospfv3_ecmp.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
NETWORK = {
"ipv4": [
"11.0.20.1/32",
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospfv3_ecmp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
import sys
import time
import pytest
-import json
-from copy import deepcopy
-from ipaddress import IPv4Address
-from lib.topotest import frr_unicode
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
-import ipaddress
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
step,
create_route_maps,
verify_prefix_lists,
- get_frr_ipv6_linklocal,
topo_daemons,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf6_neighbor,
- config_ospf_interface,
- clear_ospf,
verify_ospf6_rib,
create_router_ospf,
- verify_ospf6_interface,
- verify_ospf6_database,
- config_ospf6_interface,
)
-from ipaddress import IPv6Address
pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospfv3_routemaps.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
NETWORK = {
"ipv4": [
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospfv3_routemaps.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
import sys
import time
import pytest
-import json
-from copy import deepcopy
-from ipaddress import IPv4Address
-from lib.topotest import frr_unicode
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
import ipaddress
-from lib.bgp import verify_bgp_convergence, create_router_bgp
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
write_test_footer,
reset_config_on_routers,
verify_rib,
- create_static_routes,
step,
- create_route_maps,
shutdown_bringup_interface,
create_interfaces_cfg,
topo_daemons,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf6_neighbor,
- config_ospf_interface,
- clear_ospf,
verify_ospf6_rib,
create_router_ospf,
verify_ospf6_interface,
- verify_ospf6_database,
config_ospf6_interface,
)
-from ipaddress import IPv6Address
pytestmark = [pytest.mark.ospfd, pytest.mark.staticd]
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospfv3_rte_calc.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
NETWORK = {
"ipv6": [
"11.0.20.1/32",
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospfv3_rte_calc.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
import sys
import time
import pytest
-import json
from copy import deepcopy
-from ipaddress import IPv4Address
from lib.topotest import frr_unicode
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
-import ipaddress
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
write_test_header,
write_test_footer,
reset_config_on_routers,
- verify_rib,
- create_static_routes,
step,
- create_route_maps,
- shutdown_bringup_interface,
create_interfaces_cfg,
- topo_daemons
+ topo_daemons,
)
from lib.topolog import logger
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.ospf import (
verify_ospf6_neighbor,
- config_ospf_interface,
clear_ospf,
- verify_ospf6_rib,
- create_router_ospf,
verify_ospf6_interface,
- verify_ospf6_database,
- config_ospf6_interface,
)
from ipaddress import IPv6Address
# Global variables
topo = None
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/ospfv3_single_area.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
"""
TOPOOLOGY =
Please view in a fixed-width font such as Courier.
"""
-class CreateTopo(Topo):
- """
- Test topology builder.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/ospfv3_single_area.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# get list of daemons needs to be started for this suite.
"""
import os
-import re
import sys
import pytest
import json
from lib.common_config import shutdown_bringup_interface
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.pbrd]
#####################################################
-class NetworkTopo(Topo):
- "PBR Topology 1"
+def build_topo(tgen):
+ "Build function"
- def build(self, **_opts):
- "Build function"
+ # Populate routers
+ for routern in range(1, 2):
+ tgen.add_router("r{}".format(routern))
- tgen = get_topogen(self)
-
- # Populate routers
- for routern in range(1, 2):
- tgen.add_router("r{}".format(routern))
-
- # Populate switches
- for switchn in range(1, 6):
- switch = tgen.add_switch("sw{}".format(switchn))
- switch.add_link(tgen.gears["r1"])
+ # Populate switches
+ for switchn in range(1, 6):
+ switch = tgen.add_switch("sw{}".format(switchn))
+ switch.add_link(tgen.gears["r1"])
#####################################################
def setup_module(module):
"Setup topology"
- tgen = Topogen(NetworkTopo, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
krel = platform.release()
# R1 and R11 - R15.
# - test_pim_convergence()
# Wait for PIM convergence on all routers. PIM is run on
-# R1 and R11 - R15.
+# R1 and R11 - R15.
# - test_mcast_acl_1():
# Test 1st ACL entry 239.100.0.0/28 with 239.100.0.1 which
# should use R11 as RP
# shutdown topology
#
-
+# XXX clean up in later commit to avoid conflict on rebase
+# pylint: disable=C0413
TOPOLOGY = """
+----------+
| Host H2 |
import os
import sys
import pytest
-import re
-import time
-from time import sleep
-import socket
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
+from lib.pim import McastTesterHelper
pytestmark = [pytest.mark.pimd, pytest.mark.ospfd]
-#
-# Test global variables:
-# They are used to handle communicating with external application.
-#
-APP_SOCK_PATH = '/tmp/topotests/apps.sock'
-HELPER_APP_PATH = os.path.join(CWD, "../lib/mcast-tester.py")
-app_listener = None
-app_clients = {}
-
-def listen_to_applications():
- "Start listening socket to connect with applications."
- # Remove old socket.
- try:
- os.unlink(APP_SOCK_PATH)
- except OSError:
- pass
-
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
- sock.bind(APP_SOCK_PATH)
- sock.listen(10)
- global app_listener
- app_listener = sock
-
-def accept_host(host):
- "Accept connection from application running in hosts."
- global app_listener, app_clients
- conn = app_listener.accept()
- app_clients[host] = {
- 'fd': conn[0],
- 'address': conn[1]
- }
-
-def close_applications():
- "Signal applications to stop and close all sockets."
- global app_listener, app_clients
-
- if app_listener:
- # Close listening socket.
- app_listener.close()
-
- # Remove old socket.
- try:
- os.unlink(APP_SOCK_PATH)
- except OSError:
- pass
-
- # Close all host connections.
- for host in ["h1", "h2"]:
- if app_clients.get(host) is None:
- continue
- app_clients[host]["fd"].close()
-
- # Reset listener and clients data struct
- app_listener = None
- app_clients = {}
-
-
-class PIMACLTopo(Topo):
- "PIM ACL Test Topology"
-
- def build(self):
- tgen = get_topogen(self)
-
- # Create the hosts
- for hostNum in range(1,3):
- tgen.add_router("h{}".format(hostNum))
-
- # Create the main router
- tgen.add_router("r1")
-
- # Create the PIM RP routers
- for rtrNum in range(11, 16):
- tgen.add_router("r{}".format(rtrNum))
-
- # Setup Switches and connections
- for swNum in range(1, 3):
- tgen.add_switch("sw{}".format(swNum))
-
- # Add connections H1 to R1 switch sw1
- tgen.gears["h1"].add_link(tgen.gears["sw1"])
- tgen.gears["r1"].add_link(tgen.gears["sw1"])
-
- # Add connections R1 to R1x switch sw2
- tgen.gears["r1"].add_link(tgen.gears["sw2"])
- tgen.gears["h2"].add_link(tgen.gears["sw2"])
- tgen.gears["r11"].add_link(tgen.gears["sw2"])
- tgen.gears["r12"].add_link(tgen.gears["sw2"])
- tgen.gears["r13"].add_link(tgen.gears["sw2"])
- tgen.gears["r14"].add_link(tgen.gears["sw2"])
- tgen.gears["r15"].add_link(tgen.gears["sw2"])
+def build_topo(tgen):
+ for hostNum in range(1, 3):
+ tgen.add_router("h{}".format(hostNum))
+
+ # Create the main router
+ tgen.add_router("r1")
+
+ # Create the PIM RP routers
+ for rtrNum in range(11, 16):
+ tgen.add_router("r{}".format(rtrNum))
+
+ # Setup Switches and connections
+ for swNum in range(1, 3):
+ tgen.add_switch("sw{}".format(swNum))
+
+ # Add connections H1 to R1 switch sw1
+ tgen.gears["h1"].add_link(tgen.gears["sw1"])
+ tgen.gears["r1"].add_link(tgen.gears["sw1"])
+
+ # Add connections R1 to R1x switch sw2
+ tgen.gears["r1"].add_link(tgen.gears["sw2"])
+ tgen.gears["h2"].add_link(tgen.gears["sw2"])
+ tgen.gears["r11"].add_link(tgen.gears["sw2"])
+ tgen.gears["r12"].add_link(tgen.gears["sw2"])
+ tgen.gears["r13"].add_link(tgen.gears["sw2"])
+ tgen.gears["r14"].add_link(tgen.gears["sw2"])
+ tgen.gears["r15"].add_link(tgen.gears["sw2"])
#####################################################
#
#####################################################
+
def setup_module(module):
logger.info("PIM RP ACL Topology: \n {}".format(TOPOLOGY))
- tgen = Topogen(PIMACLTopo, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
# Starting Routers
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
- if rname[0] != 'h':
+ if rname[0] != "h":
# Only load ospf on routers, not on end hosts
router.load_config(
TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
def teardown_module(module):
tgen = get_topogen()
tgen.stop_topology()
- close_applications()
def test_ospf_convergence():
assert res is None, assertmsg
-
def check_mcast_entry(entry, mcastaddr, pimrp):
"Helper function to check RP"
tgen = get_topogen()
- logger.info("Testing PIM RP selection for ACL {} entry using {}".format(entry, mcastaddr));
-
- # Start applications socket.
- listen_to_applications()
-
- tgen.gears["h2"].run("{} --send='0.7' '{}' '{}' '{}' &".format(
- HELPER_APP_PATH, APP_SOCK_PATH, mcastaddr, 'h2-eth0'))
- accept_host("h2")
+ logger.info(
+ "Testing PIM RP selection for ACL {} entry using {}".format(entry, mcastaddr)
+ )
- tgen.gears["h1"].run("{} '{}' '{}' '{}' &".format(
- HELPER_APP_PATH, APP_SOCK_PATH, mcastaddr, 'h1-eth0'))
- accept_host("h1")
+ with McastTesterHelper(tgen) as helper:
+ helper.run("h2", ["--send=0.7", mcastaddr, "h2-eth0"])
+ helper.run("h1", [mcastaddr, "h1-eth0"])
- logger.info("mcast join and source for {} started".format(mcastaddr))
+ logger.info("mcast join and source for {} started".format(mcastaddr))
- # tgen.mininet_cli()
+ # tgen.mininet_cli()
- router = tgen.gears["r1"]
- reffile = os.path.join(CWD, "r1/acl_{}_pim_join.json".format(entry))
- expected = json.loads(open(reffile).read())
+ router = tgen.gears["r1"]
+ reffile = os.path.join(CWD, "r1/acl_{}_pim_join.json".format(entry))
+ expected = json.loads(open(reffile).read())
- logger.info("verifying pim join on r1 for {}".format(mcastaddr))
- test_func = functools.partial(
- topotest.router_json_cmp, router, "show ip pim join json", expected
- )
- _, res = topotest.run_and_expect(test_func, None, count=60, wait=2)
- assertmsg = "PIM router r1 did not show join status"
- assert res is None, assertmsg
+ logger.info("verifying pim join on r1 for {}".format(mcastaddr))
+ test_func = functools.partial(
+ topotest.router_json_cmp, router, "show ip pim join json", expected
+ )
+ _, res = topotest.run_and_expect(test_func, None, count=60, wait=2)
+ assertmsg = "PIM router r1 did not show join status"
+ assert res is None, assertmsg
- logger.info("verifying pim join on PIM RP {} for {}".format(pimrp, mcastaddr))
- router = tgen.gears[pimrp]
- reffile = os.path.join(CWD, "{}/acl_{}_pim_join.json".format(pimrp, entry))
- expected = json.loads(open(reffile).read())
+ logger.info("verifying pim join on PIM RP {} for {}".format(pimrp, mcastaddr))
+ router = tgen.gears[pimrp]
+ reffile = os.path.join(CWD, "{}/acl_{}_pim_join.json".format(pimrp, entry))
+ expected = json.loads(open(reffile).read())
- test_func = functools.partial(
- topotest.router_json_cmp, router, "show ip pim join json", expected
- )
- _, res = topotest.run_and_expect(test_func, None, count=60, wait=2)
- assertmsg = "PIM router {} did not get selected as the PIM RP".format(pimrp)
- assert res is None, assertmsg
+ test_func = functools.partial(
+ topotest.router_json_cmp, router, "show ip pim join json", expected
+ )
+ _, res = topotest.run_and_expect(test_func, None, count=60, wait=2)
+ assertmsg = "PIM router {} did not get selected as the PIM RP".format(pimrp)
+ assert res is None, assertmsg
- close_applications()
return
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(1, '239.100.0.1', 'r11')
+ check_mcast_entry(1, "239.100.0.1", "r11")
def test_mcast_acl_2():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(2, '239.100.0.17', 'r12')
+ check_mcast_entry(2, "239.100.0.17", "r12")
def test_mcast_acl_3():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(3, '239.100.0.32', 'r13')
+ check_mcast_entry(3, "239.100.0.32", "r13")
def test_mcast_acl_4():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(4, '239.100.0.255', 'r14')
+ check_mcast_entry(4, "239.100.0.255", "r14")
def test_mcast_acl_5():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(5, '239.100.0.97', 'r14')
+ check_mcast_entry(5, "239.100.0.97", "r14")
def test_mcast_acl_6():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry(6, '239.100.0.70', 'r15')
+ check_mcast_entry(6, "239.100.0.70", "r15")
if __name__ == "__main__":
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.pimd]
-class PIMTopo(Topo):
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- for routern in range(1, 4):
- tgen.add_router("r{}".format(routern))
-
- tgen.add_router("rp")
-
- # rp ------ r1 -------- r2
- # \
- # --------- r3
- # r1 -> .1
- # r2 -> .2
- # rp -> .3
- # r3 -> .4
- # loopback network is 10.254.0.X/32
- #
- # r1 <- sw1 -> r2
- # r1-eth0 <-> r2-eth0
- # 10.0.20.0/24
- sw = tgen.add_switch("sw1")
- sw.add_link(tgen.gears["r1"])
- sw.add_link(tgen.gears["r2"])
-
- # r1 <- sw2 -> rp
- # r1-eth1 <-> rp-eth0
- # 10.0.30.0/24
- sw = tgen.add_switch("sw2")
- sw.add_link(tgen.gears["r1"])
- sw.add_link(tgen.gears["rp"])
-
- # 10.0.40.0/24
- sw = tgen.add_switch("sw3")
- sw.add_link(tgen.gears["r1"])
- sw.add_link(tgen.gears["r3"])
+def build_topo(tgen):
+ "Build function"
+
+ for routern in range(1, 4):
+ tgen.add_router("r{}".format(routern))
+
+ tgen.add_router("rp")
+
+ # rp ------ r1 -------- r2
+ # \
+ # --------- r3
+ # r1 -> .1
+ # r2 -> .2
+ # rp -> .3
+ # r3 -> .4
+ # loopback network is 10.254.0.X/32
+ #
+ # r1 <- sw1 -> r2
+ # r1-eth0 <-> r2-eth0
+ # 10.0.20.0/24
+ sw = tgen.add_switch("sw1")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["r2"])
+
+ # r1 <- sw2 -> rp
+ # r1-eth1 <-> rp-eth0
+ # 10.0.30.0/24
+ sw = tgen.add_switch("sw2")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["rp"])
+
+ # 10.0.40.0/24
+ sw = tgen.add_switch("sw3")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["r3"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(PIMTopo, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
# For all registered routers, load the zebra configuration file
r1 = tgen.gears["r1"]
# Let's send a igmp report from r2->r1
- CWD = os.path.dirname(os.path.realpath(__file__))
- r2.run("{}/mcast-rx.py 229.1.1.2 r2-eth0 &".format(CWD))
-
- out = r1.vtysh_cmd("show ip pim upstream json", isjson=True)
- expected = {
- "229.1.1.2": {
- "*": {
- "sourceIgmp": 1,
- "joinState": "Joined",
- "regState": "RegNoInfo",
- "sptBit": 0,
+ cmd = [os.path.join(CWD, "mcast-rx.py"), "229.1.1.2", "r2-eth0"]
+ p = r2.popen(cmd)
+ try:
+ expected = {
+ "229.1.1.2": {
+ "*": {
+ "sourceIgmp": 1,
+ "joinState": "Joined",
+ "regState": "RegNoInfo",
+ "sptBit": 0,
+ }
}
}
- }
-
- assert topotest.json_cmp(out, expected) is None, "failed to converge pim"
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip pim upstream json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=5, wait=0.5)
+ assertmsg = '"{}" JSON output mismatches'.format(r1.name)
+ assert result is None, assertmsg
+ finally:
+ if p:
+ p.terminate()
+ p.wait()
def test_memory_leak():
import os
import sys
-import json
from functools import partial
import pytest
from lib.topolog import logger
# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bfdd, pytest.mark.pimd]
-class PimBasicTopo2(Topo):
- "Test topology builder"
+def build_topo(tgen):
+ "Build function"
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
+ # Create 4 routers
+ for routern in range(1, 5):
+ tgen.add_router("r{}".format(routern))
- # Create 4 routers
- for routern in range(1, 5):
- tgen.add_router("r{}".format(routern))
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r3"])
-
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r2"])
- switch.add_link(tgen.gears["r4"])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(PimBasicTopo2, mod.__name__)
+ tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
topotest.router_json_cmp,
tgen.gears[router],
"show ip pim neighbor json",
- {interface: {peer: {}}}
+ {interface: {peer: {}}},
)
_, result = topotest.run_and_expect(test_func, None, count=130, wait=1)
assertmsg = '"{}" PIM convergence failure'.format(router)
logger.info("waiting for PIM to converge")
- expect_neighbor('r1', 'r1-eth0', '192.168.1.2')
- expect_neighbor('r2', 'r2-eth0', '192.168.1.1')
+ expect_neighbor("r1", "r1-eth0", "192.168.1.2")
+ expect_neighbor("r2", "r2-eth0", "192.168.1.1")
- expect_neighbor('r2', 'r2-eth1', '192.168.2.3')
- expect_neighbor('r2', 'r2-eth2', '192.168.3.4')
+ expect_neighbor("r2", "r2-eth1", "192.168.2.3")
+ expect_neighbor("r2", "r2-eth2", "192.168.3.4")
- expect_neighbor('r3', 'r3-eth0', '192.168.2.1')
- expect_neighbor('r4', 'r4-eth0', '192.168.3.1')
+ expect_neighbor("r3", "r3-eth0", "192.168.2.1")
+ expect_neighbor("r4", "r4-eth0", "192.168.3.1")
def test_bfd_peers():
topotest.router_json_cmp,
tgen.gears[router],
"show bfd peers json",
- [{"peer": peer, "status": "up"}]
+ [{"peer": peer, "status": "up"}],
)
_, result = topotest.run_and_expect(test_func, None, count=10, wait=1)
assertmsg = '"{}" BFD convergence failure'.format(router)
topotest.router_json_cmp,
tgen.gears[router],
"show ip pim neighbor json",
- {interface: {peer: None}}
+ {interface: {peer: None}},
)
_, result = topotest.run_and_expect(test_func, None, count=4, wait=1)
assertmsg = '"{}" PIM convergence failure'.format(router)
topotest.router_json_cmp,
tgen.gears[router],
"show bfd peers json",
- [settings]
+ [settings],
)
_, result = topotest.run_and_expect(test_func, None, count=4, wait=1)
assertmsg = '"{}" BFD convergence failure'.format(router)
assert result is None, assertmsg
- expect_bfd_peer_settings("r1", {
- "peer": "192.168.1.2",
- "receive-interval": 250,
- "transmit-interval": 250,
- })
-
- expect_bfd_peer_settings("r2", {
- "peer": "192.168.1.1",
- "remote-receive-interval": 250,
- "remote-transmit-interval": 250,
- })
+ expect_bfd_peer_settings(
+ "r1",
+ {
+ "peer": "192.168.1.2",
+ "receive-interval": 250,
+ "transmit-interval": 250,
+ },
+ )
+
+ expect_bfd_peer_settings(
+ "r2",
+ {
+ "peer": "192.168.1.1",
+ "remote-receive-interval": 250,
+ "remote-transmit-interval": 250,
+ },
+ )
def test_memory_leak():
test_pim_vrf.py: Test PIM with VRFs.
"""
+# XXX clean up in later commit to avoid conflict on rebase
+# pylint: disable=C0413
+
# Tests PIM with VRF
#
# R1 is split into 2 VRF: Blue and Red, the others are normal
# routers and Hosts
# There are 2 similar topologies with overlapping IPs in each
-# section.
+# section.
#
# Test steps:
# - setup_module()
# R1, R11 and R12. R11 is the RP for vrf blue, R12 is RP
# for vrf red.
# - test_vrf_pimreg_interfaces()
-# Adding PIM RP in VRF information and verify pimreg
+# Adding PIM RP in VRF information and verify pimreg
# interfaces in VRF blue and red
# - test_mcast_vrf_blue()
-# Start multicast stream for group 239.100.0.1 from Host
+# Start multicast stream for group 239.100.0.1 from Host
# H2 and join from Host H1 on vrf blue
# Verify PIM JOIN status on R1 and R11
# Stop multicast after verification
# - test_mcast_vrf_red()
-# Start multicast stream for group 239.100.0.1 from Host
+# Start multicast stream for group 239.100.0.1 from Host
# H4 and join from Host H3 on vrf blue
# Verify PIM JOIN status on R1 and R12
# Stop multicast after verification
import os
import sys
import pytest
-import re
-import time
-from time import sleep
-import socket
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
from lib.topotest import iproute2_is_vrf_capable
-from lib.common_config import (
- required_linux_kernel_version)
+from lib.common_config import required_linux_kernel_version
+from lib.pim import McastTesterHelper
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.ospfd, pytest.mark.pimd]
-#
-# Test global variables:
-# They are used to handle communicating with external application.
-#
-APP_SOCK_PATH = '/tmp/topotests/apps.sock'
-HELPER_APP_PATH = os.path.join(CWD, "../lib/mcast-tester.py")
-app_listener = None
-app_clients = {}
-
-def listen_to_applications():
- "Start listening socket to connect with applications."
- # Remove old socket.
- try:
- os.unlink(APP_SOCK_PATH)
- except OSError:
- pass
-
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
- sock.bind(APP_SOCK_PATH)
- sock.listen(10)
- global app_listener
- app_listener = sock
-
-def accept_host(host):
- "Accept connection from application running in hosts."
- global app_listener, app_clients
- conn = app_listener.accept()
- app_clients[host] = {
- 'fd': conn[0],
- 'address': conn[1]
- }
-
-def close_applications():
- "Signal applications to stop and close all sockets."
- global app_listener, app_clients
-
- if app_listener:
- # Close listening socket.
- app_listener.close()
-
- # Remove old socket.
- try:
- os.unlink(APP_SOCK_PATH)
- except OSError:
- pass
-
- # Close all host connections.
- for host in ["h1", "h2"]:
- if app_clients.get(host) is None:
- continue
- app_clients[host]["fd"].close()
-
- # Reset listener and clients data struct
- app_listener = None
- app_clients = {}
-
-
-class PIMVRFTopo(Topo):
- "PIM VRF Test Topology"
-
- def build(self):
- tgen = get_topogen(self)
-
- # Create the hosts
- for hostNum in range(1,5):
- tgen.add_router("h{}".format(hostNum))
-
- # Create the main router
- tgen.add_router("r1")
-
- # Create the PIM RP routers
- for rtrNum in range(11, 13):
- tgen.add_router("r{}".format(rtrNum))
-
- # Setup Switches and connections
- for swNum in range(1, 5):
- tgen.add_switch("sw{}".format(swNum))
-
- ################
- # 1st set of connections to routers for VRF red
- ################
-
- # Add connections H1 to R1 switch sw1
- tgen.gears["h1"].add_link(tgen.gears["sw1"])
- tgen.gears["r1"].add_link(tgen.gears["sw1"])
-
- # Add connections R1 to R1x switch sw2
- tgen.gears["r1"].add_link(tgen.gears["sw2"])
- tgen.gears["h2"].add_link(tgen.gears["sw2"])
- tgen.gears["r11"].add_link(tgen.gears["sw2"])
-
- ################
- # 2nd set of connections to routers for vrf blue
- ################
-
- # Add connections H1 to R1 switch sw1
- tgen.gears["h3"].add_link(tgen.gears["sw3"])
- tgen.gears["r1"].add_link(tgen.gears["sw3"])
-
- # Add connections R1 to R1x switch sw2
- tgen.gears["r1"].add_link(tgen.gears["sw4"])
- tgen.gears["h4"].add_link(tgen.gears["sw4"])
- tgen.gears["r12"].add_link(tgen.gears["sw4"])
+def build_topo(tgen):
+ for hostNum in range(1, 5):
+ tgen.add_router("h{}".format(hostNum))
+
+ # Create the main router
+ tgen.add_router("r1")
+
+ # Create the PIM RP routers
+ for rtrNum in range(11, 13):
+ tgen.add_router("r{}".format(rtrNum))
+
+ # Setup Switches and connections
+ for swNum in range(1, 5):
+ tgen.add_switch("sw{}".format(swNum))
+
+ ################
+ # 1st set of connections to routers for VRF red
+ ################
+
+ # Add connections H1 to R1 switch sw1
+ tgen.gears["h1"].add_link(tgen.gears["sw1"])
+ tgen.gears["r1"].add_link(tgen.gears["sw1"])
+
+ # Add connections R1 to R1x switch sw2
+ tgen.gears["r1"].add_link(tgen.gears["sw2"])
+ tgen.gears["h2"].add_link(tgen.gears["sw2"])
+ tgen.gears["r11"].add_link(tgen.gears["sw2"])
+
+ ################
+ # 2nd set of connections to routers for vrf blue
+ ################
+
+ # Add connections H1 to R1 switch sw1
+ tgen.gears["h3"].add_link(tgen.gears["sw3"])
+ tgen.gears["r1"].add_link(tgen.gears["sw3"])
+
+ # Add connections R1 to R1x switch sw2
+ tgen.gears["r1"].add_link(tgen.gears["sw4"])
+ tgen.gears["h4"].add_link(tgen.gears["sw4"])
+ tgen.gears["r12"].add_link(tgen.gears["sw4"])
+
#####################################################
#
#
#####################################################
+
def setup_module(module):
logger.info("PIM IGMP VRF Topology: \n {}".format(TOPOLOGY))
- tgen = Topogen(PIMVRFTopo, module.__name__)
+ tgen = Topogen(build_topo, module.__name__)
tgen.start_topology()
vrf_setup_cmds = [
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
- if rname[0] != 'h':
+ if rname[0] != "h":
# Only load ospf on routers, not on end hosts
router.load_config(
TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
router.load_config(
TopoRouter.RD_PIM, os.path.join(CWD, "{}/pimd.conf".format(rname))
)
+
tgen.start_router()
def teardown_module(module):
tgen = get_topogen()
tgen.stop_topology()
- close_applications()
def test_ospf_convergence():
expected = json.loads(open(reffile).read())
test_func = functools.partial(
- topotest.router_json_cmp, router, "show ip ospf vrf blue neighbor json", expected
+ topotest.router_json_cmp,
+ router,
+ "show ip ospf vrf blue neighbor json",
+ expected,
)
_, res = topotest.run_and_expect(test_func, None, count=60, wait=2)
assertmsg = "OSPF router R1 did not converge on VRF blue"
reffile = os.path.join(CWD, "r1/pim_blue_pimreg11.json")
expected = json.loads(open(reffile).read())
test_func = functools.partial(
- topotest.router_json_cmp, r1, "show ip pim vrf blue inter pimreg11 json", expected
+ topotest.router_json_cmp,
+ r1,
+ "show ip pim vrf blue inter pimreg11 json",
+ expected,
)
_, res = topotest.run_and_expect(test_func, None, count=5, wait=2)
assertmsg = "PIM router R1, VRF blue (table 11) pimreg11 interface missing or incorrect status"
reffile = os.path.join(CWD, "r1/pim_red_pimreg12.json")
expected = json.loads(open(reffile).read())
test_func = functools.partial(
- topotest.router_json_cmp, r1, "show ip pim vrf red inter pimreg12 json", expected
+ topotest.router_json_cmp,
+ r1,
+ "show ip pim vrf red inter pimreg12 json",
+ expected,
)
_, res = topotest.run_and_expect(test_func, None, count=5, wait=2)
assertmsg = "PIM router R1, VRF red (table 12) pimreg12 interface missing or incorrect status"
### Test PIM / IGMP with VRF
##################################
+
def check_mcast_entry(mcastaddr, pimrp, receiver, sender, vrf):
"Helper function to check RP"
tgen = get_topogen()
- logger.info("Testing PIM for VRF {} entry using {}".format(vrf, mcastaddr));
+ logger.info("Testing PIM for VRF {} entry using {}".format(vrf, mcastaddr))
- # Start applications socket.
- listen_to_applications()
+ with McastTesterHelper(tgen) as helper:
+ helper.run(sender, ["--send=0.7", mcastaddr, str(sender) + "-eth0"])
+ helper.run(receiver, [mcastaddr, str(receiver) + "-eth0"])
- tgen.gears[sender].run("{} --send='0.7' '{}' '{}' '{}' &".format(
- HELPER_APP_PATH, APP_SOCK_PATH, mcastaddr, '{}-eth0'.format(sender)))
- accept_host(sender)
+ logger.info("mcast join and source for {} started".format(mcastaddr))
- tgen.gears[receiver].run("{} '{}' '{}' '{}' &".format(
- HELPER_APP_PATH, APP_SOCK_PATH, mcastaddr, '{}-eth0'.format(receiver)))
- accept_host(receiver)
+ router = tgen.gears["r1"]
+ reffile = os.path.join(CWD, "r1/pim_{}_join.json".format(vrf))
+ expected = json.loads(open(reffile).read())
- logger.info("mcast join and source for {} started".format(mcastaddr))
-
- # tgen.mininet_cli()
-
- router = tgen.gears["r1"]
- reffile = os.path.join(CWD, "r1/pim_{}_join.json".format(vrf))
- expected = json.loads(open(reffile).read())
-
- logger.info("verifying pim join on r1 for {} on VRF {}".format(mcastaddr, vrf))
- test_func = functools.partial(
- topotest.router_json_cmp, router, "show ip pim vrf {} join json".format(vrf),
- expected
- )
- _, res = topotest.run_and_expect(test_func, None, count=10, wait=2)
- assertmsg = "PIM router r1 did not show join status on VRF".format(vrf)
- assert res is None, assertmsg
-
- logger.info("verifying pim join on PIM RP {} for {}".format(pimrp, mcastaddr))
- router = tgen.gears[pimrp]
- reffile = os.path.join(CWD, "{}/pim_{}_join.json".format(pimrp, vrf))
- expected = json.loads(open(reffile).read())
+ logger.info("verifying pim join on r1 for {} on VRF {}".format(mcastaddr, vrf))
+ test_func = functools.partial(
+ topotest.router_json_cmp,
+ router,
+ "show ip pim vrf {} join json".format(vrf),
+ expected,
+ )
+ _, res = topotest.run_and_expect(test_func, None, count=10, wait=2)
+ assertmsg = "PIM router r1 did not show join status on VRF {}".format(vrf)
+ assert res is None, assertmsg
- test_func = functools.partial(
- topotest.router_json_cmp, router, "show ip pim join json", expected
- )
- _, res = topotest.run_and_expect(test_func, None, count=10, wait=2)
- assertmsg = "PIM router {} did not get selected as the PIM RP for VRF {}".format(pimrp, vrf)
- assert res is None, assertmsg
+ logger.info("verifying pim join on PIM RP {} for {}".format(pimrp, mcastaddr))
+ router = tgen.gears[pimrp]
+ reffile = os.path.join(CWD, "{}/pim_{}_join.json".format(pimrp, vrf))
+ expected = json.loads(open(reffile).read())
- close_applications()
- return
+ test_func = functools.partial(
+ topotest.router_json_cmp, router, "show ip pim join json", expected
+ )
+ _, res = topotest.run_and_expect(test_func, None, count=10, wait=2)
+ assertmsg = (
+ "PIM router {} did not get selected as the PIM RP for VRF {}".format(
+ pimrp, vrf
+ )
+ )
+ assert res is None, assertmsg
def test_mcast_vrf_blue():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry('239.100.0.1', 'r11', 'h1', 'h2', 'blue')
+ check_mcast_entry("239.100.0.1", "r11", "h1", "h2", "blue")
def test_mcast_vrf_red():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- check_mcast_entry('239.100.0.1', 'r12', 'h3', 'h4', 'red')
+ check_mcast_entry("239.100.0.1", "r12", "h3", "h4", "red")
if __name__ == "__main__":
# Skip pytests example directory
[pytest]
+
+# We always turn this on inside conftest.py, default shown
+# addopts = --junitxml=<rundir>/topotests.xml
+
+log_level = DEBUG
+log_format = %(asctime)s,%(msecs)03d %(levelname)s: %(name)s: %(message)s
+log_date_format = %Y-%m-%d %H:%M:%S
+
+# If verbose is specifyied log_cli will be set to 1, it can also be specified
+# here or on the CLI.
+# log_cli = 1
+log_cli_level = INFO
+log_cli_format = %(asctime)s,%(msecs)03d %(levelname)s: %(name)s: %(message)s
+log_cli_date_format = %Y-%m-%d %H:%M:%S
+
+# By default this is palced in <rundir>/exec.log
+# log_file = <rundir>/exec.log
+log_file_level = DEBUG
+log_file_format = %(asctime)s,%(msecs)03d %(levelname)s: %(name)s: %(message)s
+log_file_date_format = %Y-%m-%d %H:%M:%S
+
+junit_logging = all
+junit_log_passing_tests = true
+
norecursedirs = .git example_test example_topojson_test lib docker
+# Directory to store test results and run logs in, default shown
+# rundir = /tmp/topotests
+
# Markers
#
# Please consult the documentation and discuss with TSC members before applying
# memleak_path = /tmp/memleak_
# Output files will be named after the testname:
# /tmp/memleak_test_ospf_topo1.txt
-#memleak_path =
+memleak_path = /tmp/memleak_
import pytest
from time import sleep
-from mininet.topo import Topo
-from mininet.net import Mininet
-from mininet.node import Node, OVSSwitch, Host
-from mininet.log import setLogLevel, info
-from mininet.cli import CLI
-from mininet.link import Intf
-
-from functools import partial
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lib import topotest
+from lib.topogen import Topogen, get_topogen
fatal_error = ""
#####################################################
-class NetworkTopo(Topo):
- "RIP Topology 1"
-
- def build(self, **_opts):
-
- # Setup Routers
- router = {}
- #
- # Setup Main Router
- router[1] = topotest.addRouter(self, "r1")
- #
- # Setup RIP Routers
- for i in range(2, 4):
- router[i] = topotest.addRouter(self, "r%s" % i)
- #
- # Setup Switches
- switch = {}
- #
- # On main router
- # First switch is for a dummy interface (for local network)
- switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch)
- self.addLink(switch[1], router[1], intfName2="r1-eth0")
- #
- # Switches for RIP
- # switch 2 switch is for connection to RIP router
- switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch)
- self.addLink(switch[2], router[1], intfName2="r1-eth1")
- self.addLink(switch[2], router[2], intfName2="r2-eth0")
- # switch 3 is between RIP routers
- switch[3] = self.addSwitch("sw3", cls=topotest.LegacySwitch)
- self.addLink(switch[3], router[2], intfName2="r2-eth1")
- self.addLink(switch[3], router[3], intfName2="r3-eth1")
- # switch 4 is stub on remote RIP router
- switch[4] = self.addSwitch("sw4", cls=topotest.LegacySwitch)
- self.addLink(switch[4], router[3], intfName2="r3-eth0")
-
- switch[5] = self.addSwitch("sw5", cls=topotest.LegacySwitch)
- self.addLink(switch[5], router[1], intfName2="r1-eth2")
-
- switch[6] = self.addSwitch("sw6", cls=topotest.LegacySwitch)
- self.addLink(switch[6], router[1], intfName2="r1-eth3")
+def build_topo(tgen):
+ # Setup RIP Routers
+ for i in range(1, 4):
+ tgen.add_router("r%s" % i)
+
+ #
+ # On main router
+ # First switch is for a dummy interface (for local network)
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["r1"])
+ #
+ # Switches for RIP
+
+ # switch 2 switch is for connection to RIP router
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ # switch 3 is between RIP routers
+ switch = tgen.add_switch("sw3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"], nodeif="r3-eth1")
+
+ # switch 4 is stub on remote RIP router
+ switch = tgen.add_switch("sw4")
+ switch.add_link(tgen.gears["r3"], nodeif="r3-eth0")
+
+ switch = tgen.add_switch("sw5")
+ switch.add_link(tgen.gears["r1"])
+
+ switch = tgen.add_switch("sw6")
+ switch.add_link(tgen.gears["r1"])
#####################################################
def setup_module(module):
- global topo, net
-
print("\n\n** %s: Setup Topology" % module.__name__)
print("******************************************\n")
- print("Cleanup old Mininet runs")
- os.system("sudo mn -c > /dev/null 2>&1")
-
thisDir = os.path.dirname(os.path.realpath(__file__))
- topo = NetworkTopo()
+ tgen = Topogen(build_topo, module.__name__)
+ tgen.start_topology()
- net = Mininet(controller=None, topo=topo)
- net.start()
+ net = tgen.net
# Starting Routers
#
for i in range(1, 4):
net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i))
net["r%s" % i].loadConf("ripd", "%s/r%s/ripd.conf" % (thisDir, i))
- net["r%s" % i].startRouter()
+ tgen.gears["r%s" % i].start()
# For debugging after starting FRR daemons, uncomment the next line
# CLI(net)
def teardown_module(module):
- global net
-
print("\n\n** %s: Shutdown Topology" % module.__name__)
print("******************************************\n")
-
- # End - Shutdown network
- net.stop()
+ tgen = get_topogen()
+ tgen.stop_topology()
def test_router_running():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_converge_protocols():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
print("******************************************\n")
# Not really implemented yet - just sleep 11 secs for now
- sleep(11)
+ sleep(21)
# Make sure that all daemons are still running
for i in range(1, 4):
def test_rip_status():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_rip_routes():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_zebra_ipv4_routingTable():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_shutdown_check_stderr():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
if __name__ == "__main__":
- setLogLevel("info")
# To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli
# retval = pytest.main(["-s", "--tb=no"])
retval = pytest.main(["-s"])
import re
import sys
import pytest
-import unicodedata
from time import sleep
-from mininet.topo import Topo
-from mininet.net import Mininet
-from mininet.node import Node, OVSSwitch, Host
-from mininet.log import setLogLevel, info
-from mininet.cli import CLI
-from mininet.link import Intf
-
-from functools import partial
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lib import topotest
+from lib.topogen import Topogen, get_topogen
fatal_error = ""
#####################################################
-class NetworkTopo(Topo):
- "RIPng Topology 1"
-
- def build(self, **_opts):
-
- # Setup Routers
- router = {}
- #
- # Setup Main Router
- router[1] = topotest.addRouter(self, "r1")
- #
- # Setup RIPng Routers
- for i in range(2, 4):
- router[i] = topotest.addRouter(self, "r%s" % i)
-
- # Setup Switches
- switch = {}
- #
- # On main router
- # First switch is for a dummy interface (for local network)
- switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch)
- self.addLink(switch[1], router[1], intfName2="r1-eth0")
- #
- # Switches for RIPng
- # switch 2 switch is for connection to RIP router
- switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch)
- self.addLink(switch[2], router[1], intfName2="r1-eth1")
- self.addLink(switch[2], router[2], intfName2="r2-eth0")
- # switch 3 is between RIP routers
- switch[3] = self.addSwitch("sw3", cls=topotest.LegacySwitch)
- self.addLink(switch[3], router[2], intfName2="r2-eth1")
- self.addLink(switch[3], router[3], intfName2="r3-eth1")
- # switch 4 is stub on remote RIP router
- switch[4] = self.addSwitch("sw4", cls=topotest.LegacySwitch)
- self.addLink(switch[4], router[3], intfName2="r3-eth0")
-
- switch[5] = self.addSwitch("sw5", cls=topotest.LegacySwitch)
- self.addLink(switch[5], router[1], intfName2="r1-eth2")
- switch[6] = self.addSwitch("sw6", cls=topotest.LegacySwitch)
- self.addLink(switch[6], router[1], intfName2="r1-eth3")
+def build_topo(tgen):
+ # Setup RIPng Routers
+ for i in range(1, 4):
+ tgen.add_router("r%s" % i)
+
+ #
+ # On main router
+ # First switch is for a dummy interface (for local network)
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["r1"])
+ #
+ # Switches for RIPng
+ # switch 2 switch is for connection to RIP router
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ # switch 3 is between RIP routers
+ switch = tgen.add_switch("sw3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"], nodeif="r3-eth1")
+ # switch 4 is stub on remote RIP router
+ switch = tgen.add_switch("sw4")
+ switch.add_link(tgen.gears["r3"], nodeif="r3-eth0")
+
+ switch = tgen.add_switch("sw5")
+ switch.add_link(tgen.gears["r1"])
+ switch = tgen.add_switch("sw6")
+ switch.add_link(tgen.gears["r1"])
#####################################################
def setup_module(module):
- global topo, net
-
print("\n\n** %s: Setup Topology" % module.__name__)
print("******************************************\n")
- print("Cleanup old Mininet runs")
- os.system("sudo mn -c > /dev/null 2>&1")
-
thisDir = os.path.dirname(os.path.realpath(__file__))
- topo = NetworkTopo()
+ tgen = Topogen(build_topo, module.__name__)
+ tgen.start_topology()
- net = Mininet(controller=None, topo=topo)
- net.start()
+ net = tgen.net
# Starting Routers
#
for i in range(1, 4):
net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i))
net["r%s" % i].loadConf("ripngd", "%s/r%s/ripngd.conf" % (thisDir, i))
- net["r%s" % i].startRouter()
+ tgen.gears["r%s" % i].start()
# For debugging after starting FRR daemons, uncomment the next line
# CLI(net)
def teardown_module(module):
- global net
-
print("\n\n** %s: Shutdown Topology" % module.__name__)
print("******************************************\n")
-
- # End - Shutdown network
- net.stop()
+ tgen = get_topogen()
+ tgen.stop_topology()
def test_router_running():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_converge_protocols():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_ripng_status():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_ripng_routes():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_zebra_ipv6_routingTable():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_shutdown_check_stderr():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
def test_shutdown_check_memleak():
global fatal_error
- global net
+ net = get_topogen().net
# Skip if previous fatal error condition is raised
if fatal_error != "":
if __name__ == "__main__":
- setLogLevel("info")
# To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli
# retval = pytest.main(["-s", "--tb=no"])
retval = pytest.main(["-s"])
--- /dev/null
+#!/usr/bin/env python
+
+#
+# scale_test_common.py
+#
+# Copyright (c) 2020 by
+# Cumulus Networks, Inc.
+# Donald Sharp
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+scale_test_common.py: Common routines for testing route scale
+
+"""
+
+import os
+import re
+import sys
+import pytest
+import json
+from functools import partial
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+
+#####################################################
+##
+## Network Topology Definition
+##
+#####################################################
+
+
+def scale_build_common(tgen):
+ "Build function"
+
+ # Populate routers
+ for routern in range(1, 2):
+ tgen.add_router("r{}".format(routern))
+
+ # Populate switches
+ for switchn in range(1, 33):
+ switch = tgen.add_switch("sw{}".format(switchn))
+ switch.add_link(tgen.gears["r1"])
+
+
+def scale_setup_module(module):
+ "Setup topology"
+ tgen = Topogen(scale_build_common, module.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
+ )
+
+ tgen.start_router()
+ # tgen.mininet_cli()
+
+
+def scale_teardown_module(_mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+
+ # This function tears down the whole topology.
+ tgen.stop_topology()
+
+
+def scale_converge_protocols():
+ "Wait for protocol convergence"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+
+def run_one_setup(r1, s):
+ "Run one ecmp config"
+
+ # Extract params
+ expected_installed = s["expect_in"]
+ expected_removed = s["expect_rem"]
+
+ retries = s["retries"]
+ wait = s["wait"]
+
+ for d in expected_installed["routes"]:
+ if d["type"] == "sharp":
+ count = d["rib"]
+ break
+
+ logger.info("Testing {} routes X {} ecmp".format(count, s["ecmp"]))
+
+ r1.vtysh_cmd(
+ "sharp install route 1.0.0.0 \
+ nexthop-group {} {}".format(
+ s["nhg"], count
+ ),
+ isjson=False,
+ )
+
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route summary json", expected_installed
+ )
+ success, result = topotest.run_and_expect(test_func, None, retries, wait)
+ assert success, "Route scale test install failed:\n{}".format(result)
+
+ output = r1.vtysh_cmd("sharp data route", isjson=False)
+ logger.info("{} routes X {} ecmp installed".format(count, s["ecmp"]))
+ logger.info(output)
+ r1.vtysh_cmd("sharp remove route 1.0.0.0 {}".format(count), isjson=False)
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route summary json", expected_removed
+ )
+ success, result = topotest.run_and_expect(test_func, None, retries, wait)
+ assert success, "Route scale test remove failed:\n{}".format(result)
+
+ output = r1.vtysh_cmd("sharp data route", isjson=False)
+ logger.info("{} routes x {} ecmp removed".format(count, s["ecmp"]))
+ logger.info(output)
+
+
+def route_install_helper(iter):
+ "Test route install for a variety of ecmp"
+
+ tgen = get_topogen()
+ # Don't run this test if we have any failure.
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ r1 = tgen.gears["r1"]
+
+ # Avoid top ecmp case for runs with < 4G memory
+ output = tgen.net.cmd_raises("free")
+ m = re.search("Mem:\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)", output)
+ total_mem = int(m.group(2))
+ if total_mem < 4000000 and iter == 5:
+ logger.info(
+ "Limited memory available: {}, skipping x32 testcase".format(total_mem)
+ )
+ return;
+
+ installed_file = "{}/r1/installed.routes.json".format(CWD)
+ expected_installed = json.loads(open(installed_file).read())
+
+ removed_file = "{}/r1/no.routes.json".format(CWD)
+ expected_removed = json.loads(open(removed_file).read())
+
+ # dict keys of params: ecmp number, corresponding nhg name, timeout,
+ # number of times to wait
+ scale_keys = ["ecmp", "nhg", "wait", "retries", "expect_in", "expect_rem"]
+
+ # Table of defaults, used for timeout values and 'expected' objects
+ scale_defaults = dict(
+ zip(scale_keys, [None, None, 7, 30, expected_installed, expected_removed])
+ )
+
+ # List of params for each step in the test; note extra time given
+ # for the highest ecmp steps. Executing 'show' at scale can be costly
+ # so we widen the interval there too.
+ scale_steps = [
+ [1, "one"],
+ [2, "two"],
+ [4, "four"],
+ [8, "eight"],
+ [16, "sixteen", 10, 40],
+ [32, "thirtytwo", 10, 40],
+ ]
+
+ # Build up a list of dicts with params for each step of the test;
+ # use defaults where the step doesn't supply a value
+ scale_setups = []
+ s = scale_steps[iter]
+
+ d = dict(zip(scale_keys, s))
+ for k in scale_keys:
+ if k not in d:
+ d[k] = scale_defaults[k]
+
+ run_one_setup(r1, d)
+
+
+# Mem leak testcase
+def scale_test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+ tgen.report_memory_leaks()
+++ /dev/null
-#!/usr/bin/env python
-
-#
-# test_route_scale.py
-#
-# Copyright (c) 2020 by
-# Cumulus Networks, Inc.
-# Donald Sharp
-#
-# Permission to use, copy, modify, and/or distribute this software
-# for any purpose with or without fee is hereby granted, provided
-# that the above copyright notice and this permission notice appear
-# in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
-# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
-# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-# OF THIS SOFTWARE.
-#
-
-"""
-test_route_scale.py: Testing route scale
-
-"""
-
-import os
-import re
-import sys
-import pytest
-import json
-from functools import partial
-
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
-from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from lib.common_config import shutdown_bringup_interface
-
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
-
-pytestmark = [pytest.mark.sharpd]
-
-
-#####################################################
-##
-## Network Topology Definition
-##
-#####################################################
-
-
-class NetworkTopo(Topo):
- "Route Scale Topology"
-
- def build(self, **_opts):
- "Build function"
-
- tgen = get_topogen(self)
-
- # Populate routers
- for routern in range(1, 2):
- tgen.add_router("r{}".format(routern))
-
- # Populate switches
- for switchn in range(1, 33):
- switch = tgen.add_switch("sw{}".format(switchn))
- switch.add_link(tgen.gears["r1"])
-
-
-#####################################################
-##
-## Tests starting
-##
-#####################################################
-
-
-def setup_module(module):
- "Setup topology"
- tgen = Topogen(NetworkTopo, module.__name__)
- tgen.start_topology()
-
- router_list = tgen.routers()
- for rname, router in router_list.items():
- router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
- )
- router.load_config(
- TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
- )
-
- tgen.start_router()
- # tgen.mininet_cli()
-
-
-def teardown_module(_mod):
- "Teardown the pytest environment"
- tgen = get_topogen()
-
- # This function tears down the whole topology.
- tgen.stop_topology()
-
-
-def test_converge_protocols():
- "Wait for protocol convergence"
-
- tgen = get_topogen()
- # Don't run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
-
-def run_one_setup(r1, s):
- "Run one ecmp config"
-
- # Extract params
- expected_installed = s["expect_in"]
- expected_removed = s["expect_rem"]
-
- retries = s["retries"]
- wait = s["wait"]
-
- for d in expected_installed["routes"]:
- if d["type"] == "sharp":
- count = d["rib"]
- break
-
- logger.info("Testing {} routes X {} ecmp".format(count, s["ecmp"]))
-
- r1.vtysh_cmd(
- "sharp install route 1.0.0.0 \
- nexthop-group {} {}".format(
- s["nhg"], count
- ),
- isjson=False,
- )
-
- test_func = partial(
- topotest.router_json_cmp, r1, "show ip route summary json", expected_installed
- )
- success, result = topotest.run_and_expect(test_func, None, retries, wait)
- assert success, "Route scale test install failed:\n{}".format(result)
-
- output = r1.vtysh_cmd("sharp data route", isjson=False)
- logger.info("{} routes X {} ecmp installed".format(count, s["ecmp"]))
- logger.info(output)
- r1.vtysh_cmd("sharp remove route 1.0.0.0 {}".format(count), isjson=False)
- test_func = partial(
- topotest.router_json_cmp, r1, "show ip route summary json", expected_removed
- )
- success, result = topotest.run_and_expect(test_func, None, retries, wait)
- assert success, "Route scale test remove failed:\n{}".format(result)
-
- output = r1.vtysh_cmd("sharp data route", isjson=False)
- logger.info("{} routes x {} ecmp removed".format(count, s["ecmp"]))
- logger.info(output)
-
-
-def test_route_install():
- "Test route install for a variety of ecmp"
-
- tgen = get_topogen()
- # Don't run this test if we have any failure.
- if tgen.routers_have_failure():
- pytest.skip(tgen.errors)
-
- r1 = tgen.gears["r1"]
-
- installed_file = "{}/r1/installed.routes.json".format(CWD)
- expected_installed = json.loads(open(installed_file).read())
-
- removed_file = "{}/r1/no.routes.json".format(CWD)
- expected_removed = json.loads(open(removed_file).read())
-
- # dict keys of params: ecmp number, corresponding nhg name, timeout,
- # number of times to wait
- scale_keys = ["ecmp", "nhg", "wait", "retries", "expect_in", "expect_rem"]
-
- # Table of defaults, used for timeout values and 'expected' objects
- scale_defaults = dict(
- zip(scale_keys, [None, None, 7, 30, expected_installed, expected_removed])
- )
-
- # List of params for each step in the test; note extra time given
- # for the highest ecmp steps. Executing 'show' at scale can be costly
- # so we widen the interval there too.
- scale_steps = [
- [1, "one"],
- [2, "two"],
- [4, "four"],
- [8, "eight"],
- [16, "sixteen", 10, 40],
- [32, "thirtytwo", 10, 40],
- ]
-
- # Build up a list of dicts with params for each step of the test;
- # use defaults where the step doesn't supply a value
- scale_setups = []
- for s in scale_steps:
- d = dict(zip(scale_keys, s))
- for k in scale_keys:
- if k not in d:
- d[k] = scale_defaults[k]
-
- scale_setups.append(d)
-
- # Avoid top ecmp case for runs with < 4G memory
- p = os.popen("free")
- l = p.readlines()[1].split()
- mem = int(l[1])
- if mem < 4000000:
- logger.info("Limited memory available: {}, skipping x32 testcase".format(mem))
- scale_setups = scale_setups[0:-1]
-
- # Run each step using the dicts we've built
- for s in scale_setups:
- run_one_setup(r1, s)
-
-
-# Mem leak testcase
-def test_memory_leak():
- "Run the memory leak test and report results."
- tgen = get_topogen()
- if not tgen.is_memleak_enabled():
- pytest.skip("Memory leak test/report is disabled")
- tgen.report_memory_leaks()
-
-
-if __name__ == "__main__":
- args = ["-s"] + sys.argv[1:]
- sys.exit(pytest.main(args))
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_route_scale1.py
+#
+# Copyright (c) 2021 by
+# Nvidia, Inc.
+# Donald Sharp
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_route_scale1.py: Testing route scale
+
+"""
+import os
+import re
+import sys
+import pytest
+import json
+from functools import partial
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+from scale_test_common import scale_build_common, scale_setup_module, route_install_helper, scale_test_memory_leak, scale_converge_protocols, scale_teardown_module
+
+
+pytestmark = [pytest.mark.sharpd]
+
+def build(tgen):
+ scale_build_common(tgen)
+
+def setup_module(module):
+ scale_setup_module(module)
+
+def teardown_module(_mod):
+ scale_teardown_module(_mod)
+
+def test_converge_protocols():
+ scale_converge_protocols()
+
+def test_route_install_2nh():
+ route_install_helper(1)
+
+def test_route_install_4nh():
+ route_install_helper(2)
+
+def test_route_install_16nh():
+ route_install_helper(4)
+
+def test_memory_leak():
+ scale_test_memory_leak()
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_route_scale2.py
+#
+# Copyright (c) 2022 by
+# Nvidia, Inc.
+# Donald Sharp
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NVIDIA DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NVIDIA BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_route_scale2.py: Testing route scale
+
+"""
+import os
+import re
+import sys
+import pytest
+import json
+from functools import partial
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, "../"))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+from scale_test_common import scale_build_common, scale_setup_module, route_install_helper, scale_test_memory_leak, scale_converge_protocols, scale_teardown_module
+
+
+pytestmark = [pytest.mark.sharpd]
+
+def build(tgen):
+ scale_build_common(tgen)
+
+def setup_module(module):
+ scale_setup_module(module)
+
+def teardown_module(_mod):
+ scale_teardown_module(_mod)
+
+def test_converge_protocols():
+ scale_converge_protocols()
+
+def test_route_install_1nh():
+ route_install_helper(0)
+
+def test_route_install_8nh():
+ route_install_helper(3)
+
+def test_route_install_32nh():
+ route_install_helper(5)
+
+def test_memory_leak():
+ scale_test_memory_leak()
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
rouser frr
master agentx
+
+agentXSocket /etc/frr/agentx
+agentXPerms 777 755 root frr
import os
import sys
-import json
-from functools import partial
-from time import sleep
import pytest
# Save the Current Working Directory to find configuration files.
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
from lib.snmptest import SnmpTester
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd, pytest.mark.isisd, pytest.mark.snmp]
-class TemplateTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- # This function only purpose is to define allocation and relationship
- # between routers, switches and hosts.
- #
- #
- # Create routers
- tgen.add_router("r1")
-
- # r1-eth0
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
-
- # r1-eth1
- switch = tgen.add_switch("s2")
- switch.add_link(tgen.gears["r1"])
-
- # r1-eth2
- switch = tgen.add_switch("s3")
- switch.add_link(tgen.gears["r1"])
-
-
def setup_module(mod):
"Sets up the pytest environment"
error_msg = "SNMP not installed - skipping"
pytest.skip(error_msg)
# This function initiates the topology build with Topogen...
- tgen = Topogen(TemplateTopo, mod.__name__)
+ topodef = {"s1": "r1", "s2": "r1", "s3": "r1"}
+ tgen = Topogen(topodef, mod.__name__)
# ... and here it calls Mininet initialization functions.
tgen.start_topology()
pytest.skip(tgen.errors)
# tgen.mininet_cli()
- r1 = tgen.net.get("r1")
+ r1 = tgen.gears["r1"]
r1_snmp = SnmpTester(r1, "1.1.1.1", "public", "2c")
assert r1_snmp.test_oid("bgpVersin", None)
assert r1_snmp.test_oid("bgpVersion", "10")
import os
import sys
import json
-import time
import pytest
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd, pytest.mark.sharpd]
assert False, "Could not read file {}".format(filename)
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
- tgen.add_router('r1')
-
-
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen({None: "r1"}, mod.__name__)
tgen.start_topology()
- router_list = tgen.routers()
for rname, router in tgen.routers().items():
router.run("/bin/bash {}/{}/setup.sh".format(CWD, rname))
- router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
- router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, '{}/bgpd.conf'.format(rname)))
- router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, '{}/sharpd.conf'.format(rname)))
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
+ )
tgen.start_router()
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears['r1']
+ router = tgen.gears["r1"]
def _check_srv6_locator(router, expected_locator_file):
logger.info("checking zebra locator status")
def check_srv6_locator(router, expected_file):
func = functools.partial(_check_srv6_locator, router, expected_file)
success, result = topotest.run_and_expect(func, None, count=5, wait=0.5)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
def check_sharpd_chunk(router, expected_file):
func = functools.partial(_check_sharpd_chunk, router, expected_file)
success, result = topotest.run_and_expect(func, None, count=5, wait=0.5)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
logger.info("Test1 for Locator Configuration")
check_srv6_locator(router, "expected_locators1.json")
check_sharpd_chunk(router, "expected_chunks5.json")
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
"""
import sys
-import json
import time
import os
import pytest
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
-from mininet.topo import Topo
from lib.topotest import version_cmp
# Import topoJson from lib, to create topology and initial configuration
create_static_routes,
check_address_types,
step,
- create_interfaces_cfg,
shutdown_bringup_interface,
stop_router,
start_router,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-JSONFILE = "{}/static_routes_topo1_ebgp.json".format(CWD)
-try:
- with open(JSONFILE, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(JSONFILE)
-
# Global variables
ADDR_TYPES = check_address_types()
NETWORK = {"ipv4": ["11.0.20.1/32", "11.0.20.2/32"], "ipv6": ["2::1/128", "2::2/128"]}
PREFIX1 = {"ipv4": "110.0.20.1/32", "ipv6": "20::1/128"}
-class CreateTopo(Topo):
- """
- Test CreateTopo - topology 1.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
- def dumdum(self):
- """ Dummy """
- print("%s", self.name)
-
-
def setup_module(mod):
"""
Sets up the pytest environment.
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/static_routes_topo1_ebgp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
-Verify 8 static route functionality with 8 ECMP next hop
"""
import sys
-import json
import time
import os
import pytest
sys.path.append(os.path.join(CWD, "../lib/"))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/static_routes_topo2_ebgp.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
# Global variables
BGP_CONVERGENCE = False
ADDR_TYPES = check_address_types()
"""
-class CreateTopo(Topo):
- """
- Test CreateTopo - topology 1.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/static_routes_topo2_ebgp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
"""
import sys
-import json
import time
import os
import pytest
sys.path.append(os.path.join(CWD, "../lib/"))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
from lib.common_config import (
create_static_routes,
check_address_types,
step,
- create_interfaces_cfg,
shutdown_bringup_interface,
stop_router,
start_router,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/static_routes_topo3_ebgp.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global variables
BGP_CONVERGENCE = False
ADDR_TYPES = check_address_types()
NEXT_HOP_IP = []
-class CreateTopo(Topo):
- """
- Test CreateTopo - topology 1.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/static_routes_topo3_ebgp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
"""
import sys
-import json
import time
import os
import pytest
sys.path.append(os.path.join(CWD, "../lib/"))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
from lib.topotest import version_cmp
step,
create_prefix_lists,
create_route_maps,
- create_interfaces_cfg,
verify_prefix_lists,
verify_route_maps,
)
clear_bgp_and_verify,
clear_bgp,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
-
-pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
+from lib.topojson import build_config_from_json
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/static_routes_topo4_ebgp.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
+pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Global variables
BGP_CONVERGENCE = False
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-class CreateTopo(Topo):
- """
- Test CreateTopo - topology 1.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Set up the pytest environment.
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/static_routes_topo4_ebgp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
"""
import sys
-import json
import time
import os
import pytest
import platform
-from copy import deepcopy
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../lib/"))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
from lib.topotest import version_cmp
create_static_routes,
check_address_types,
step,
- create_interfaces_cfg,
shutdown_bringup_interface,
stop_router,
start_router,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/static_routes_topo1_ibgp.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global variables
BGP_CONVERGENCE = False
ADDR_TYPES = check_address_types()
PREFIX1 = {"ipv4": "110.0.20.1/32", "ipv6": "20::1/128"}
-class CreateTopo(Topo):
- """
- Test CreateTopo - topology 1.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/static_routes_topo1_ibgp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
-Verify 8 static route functionality with 8 ECMP next hop
"""
import sys
-import json
import time
import os
import pytest
import platform
-from time import sleep
import random
# Save the Current Working Directory to find configuration files.
sys.path.append(os.path.join(CWD, "../lib/"))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
create_static_routes,
check_address_types,
step,
- create_interfaces_cfg,
shutdown_bringup_interface,
stop_router,
start_router,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.topotest import version_cmp
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/static_routes_topo2_ibgp.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
# Global variables
BGP_CONVERGENCE = False
ADDR_TYPES = check_address_types()
"""
-class CreateTopo(Topo):
- """
- Test CreateTopo - topology 1.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/static_routes_topo2_ibgp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
"""
import sys
-import json
import time
import os
import pytest
import platform
-from copy import deepcopy
import random
-from re import search as re_search
# Save the Current Working Directory to find configuration files.
sys.path.append(os.path.join(CWD, "../lib/"))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
from lib.topotest import version_cmp
create_static_routes,
check_address_types,
step,
- create_interfaces_cfg,
shutdown_bringup_interface,
stop_router,
start_router,
)
from lib.topolog import logger
from lib.bgp import verify_bgp_convergence, create_router_bgp, verify_bgp_rib
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/static_routes_topo3_ibgp.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
-
# Global variables
BGP_CONVERGENCE = False
ADDR_TYPES = check_address_types()
NEXT_HOP_IP = []
-class CreateTopo(Topo):
- """
- Test CreateTopo - topology 1.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/static_routes_topo3_ibgp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
"""
import sys
-import json
import time
import os
import pytest
sys.path.append(os.path.join(CWD, "../lib/"))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from mininet.topo import Topo
from lib.topogen import Topogen, get_topogen
from lib.common_config import (
step,
create_prefix_lists,
create_route_maps,
- create_interfaces_cfg,
verify_prefix_lists,
verify_route_maps,
)
clear_bgp_and_verify,
clear_bgp,
)
-from lib.topojson import build_topo_from_json, build_config_from_json
+from lib.topojson import build_config_from_json
from lib.topotest import version_cmp
-# Reading the data from JSON File for topology creation
-jsonFile = "{}/static_routes_topo4_ibgp.json".format(CWD)
-try:
- with open(jsonFile, "r") as topoJson:
- topo = json.load(topoJson)
-except IOError:
- assert False, "Could not read file {}".format(jsonFile)
# Global variables
BGP_CONVERGENCE = False
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
-class CreateTopo(Topo):
- """
- Test CreateTopo - topology 1.
-
- * `Topo`: Topology object
- """
-
- def build(self, *_args, **_opts):
- """Build function."""
- tgen = get_topogen(self)
-
- # Building topology from json file
- build_topo_from_json(tgen, topo)
-
-
def setup_module(mod):
"""
Set up the pytest environment.
* `mod`: module name
"""
- global topo
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
- tgen = Topogen(CreateTopo, mod.__name__)
+ json_file = "{}/static_routes_topo4_ibgp.json".format(CWD)
+ tgen = Topogen(json_file, mod.__name__)
+ global topo
+ topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
+++ /dev/null
-{
- "2.1.3.7\/32":[
- {
- "prefix":"2.1.3.7\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.8\/32":[
- {
- "prefix":"2.1.3.8\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.9\/32":[
- {
- "prefix":"2.1.3.9\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.10\/32":[
- {
- "prefix":"2.1.3.10\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.11\/32":[
- {
- "prefix":"2.1.3.11\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.12\/32":[
- {
- "prefix":"2.1.3.12\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.13\/32":[
- {
- "prefix":"2.1.3.13\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.14\/32":[
- {
- "prefix":"2.1.3.14\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.15\/32":[
- {
- "prefix":"2.1.3.15\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.16\/32":[
- {
- "prefix":"2.1.3.16\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.17\/32":[
- {
- "prefix":"2.1.3.17\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.18\/32":[
- {
- "prefix":"2.1.3.18\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.19\/32":[
- {
- "prefix":"2.1.3.19\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.20\/32":[
- {
- "prefix":"2.1.3.20\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.21\/32":[
- {
- "prefix":"2.1.3.21\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.22\/32":[
- {
- "prefix":"2.1.3.22\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.23\/32":[
- {
- "prefix":"2.1.3.23\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.24\/32":[
- {
- "prefix":"2.1.3.24\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.25\/32":[
- {
- "prefix":"2.1.3.25\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.26\/32":[
- {
- "prefix":"2.1.3.26\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.27\/32":[
- {
- "prefix":"2.1.3.27\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.28\/32":[
- {
- "prefix":"2.1.3.28\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.29\/32":[
- {
- "prefix":"2.1.3.29\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.30\/32":[
- {
- "prefix":"2.1.3.30\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.31\/32":[
- {
- "prefix":"2.1.3.31\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.32\/32":[
- {
- "prefix":"2.1.3.32\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.33\/32":[
- {
- "prefix":"2.1.3.33\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.34\/32":[
- {
- "prefix":"2.1.3.34\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.35\/32":[
- {
- "prefix":"2.1.3.35\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.36\/32":[
- {
- "prefix":"2.1.3.36\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.37\/32":[
- {
- "prefix":"2.1.3.37\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.38\/32":[
- {
- "prefix":"2.1.3.38\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.39\/32":[
- {
- "prefix":"2.1.3.39\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.40\/32":[
- {
- "prefix":"2.1.3.40\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.41\/32":[
- {
- "prefix":"2.1.3.41\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.42\/32":[
- {
- "prefix":"2.1.3.42\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.43\/32":[
- {
- "prefix":"2.1.3.43\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.44\/32":[
- {
- "prefix":"2.1.3.44\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.45\/32":[
- {
- "prefix":"2.1.3.45\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.46\/32":[
- {
- "prefix":"2.1.3.46\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.47\/32":[
- {
- "prefix":"2.1.3.47\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.48\/32":[
- {
- "prefix":"2.1.3.48\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.49\/32":[
- {
- "prefix":"2.1.3.49\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.50\/32":[
- {
- "prefix":"2.1.3.50\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.51\/32":[
- {
- "prefix":"2.1.3.51\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.52\/32":[
- {
- "prefix":"2.1.3.52\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.53\/32":[
- {
- "prefix":"2.1.3.53\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.54\/32":[
- {
- "prefix":"2.1.3.54\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.55\/32":[
- {
- "prefix":"2.1.3.55\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.56\/32":[
- {
- "prefix":"2.1.3.56\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.57\/32":[
- {
- "prefix":"2.1.3.57\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.58\/32":[
- {
- "prefix":"2.1.3.58\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.59\/32":[
- {
- "prefix":"2.1.3.59\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.60\/32":[
- {
- "prefix":"2.1.3.60\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.61\/32":[
- {
- "prefix":"2.1.3.61\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.62\/32":[
- {
- "prefix":"2.1.3.62\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.63\/32":[
- {
- "prefix":"2.1.3.63\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.64\/32":[
- {
- "prefix":"2.1.3.64\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.65\/32":[
- {
- "prefix":"2.1.3.65\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.66\/32":[
- {
- "prefix":"2.1.3.66\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.67\/32":[
- {
- "prefix":"2.1.3.67\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.68\/32":[
- {
- "prefix":"2.1.3.68\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.69\/32":[
- {
- "prefix":"2.1.3.69\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.70\/32":[
- {
- "prefix":"2.1.3.70\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.71\/32":[
- {
- "prefix":"2.1.3.71\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.72\/32":[
- {
- "prefix":"2.1.3.72\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.73\/32":[
- {
- "prefix":"2.1.3.73\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.74\/32":[
- {
- "prefix":"2.1.3.74\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.75\/32":[
- {
- "prefix":"2.1.3.75\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.76\/32":[
- {
- "prefix":"2.1.3.76\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.77\/32":[
- {
- "prefix":"2.1.3.77\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.78\/32":[
- {
- "prefix":"2.1.3.78\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.79\/32":[
- {
- "prefix":"2.1.3.79\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.80\/32":[
- {
- "prefix":"2.1.3.80\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.81\/32":[
- {
- "prefix":"2.1.3.81\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.82\/32":[
- {
- "prefix":"2.1.3.82\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.83\/32":[
- {
- "prefix":"2.1.3.83\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.84\/32":[
- {
- "prefix":"2.1.3.84\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.85\/32":[
- {
- "prefix":"2.1.3.85\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.86\/32":[
- {
- "prefix":"2.1.3.86\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.87\/32":[
- {
- "prefix":"2.1.3.87\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.88\/32":[
- {
- "prefix":"2.1.3.88\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.89\/32":[
- {
- "prefix":"2.1.3.89\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.90\/32":[
- {
- "prefix":"2.1.3.90\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.91\/32":[
- {
- "prefix":"2.1.3.91\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.92\/32":[
- {
- "prefix":"2.1.3.92\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.93\/32":[
- {
- "prefix":"2.1.3.93\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.94\/32":[
- {
- "prefix":"2.1.3.94\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.95\/32":[
- {
- "prefix":"2.1.3.95\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.96\/32":[
- {
- "prefix":"2.1.3.96\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.97\/32":[
- {
- "prefix":"2.1.3.97\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.98\/32":[
- {
- "prefix":"2.1.3.98\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.99\/32":[
- {
- "prefix":"2.1.3.99\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.100\/32":[
- {
- "prefix":"2.1.3.100\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.101\/32":[
- {
- "prefix":"2.1.3.101\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.102\/32":[
- {
- "prefix":"2.1.3.102\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.103\/32":[
- {
- "prefix":"2.1.3.103\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.104\/32":[
- {
- "prefix":"2.1.3.104\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.105\/32":[
- {
- "prefix":"2.1.3.105\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ],
- "2.1.3.106\/32":[
- {
- "prefix":"2.1.3.106\/32",
- "protocol":"sharp",
- "selected":true,
- "destSelected":true,
- "distance":150,
- "metric":0,
- "installed":true,
- "table":254,
- "nexthops":[
- {
- "flags":3,
- "fib":true,
- "ip":"192.168.1.1",
- "afi":"ipv4",
- "interfaceName":"r1-eth0",
- "active":true,
- "weight":1
- }
- ]
- }
- ]
-}
test_zebra_netlink.py: Test some basic interactions with kernel using Netlink
"""
-
-import os
-import re
-import sys
-import pytest
+# pylint: disable=C0413
+import ipaddress
import json
-import platform
+import sys
from functools import partial
-# Save the Current Working Directory to find configuration files.
-CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, "../"))
-
-# pylint: disable=C0413
-# Import topogen and topotest helpers
+import pytest
from lib import topotest
-from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topogen import Topogen, TopoRouter
from lib.topolog import logger
-from lib.common_config import shutdown_bringup_interface
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.sharpd]
-#####################################################
-##
-## Network Topology Definition
-##
-#####################################################
-
-
-class ZebraTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- tgen.add_router("r1")
-
- # Create a empty network for router 1
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
-
-
#####################################################
##
## Tests starting
#####################################################
-def setup_module(mod):
+@pytest.fixture(scope="module")
+def tgen(request):
"Sets up the pytest environment"
- tgen = Topogen(ZebraTopo, mod.__name__)
+
+ topodef = {"s1": ("r1")}
+ tgen = Topogen(topodef, request.module.__name__)
tgen.start_topology()
+ # Initialize all routers.
router_list = tgen.routers()
for rname, router in router_list.items():
- router.load_config(
- TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
- )
-
- router.load_config(
- TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
- )
+ router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
+ router.load_config(TopoRouter.RD_SHARP)
- # Initialize all routers.
tgen.start_router()
+ yield tgen
+ tgen.stop_topology()
-def teardown_module(_mod):
- "Teardown the pytest environment"
- tgen = get_topogen()
-
- # This function tears down the whole topology.
- tgen.stop_topology()
+@pytest.fixture(autouse=True)
+def skip_on_failure(tgen):
+ if tgen.routers_have_failure():
+ pytest.skip("skipped because of previous test failure")
-def test_zebra_netlink_batching():
+def test_zebra_netlink_batching(tgen):
"Test the situation where dataplane fills netlink send buffer entirely."
logger.info(
"Test the situation where dataplane fills netlink send buffer entirely."
)
- tgen = get_topogen()
- if tgen.routers_have_failure():
- pytest.skip("skipped because of previous test failure")
r1 = tgen.gears["r1"]
# Reduce the size of the buffer to hit the limit.
r1.vtysh_cmd("conf t\nzebra kernel netlink batch-tx-buf 256 256")
- r1.vtysh_cmd("sharp install routes 2.1.3.7 nexthop 192.168.1.1 100")
- json_file = "{}/r1/v4_route.json".format(CWD)
- expected = json.loads(open(json_file).read())
- test_func = partial(
- topotest.router_json_cmp,
- r1,
- "show ip route json",
- expected,
- )
- _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
- assertmsg = '"r1" JSON output mismatches'
- assert result is None, assertmsg
-
- r1.vtysh_cmd("sharp remove routes 2.1.3.7 100")
+ count = 100
+ r1.vtysh_cmd("sharp install routes 2.1.3.7 nexthop 192.168.1.1 " + str(count))
+
+ # Generate expected results
+ entry = {
+ "protocol": "sharp",
+ "distance": 150,
+ "metric": 0,
+ "installed": True,
+ "table": 254,
+ "nexthops": [
+ {
+ "fib": True,
+ "ip": "192.168.1.1",
+ "afi": "ipv4",
+ "interfaceName": "r1-eth0",
+ "active": True,
+ "weight": 1,
+ }
+ ],
+ }
+
+ match = {}
+ base = int(ipaddress.ip_address(u"2.1.3.7"))
+ for i in range(base, base + count):
+ pfx = str(ipaddress.ip_network((i, 32)))
+ match[pfx] = [dict(entry, prefix=pfx)]
+
+ ok = topotest.router_json_cmp_retry(r1, "show ip route json", match)
+ assert ok, '"r1" JSON output mismatches'
+
+ r1.vtysh_cmd("sharp remove routes 2.1.3.7 " + str(count))
if __name__ == "__main__":
import os
import sys
import json
-import time
import pytest
import functools
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
-from lib.topolog import logger
-from mininet.topo import Topo
pytestmark = [pytest.mark.bgpd]
-class TemplateTopo(Topo):
- def build(self, *_args, **_opts):
- tgen = get_topogen(self)
-
- for routern in range(1, 3):
- tgen.add_router("r{}".format(routern))
-
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r2"])
-
-
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ topodef = {"s1": ("r1", "r2")}
+ tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
from lib.topolog import logger
from time import sleep
-# Required to instantiate the topology builder class.
-from mininet.topo import Topo
pytestmark = [pytest.mark.sharpd]
-class ZebraTopo(Topo):
- "Test topology builder"
-
- def build(self, *_args, **_opts):
- "Build function"
- tgen = get_topogen(self)
-
- tgen.add_router("r1")
-
- # Create a empty network for router 1
- switch = tgen.add_switch("s1")
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r1"])
- switch.add_link(tgen.gears["r1"])
-
-
def setup_module(mod):
"Sets up the pytest environment"
- tgen = Topogen(ZebraTopo, mod.__name__)
+ topodef = {"s1": ("r1", "r1", "r1", "r1", "r1", "r1", "r1", "r1")}
+ tgen = Topogen(topodef, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
distance = 255
metric = 8192
+
def makekmetric(dist, metric):
return (dist << 24) + metric
- r1.run("ip route add 4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric " + str(makekmetric(255, 8192)))
+ r1.run(
+ "ip route add 4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric "
+ + str(makekmetric(255, 8192))
+ )
# Route with 1/1 metric
- r1.run("ip route add 4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric " + str(makekmetric(1, 1)))
+ r1.run(
+ "ip route add 4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric "
+ + str(makekmetric(1, 1))
+ )
# Route with 10/1 metric
- r1.run("ip route add 4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric " + str(makekmetric(10, 1)))
+ r1.run(
+ "ip route add 4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric "
+ + str(makekmetric(10, 1))
+ )
# Same route with a 160/1 metric
- r1.run("ip route add 4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric " + str(makekmetric(160, 1)))
+ r1.run(
+ "ip route add 4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric "
+ + str(makekmetric(160, 1))
+ )
# Currently I believe we have a bug here with the same route and different
# metric. That needs to be properly resolved. Making a note for
logger.info(
"Does the show route-map static command run the correct number of times"
)
+
def check_static_map_correct_runs():
actual = r1.vtysh_cmd("show route-map static")
actual = ("\n".join(actual.splitlines()) + "\n").rstrip()
title1="Actual Route-map output",
title2="Expected Route-map output",
)
- ok, result = topotest.run_and_expect(check_static_map_correct_runs, "", count=5, wait=1)
+
+ ok, result = topotest.run_and_expect(
+ check_static_map_correct_runs, "", count=5, wait=1
+ )
assert ok, result
sharp_rmapfile = "%s/r1/sharp_rmap.ref" % (thisDir)
expected = open(sharp_rmapfile).read().rstrip()
expected = ("\n".join(expected.splitlines()) + "\n").rstrip()
logger.info("Does the show route-map sharp command run the correct number of times")
+
def check_sharp_map_correct_runs():
actual = r1.vtysh_cmd("show route-map sharp")
actual = ("\n".join(actual.splitlines()) + "\n").rstrip()
title1="Actual Route-map output",
title2="Expected Route-map output",
)
- ok, result = topotest.run_and_expect(check_sharp_map_correct_runs, "", count=5, wait=1)
+
+ ok, result = topotest.run_and_expect(
+ check_sharp_map_correct_runs, "", count=5, wait=1
+ )
assert ok, result
logger.info(
sharp_ipfile = "%s/r1/iproute.ref" % (thisDir)
expected = open(sharp_ipfile).read().rstrip()
expected = ("\n".join(expected.splitlines()) + "\n").rstrip()
+
def check_routes_installed():
actual = r1.run("ip route show")
actual = ("\n".join(actual.splitlines()) + "\n").rstrip()
actual = re.sub(r" metric", " metric", actual)
actual = re.sub(r" link ", " link ", actual)
return topotest.get_textdiff(
- actual, expected, title1="Actual ip route show", title2="Expected ip route show"
+ actual,
+ expected,
+ title1="Actual ip route show",
+ title2="Expected ip route show",
)
+
ok, result = topotest.run_and_expect(check_routes_installed, "", count=5, wait=1)
assert ok, result
"""
import os
-import re
import sys
import pytest
import json
-import platform
from functools import partial
CWD = os.path.dirname(os.path.realpath(__file__))
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from lib.common_config import shutdown_bringup_interface
-from mininet.topo import Topo
pytestmark = [pytest.mark.sharpd]
assert False, "Could not read file {}".format(filename)
-class TemplateTopo(Topo):
- def build(self, **_opts):
- tgen = get_topogen(self)
- tgen.add_router("r1")
-
-
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen({None: "r1"}, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
for rname, router in tgen.routers().items():
- router.run("/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname))))
- router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
- router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
+ router.run(
+ "/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname)))
+ )
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
+ )
tgen.start_router()
r1 = tgen.gears["r1"]
def check(router, dest, nh, sid, expected):
- router.vtysh_cmd("sharp install seg6-routes {} "\
- "nexthop-seg6 {} encap {} 1".format(dest, nh, sid))
+ router.vtysh_cmd(
+ "sharp install seg6-routes {} "
+ "nexthop-seg6 {} encap {} 1".format(dest, nh, sid)
+ )
output = json.loads(router.vtysh_cmd("show ipv6 route {} json".format(dest)))
- output = output.get('{}/128'.format(dest))
+ output = output.get("{}/128".format(dest))
if output is None:
return False
return topotest.json_cmp(output, expected)
manifests = open_json_file(os.path.join(CWD, "{}/routes.json".format("r1")))
for manifest in manifests:
- logger.info("CHECK {} {} {}".format(manifest['in']['dest'],
- manifest['in']['nh'],
- manifest['in']['sid']))
- test_func = partial(check, r1,
- manifest['in']['dest'],
- manifest['in']['nh'],
- manifest['in']['sid'],
- manifest['out'])
+ logger.info(
+ "CHECK {} {} {}".format(
+ manifest["in"]["dest"], manifest["in"]["nh"], manifest["in"]["sid"]
+ )
+ )
+ test_func = partial(
+ check,
+ r1,
+ manifest["in"]["dest"],
+ manifest["in"]["nh"],
+ manifest["in"]["sid"],
+ manifest["out"],
+ )
success, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
if __name__ == "__main__":
"""
import os
-import re
import sys
import pytest
import json
-import platform
from functools import partial
CWD = os.path.dirname(os.path.realpath(__file__))
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
-from lib.common_config import shutdown_bringup_interface
-from mininet.topo import Topo
pytestmark = [pytest.mark.sharpd]
assert False, "Could not read file {}".format(filename)
-class TemplateTopo(Topo):
- def build(self, **_opts):
- tgen = get_topogen(self)
- tgen.add_router("r1")
-
-
def setup_module(mod):
- tgen = Topogen(TemplateTopo, mod.__name__)
+ tgen = Topogen({None: "r1"}, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
for rname, router in tgen.routers().items():
- router.run("/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname))))
- router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format(rname)))
- router.load_config(TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname)))
+ router.run(
+ "/bin/bash {}".format(os.path.join(CWD, "{}/setup.sh".format(rname)))
+ )
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_SHARP, os.path.join(CWD, "{}/sharpd.conf".format(rname))
+ )
tgen.start_router()
r1 = tgen.gears["r1"]
def check(router, dest, context, expected):
- router.vtysh_cmd("sharp install seg6local-routes {} "\
- "nexthop-seg6local dum0 {} 1".format(dest, context))
+ router.vtysh_cmd(
+ "sharp install seg6local-routes {} "
+ "nexthop-seg6local dum0 {} 1".format(dest, context)
+ )
output = json.loads(router.vtysh_cmd("show ipv6 route {} json".format(dest)))
- output = output.get('{}/128'.format(dest))
+ output = output.get("{}/128".format(dest))
if output is None:
return False
return topotest.json_cmp(output, expected)
manifests = open_json_file(os.path.join(CWD, "{}/routes.json".format("r1")))
for manifest in manifests:
- logger.info("CHECK {} {}".format(manifest['in']['dest'],
- manifest['in']['context']))
- test_func = partial(check, r1,
- manifest['in']['dest'],
- manifest['in']['context'],
- manifest['out'])
+ logger.info(
+ "CHECK {} {}".format(manifest["in"]["dest"], manifest["in"]["context"])
+ )
+ test_func = partial(
+ check,
+ r1,
+ manifest["in"]["dest"],
+ manifest["in"]["context"],
+ manifest["out"],
+ )
success, result = topotest.run_and_expect(test_func, None, count=5, wait=1)
- assert result is None, 'Failed'
+ assert result is None, "Failed"
if __name__ == "__main__":
/* use external allocations */
-static void lp_plugin_init()
+static void lp_plugin_init(void)
{
/* register our own hooks */
hook_register(lm_client_connect, test_client_connect);
hook_register(lm_release_chunk, lm_release_chunk_pi);
}
-static void lp_plugin_cleanup()
+static void lp_plugin_cleanup(void)
{
/* register our own hooks */
hook_unregister(lm_client_connect, test_client_connect);
/* tests */
-static void test_lp_plugin()
+static void test_lp_plugin(void)
{
struct label_manager_chunk *lmc;
-#!/usr/bin/python
+#!/usr/bin/env python3
# Frr Reloader
# Copyright (C) 2014 Cumulus Networks, Inc.
#
from __future__ import print_function, unicode_literals
import argparse
-import copy
import logging
import os, os.path
import random
import subprocess
import sys
from collections import OrderedDict
-
-try:
- from ipaddress import IPv6Address, ip_network
-except ImportError:
- from ipaddr import IPv6Address, IPNetwork
+from ipaddress import IPv6Address, ip_network
from pprint import pformat
-try:
- dict.iteritems
-except AttributeError:
- # Python 3
- def iteritems(d):
- return iter(d.items())
-
-
-else:
- # Python 2
- def iteritems(d):
- return d.iteritems()
+# Python 3
+def iteritems(d):
+ return iter(d.items())
log = logging.getLogger(__name__)
addr = re_key_rt.group(2)
if "/" in addr:
try:
- if "ipaddress" not in sys.modules:
- newaddr = IPNetwork(addr)
- key[0] = "%s route %s/%s%s" % (
- re_key_rt.group(1),
- newaddr.network,
- newaddr.prefixlen,
- re_key_rt.group(3),
- )
- else:
- newaddr = ip_network(addr, strict=False)
- key[0] = "%s route %s/%s%s" % (
- re_key_rt.group(1),
- str(newaddr.network_address),
- newaddr.prefixlen,
- re_key_rt.group(3),
- )
+ newaddr = ip_network(addr, strict=False)
+ key[0] = "%s route %s/%s%s" % (
+ re_key_rt.group(1),
+ str(newaddr.network_address),
+ newaddr.prefixlen,
+ re_key_rt.group(3),
+ )
except ValueError:
pass
addr = re_key_rt.group(4)
if "/" in addr:
try:
- if "ipaddress" not in sys.modules:
- newaddr = "%s/%s" % (
- IPNetwork(addr).network,
- IPNetwork(addr).prefixlen,
- )
- else:
- network_addr = ip_network(addr, strict=False)
- newaddr = "%s/%s" % (
- str(network_addr.network_address),
- network_addr.prefixlen,
- )
+ network_addr = ip_network(addr, strict=False)
+ newaddr = "%s/%s" % (
+ str(network_addr.network_address),
+ network_addr.prefixlen,
+ )
except ValueError:
newaddr = addr
else:
addr = addr + "/8"
try:
- if "ipaddress" not in sys.modules:
- newaddr = IPNetwork(addr)
- line = "network %s/%s %s" % (
- newaddr.network,
- newaddr.prefixlen,
- re_net.group(2),
- )
- else:
- network_addr = ip_network(addr, strict=False)
- line = "network %s/%s %s" % (
- str(network_addr.network_address),
- network_addr.prefixlen,
- re_net.group(2),
- )
+ network_addr = ip_network(addr, strict=False)
+ line = "network %s/%s %s" % (
+ str(network_addr.network_address),
+ network_addr.prefixlen,
+ re_net.group(2),
+ )
newlines.append(line)
except ValueError:
# Really this should be an error. Whats a network
Parse the configuration and create contexts for each appropriate block
"""
- current_context_lines = []
- ctx_keys = []
-
"""
The end of a context is flagged via the 'end' keyword:
# key of the context. So "router bgp 10" is the key for the non-address
# family part of bgp, "router bgp 10, address-family ipv6 unicast" is
# the key for the subcontext and so on.
+
+ # This dictionary contains a tree of all commands that we know start a
+ # new multi-line context. All other commands are treated either as
+ # commands inside a multi-line context or as single-line contexts. This
+ # dictionary should be updated whenever a new node is added to FRR.
+ ctx_keywords = {
+ "router bgp ": {
+ "address-family ": {
+ "vni ": {},
+ },
+ "vnc ": {},
+ "vrf-policy ": {},
+ "bmp ": {},
+ "segment-routing srv6": {},
+ },
+ "router rip": {},
+ "router ripng": {},
+ "router isis ": {},
+ "router openfabric ": {},
+ "router ospf": {},
+ "router ospf6": {},
+ "router eigrp ": {},
+ "router babel": {},
+ "mpls ldp": {"address-family ": {"interface ": {}}},
+ "l2vpn ": {"member pseudowire ": {}},
+ "key chain ": {"key ": {}},
+ "vrf ": {},
+ "interface ": {"link-params": {}},
+ "pseudowire ": {},
+ "segment-routing": {
+ "traffic-eng": {
+ "segment-list ": {},
+ "policy ": {"candidate-path ": {}},
+ "pcep": {"pcc": {}, "pce ": {}, "pce-config ": {}},
+ },
+ "srv6": {"locators": {"locator ": {}}},
+ },
+ "nexthop-group ": {},
+ "route-map ": {},
+ "pbr-map ": {},
+ "rpki": {},
+ "bfd": {"peer ": {}, "profile ": {}},
+ "line vty": {},
+ }
+
+ # stack of context keys
ctx_keys = []
- main_ctx_key = []
- new_ctx = True
-
- # the keywords that we know are single line contexts. bgp in this case
- # is not the main router bgp block, but enabling multi-instance
- oneline_ctx_keywords = (
- "access-list ",
- "agentx",
- "allow-external-route-update",
- "bgp ",
- "debug ",
- "domainname ",
- "dump ",
- "enable ",
- "evpn mh",
- "frr ",
- "fpm ",
- "hostname ",
- "ip ",
- "ipv6 ",
- "log ",
- "mac access-list ",
- "mpls lsp",
- "mpls label",
- "no ",
- "password ",
- "pbr ",
- "ptm-enable",
- "router-id ",
- "service ",
- "table ",
- "username ",
- "vni ",
- "vrrp autoconfigure",
- "zebra "
- )
+ # stack of context keywords
+ cur_ctx_keywords = [ctx_keywords]
+ # list of stored commands
+ cur_ctx_lines = []
for line in self.lines:
if line.startswith("!") or line.startswith("#"):
continue
- if (
- len(ctx_keys) == 2
- and ctx_keys[0].startswith("bfd")
- and ctx_keys[1].startswith("profile ")
- and line == "end"
- ):
- log.debug("LINE %-50s: popping from sub context, %-50s", line, ctx_keys)
-
- if main_ctx_key:
- self.save_contexts(ctx_keys, current_context_lines)
- ctx_keys = copy.deepcopy(main_ctx_key)
- current_context_lines = []
+ if line.startswith("exit"):
+ # ignore on top level
+ if len(ctx_keys) == 0:
continue
- # one line contexts
- # there is one exception though: ldpd accepts a 'router-id' clause
- # as part of its 'mpls ldp' config context. If we are processing
- # ldp configuration and encounter a router-id we should NOT switch
- # to a new context
- if (
- new_ctx is True
- and any(line.startswith(keyword) for keyword in oneline_ctx_keywords)
- and not (
- ctx_keys
- and ctx_keys[0].startswith("mpls ldp")
- and line.startswith("router-id ")
- )
- ):
- self.save_contexts(ctx_keys, current_context_lines)
-
- # Start a new context
- main_ctx_key = []
- ctx_keys = [
- line,
- ]
- current_context_lines = []
-
- log.debug("LINE %-50s: entering new context, %-50s", line, ctx_keys)
- self.save_contexts(ctx_keys, current_context_lines)
- new_ctx = True
-
- elif line == "end":
- self.save_contexts(ctx_keys, current_context_lines)
- log.debug("LINE %-50s: exiting old context, %-50s", line, ctx_keys)
-
- # Start a new context
- new_ctx = True
- main_ctx_key = []
- ctx_keys = []
- current_context_lines = []
-
- elif line == "exit" and ctx_keys[0].startswith("rpki"):
- self.save_contexts(ctx_keys, current_context_lines)
- log.debug("LINE %-50s: exiting old context, %-50s", line, ctx_keys)
-
- # Start a new context
- new_ctx = True
- main_ctx_key = []
- ctx_keys = []
- current_context_lines = []
-
- elif line == "exit-vrf":
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines.append(line)
- log.debug(
- "LINE %-50s: append to current_context_lines, %-50s", line, ctx_keys
- )
+ # save current context
+ self.save_contexts(ctx_keys, cur_ctx_lines)
- # Start a new context
- new_ctx = True
- main_ctx_key = []
- ctx_keys = []
- current_context_lines = []
+ # exit current context
+ log.debug("LINE %-50s: exit context %-50s", line, ctx_keys)
- elif (
- line == "exit"
- and len(ctx_keys) > 1
- and ctx_keys[0].startswith("segment-routing")
- ):
- self.save_contexts(ctx_keys, current_context_lines)
-
- # Start a new context
- ctx_keys = ctx_keys[:-1]
- current_context_lines = []
- log.debug(
- "LINE %-50s: popping segment routing sub-context to ctx%-50s",
- line,
- ctx_keys,
- )
+ ctx_keys.pop()
+ cur_ctx_keywords.pop()
+ cur_ctx_lines = []
- elif line in ["exit-address-family", "exit", "exit-vnc"]:
- # if this exit is for address-family ipv4 unicast, ignore the pop
- if main_ctx_key:
- self.save_contexts(ctx_keys, current_context_lines)
-
- # Start a new context
- ctx_keys = copy.deepcopy(main_ctx_key)
- current_context_lines = []
- log.debug(
- "LINE %-50s: popping from subcontext to ctx%-50s",
- line,
- ctx_keys,
- )
+ continue
- elif line in ["exit-vni", "exit-ldp-if"]:
- if sub_main_ctx_key:
- self.save_contexts(ctx_keys, current_context_lines)
-
- # Start a new context
- ctx_keys = copy.deepcopy(sub_main_ctx_key)
- current_context_lines = []
- log.debug(
- "LINE %-50s: popping from sub-subcontext to ctx%-50s",
- line,
- ctx_keys,
- )
+ if line.startswith("end"):
+ # exit all contexts
+ while len(ctx_keys) > 0:
+ # save current context
+ self.save_contexts(ctx_keys, cur_ctx_lines)
- elif new_ctx is True:
- if not main_ctx_key:
- ctx_keys = [
- line,
- ]
- else:
- ctx_keys = copy.deepcopy(main_ctx_key)
- main_ctx_key = []
+ # exit current context
+ log.debug("LINE %-50s: exit context %-50s", line, ctx_keys)
- current_context_lines = []
- new_ctx = False
- log.debug("LINE %-50s: entering new context, %-50s", line, ctx_keys)
+ ctx_keys.pop()
+ cur_ctx_keywords.pop()
+ cur_ctx_lines = []
- elif (
- line.startswith("address-family ")
- or line.startswith("vnc defaults")
- or line.startswith("vnc l2-group")
- or line.startswith("vnc nve-group")
- or line.startswith("peer")
- or line.startswith("key ")
- or line.startswith("member pseudowire")
- ):
- main_ctx_key = []
-
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- main_ctx_key = copy.deepcopy(ctx_keys)
- log.debug("LINE %-50s: entering sub-context, append to ctx_keys", line)
+ continue
- if line == "address-family ipv6" and not ctx_keys[0].startswith(
- "mpls ldp"
- ):
- ctx_keys.append("address-family ipv6 unicast")
- elif line == "address-family ipv4" and not ctx_keys[0].startswith(
- "mpls ldp"
- ):
- ctx_keys.append("address-family ipv4 unicast")
- elif line == "address-family evpn":
- ctx_keys.append("address-family l2vpn evpn")
- else:
+ new_ctx = False
+
+ # check if the line is a context-entering keyword
+ for k, v in cur_ctx_keywords[-1].items():
+ if line.startswith(k):
+ # candidate-path is a special case. It may be a node and
+ # may be a single-line command. The distinguisher is the
+ # word "dynamic" or "explicit" at the middle of the line.
+ # It was perhaps not the best choice by the pathd authors
+ # but we have what we have.
+ if k == "candidate-path " and "explicit" in line:
+ # this is a single-line command
+ break
+
+ # save current context
+ self.save_contexts(ctx_keys, cur_ctx_lines)
+
+ # enter new context
+ new_ctx = True
ctx_keys.append(line)
+ cur_ctx_keywords.append(v)
+ cur_ctx_lines = []
- elif (
- line.startswith("vni ")
- and len(ctx_keys) == 2
- and ctx_keys[0].startswith("router bgp")
- and ctx_keys[1] == "address-family l2vpn evpn"
- ):
-
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- sub_main_ctx_key = copy.deepcopy(ctx_keys)
- log.debug(
- "LINE %-50s: entering sub-sub-context, append to ctx_keys", line
- )
- ctx_keys.append(line)
-
- elif (
- line.startswith("interface ")
- and len(ctx_keys) == 2
- and ctx_keys[0].startswith("mpls ldp")
- and ctx_keys[1].startswith("address-family")
- ):
-
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- sub_main_ctx_key = copy.deepcopy(ctx_keys)
- log.debug(
- "LINE %-50s: entering sub-sub-context, append to ctx_keys", line
- )
- ctx_keys.append(line)
-
- elif (
- line.startswith("traffic-eng")
- and len(ctx_keys) == 1
- and ctx_keys[0].startswith("segment-routing")
- ):
-
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- log.debug(
- "LINE %-50s: entering segment routing sub-context, append to ctx_keys",
- line,
- )
- ctx_keys.append(line)
-
- elif (
- line.startswith("segment-list ")
- and len(ctx_keys) == 2
- and ctx_keys[0].startswith("segment-routing")
- and ctx_keys[1].startswith("traffic-eng")
- ):
-
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- log.debug(
- "LINE %-50s: entering segment routing sub-context, append to ctx_keys",
- line,
- )
- ctx_keys.append(line)
-
- elif (
- line.startswith("policy ")
- and len(ctx_keys) == 2
- and ctx_keys[0].startswith("segment-routing")
- and ctx_keys[1].startswith("traffic-eng")
- ):
-
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- log.debug(
- "LINE %-50s: entering segment routing sub-context, append to ctx_keys",
- line,
- )
- ctx_keys.append(line)
-
- elif (
- line.startswith("candidate-path ")
- and line.endswith(" dynamic")
- and len(ctx_keys) == 3
- and ctx_keys[0].startswith("segment-routing")
- and ctx_keys[1].startswith("traffic-eng")
- and ctx_keys[2].startswith("policy")
- ):
-
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- main_ctx_key = copy.deepcopy(ctx_keys)
- log.debug(
- "LINE %-50s: entering candidate-path sub-context, append to ctx_keys",
- line,
- )
- ctx_keys.append(line)
-
- elif (
- line.startswith("pcep")
- and len(ctx_keys) == 2
- and ctx_keys[0].startswith("segment-routing")
- and ctx_keys[1].startswith("traffic-eng")
- ):
-
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- main_ctx_key = copy.deepcopy(ctx_keys)
- log.debug(
- "LINE %-50s: entering pcep sub-context, append to ctx_keys", line
- )
- ctx_keys.append(line)
-
- elif (
- line.startswith("pce-config ")
- and len(ctx_keys) == 3
- and ctx_keys[0].startswith("segment-routing")
- and ctx_keys[1].startswith("traffic-eng")
- and ctx_keys[2].startswith("pcep")
- ):
-
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- main_ctx_key = copy.deepcopy(ctx_keys)
- log.debug(
- "LINE %-50s: entering pce-config sub-context, append to ctx_keys",
- line,
- )
- ctx_keys.append(line)
-
- elif (
- line.startswith("pce ")
- and len(ctx_keys) == 3
- and ctx_keys[0].startswith("segment-routing")
- and ctx_keys[1].startswith("traffic-eng")
- and ctx_keys[2].startswith("pcep")
- ):
-
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- main_ctx_key = copy.deepcopy(ctx_keys)
- log.debug(
- "LINE %-50s: entering pce sub-context, append to ctx_keys", line
- )
- ctx_keys.append(line)
-
- elif (
- line.startswith("pcc")
- and len(ctx_keys) == 3
- and ctx_keys[0].startswith("segment-routing")
- and ctx_keys[1].startswith("traffic-eng")
- and ctx_keys[2].startswith("pcep")
- ):
-
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- main_ctx_key = copy.deepcopy(ctx_keys)
- log.debug(
- "LINE %-50s: entering pcc sub-context, append to ctx_keys", line
- )
- ctx_keys.append(line)
-
- elif (
- line.startswith("profile ")
- and len(ctx_keys) == 1
- and ctx_keys[0].startswith("bfd")
- ):
+ log.debug("LINE %-50s: enter context %-50s", line, ctx_keys)
+ break
- # Save old context first
- self.save_contexts(ctx_keys, current_context_lines)
- current_context_lines = []
- main_ctx_key = copy.deepcopy(ctx_keys)
- log.debug(
- "LINE %-50s: entering BFD profile sub-context, append to ctx_keys",
- line,
- )
- ctx_keys.append(line)
+ if new_ctx:
+ continue
+ if len(ctx_keys) == 0:
+ log.debug("LINE %-50s: single-line context", line)
+ self.save_contexts([line], [])
else:
- # Continuing in an existing context, add non-commented lines to it
- current_context_lines.append(line)
- log.debug(
- "LINE %-50s: append to current_context_lines, %-50s", line, ctx_keys
- )
+ log.debug("LINE %-50s: add to current context %-50s", line, ctx_keys)
+ cur_ctx_lines.append(line)
# Save the context of the last one
- self.save_contexts(ctx_keys, current_context_lines)
+ if len(ctx_keys) > 0:
+ self.save_contexts(ctx_keys, cur_ctx_lines)
def lines_to_config(ctx_keys, line, delete):
norm_word = None
if "/" in word:
try:
- if "ipaddress" not in sys.modules:
- v6word = IPNetwork(word)
- norm_word = "%s/%s" % (v6word.network, v6word.prefixlen)
- else:
- v6word = ip_network(word, strict=False)
- norm_word = "%s/%s" % (
- str(v6word.network_address),
- v6word.prefixlen,
- )
+ v6word = ip_network(word, strict=False)
+ norm_word = "%s/%s" % (
+ str(v6word.network_address),
+ v6word.prefixlen,
+ )
except ValueError:
pass
if not norm_word:
nolines = [x.strip() for x in nolines]
# For topotests leave these lines in (don't delete them)
# [chopps: why is "log file" more special than other "log" commands?]
- nolines = [x for x in nolines if "debug" not in x and "log file" not in x]
+ nolines = [
+ x for x in nolines if "debug" not in x and "log file" not in x
+ ]
if not nolines:
continue
[ -n "$FRR_GROUP" ] && chgrp "$FRR_GROUP" "$1"
[ -n "$FRR_CONFIG_MODE" ] && chmod "$FRR_CONFIG_MODE" "$1"
if [ -d "$1" ]; then
- chmod u+x "$1"
+ chmod gu+x "$1"
fi
}
struct cmd_token *stok = start->data;
struct graph_node *gnn;
struct listnode *ln;
+ bool is_neg = false;
// recursive dfs
listnode_add(position, start);
+
+ for (ALL_LIST_ELEMENTS_RO(position, ln, gnn)) {
+ struct cmd_token *tok = gnn->data;
+
+ if (tok->type == WORD_TKN && !strcmp(tok->text, "no")) {
+ is_neg = true;
+ break;
+ }
+ if (tok->type < SPECIAL_TKN)
+ break;
+ }
+
for (unsigned int i = 0; i < vector_active(start->to); i++) {
struct graph_node *gn = vector_slot(start->to, i);
struct cmd_token *tok = gn->data;
fprintf(stdout, "\n");
} else {
bool skip = false;
+
+ if (tok->type == NEG_ONLY_TKN && !is_neg)
+ continue;
if (stok->type == FORK_TKN && tok->type != FORK_TKN)
for (ALL_LIST_ELEMENTS_RO(position, ln, gnn))
if (gnn == gn) {
break;
default:
frr_help_exit(1);
- break;
}
}
{
install_node(&debug_node);
install_node(&vrrp_node);
- vrf_cmd_init(NULL, &vrrp_privs);
+ vrf_cmd_init(NULL);
if_cmd_init(vrrp_config_write_interface);
install_element(VIEW_NODE, &vrrp_vrid_show_cmd);
#include <sys/resource.h>
#include <sys/stat.h>
+/* readline carries some ancient definitions around */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstrict-prototypes"
#include <readline/readline.h>
#include <readline/history.h>
+#pragma GCC diagnostic pop
#include <dirent.h>
#include <stdio.h>
#include "vtysh/vtysh.h"
#include "vtysh/vtysh_daemons.h"
#include "log.h"
-#include "ns.h"
#include "vrf.h"
#include "libfrr.h"
#include "command_graph.h"
*/
if (ret == CMD_SUCCESS || ret == CMD_SUCCESS_DAEMON
|| ret == CMD_WARNING) {
- if ((saved_node == BGP_VPNV4_NODE
- || saved_node == BGP_VPNV6_NODE
- || saved_node == BGP_IPV4_NODE
- || saved_node == BGP_IPV6_NODE
- || saved_node == BGP_FLOWSPECV4_NODE
- || saved_node == BGP_FLOWSPECV6_NODE
- || saved_node == BGP_IPV4M_NODE
- || saved_node == BGP_IPV4L_NODE
- || saved_node == BGP_IPV6L_NODE
- || saved_node == BGP_IPV6M_NODE
- || saved_node == BGP_EVPN_NODE
- || saved_node == LDP_IPV4_NODE
- || saved_node == LDP_IPV6_NODE)
- && (tried == 1)) {
- vtysh_execute("exit-address-family");
- } else if ((saved_node == BGP_EVPN_VNI_NODE) && (tried == 1)) {
- vtysh_execute("exit-vni");
- } else if (saved_node == BGP_VRF_POLICY_NODE && (tried == 1)) {
- vtysh_execute("exit-vrf-policy");
- } else if ((saved_node == BGP_VNC_DEFAULTS_NODE
- || saved_node == BGP_VNC_NVE_GROUP_NODE
- || saved_node == BGP_VNC_L2_GROUP_NODE)
- && (tried == 1)) {
- vtysh_execute("exit-vnc");
- } else if (saved_node == VRF_NODE && (tried == 1)) {
- vtysh_execute("exit-vrf");
- } else if ((saved_node == KEYCHAIN_KEY_NODE
- || saved_node == LDP_PSEUDOWIRE_NODE
- || saved_node == LDP_IPV4_IFACE_NODE
- || saved_node == LDP_IPV6_IFACE_NODE)
- && (tried == 1)) {
+ while (tried-- > 0)
vtysh_execute("exit");
- } else if ((saved_node == SR_SEGMENT_LIST_NODE
- || saved_node == SR_POLICY_NODE
- || saved_node == SR_CANDIDATE_DYN_NODE
- || saved_node == PCEP_NODE
- || saved_node == PCEP_PCE_CONFIG_NODE
- || saved_node == PCEP_PCE_NODE
- || saved_node == PCEP_PCC_NODE)
- && (tried > 0)) {
- vtysh_execute("exit");
- } else if (tried) {
- vtysh_execute("end");
- vtysh_execute("configure");
- }
}
/*
* If command didn't succeed in any node, continue with return value
int ret;
vector vline;
int tried = 0;
- bool ending;
const struct cmd_element *cmd;
int saved_ret, prev_node;
int lineno = 0;
strlcpy(vty_buf_copy, vty->buf, VTY_BUFSIZ);
vty_buf_trimmed = trim(vty_buf_copy);
- switch (vty->node) {
- case LDP_IPV4_IFACE_NODE:
- if (strncmp(vty_buf_copy, " ", 3)) {
- vty_out(vty, " exit-ldp-if\n");
- vty->node = LDP_IPV4_NODE;
- }
- break;
- case LDP_IPV6_IFACE_NODE:
- if (strncmp(vty_buf_copy, " ", 3)) {
- vty_out(vty, " exit-ldp-if\n");
- vty->node = LDP_IPV6_NODE;
- }
- break;
- case LDP_PSEUDOWIRE_NODE:
- if (strncmp(vty_buf_copy, " ", 2)) {
- vty_out(vty, " exit\n");
- vty->node = LDP_L2VPN_NODE;
- }
- break;
- case SR_CANDIDATE_DYN_NODE:
- if (strncmp(vty_buf_copy, " ", 2)) {
- vty_out(vty, " exit\n");
- vty->node = SR_POLICY_NODE;
- }
- break;
- default:
- break;
- }
-
if (vty_buf_trimmed[0] == '!' || vty_buf_trimmed[0] == '#') {
vty_out(vty, "%s", vty->buf);
continue;
*/
if (ret == CMD_SUCCESS || ret == CMD_SUCCESS_DAEMON
|| ret == CMD_WARNING) {
- if ((prev_node == BGP_VPNV4_NODE
- || prev_node == BGP_VPNV6_NODE
- || prev_node == BGP_IPV4_NODE
- || prev_node == BGP_IPV6_NODE
- || prev_node == BGP_FLOWSPECV4_NODE
- || prev_node == BGP_FLOWSPECV6_NODE
- || prev_node == BGP_IPV4L_NODE
- || prev_node == BGP_IPV6L_NODE
- || prev_node == BGP_IPV4M_NODE
- || prev_node == BGP_IPV6M_NODE
- || prev_node == BGP_EVPN_NODE)
- && (tried == 1)) {
- vty_out(vty, "exit-address-family\n");
- } else if ((prev_node == BGP_EVPN_VNI_NODE)
- && (tried == 1)) {
- vty_out(vty, "exit-vni\n");
- } else if ((prev_node == KEYCHAIN_KEY_NODE)
- && (tried == 1)) {
+ while (tried-- > 0)
vty_out(vty, "exit\n");
- } else if ((prev_node == BFD_PEER_NODE
- || prev_node == BFD_PROFILE_NODE)
- && (tried == 1)) {
- vty_out(vty, "exit\n");
- } else if (((prev_node == SEGMENT_ROUTING_NODE)
- || (prev_node == SR_TRAFFIC_ENG_NODE)
- || (prev_node == SR_SEGMENT_LIST_NODE)
- || (prev_node == SR_POLICY_NODE)
- || (prev_node == SR_CANDIDATE_DYN_NODE)
- || (prev_node == PCEP_NODE)
- || (prev_node == PCEP_PCE_CONFIG_NODE)
- || (prev_node == PCEP_PCE_NODE)
- || (prev_node == PCEP_PCC_NODE))
- && (tried > 0)) {
- ending = (vty->node != SEGMENT_ROUTING_NODE)
- && (vty->node != SR_TRAFFIC_ENG_NODE)
- && (vty->node != SR_SEGMENT_LIST_NODE)
- && (vty->node != SR_POLICY_NODE)
- && (vty->node != SR_CANDIDATE_DYN_NODE)
- && (vty->node != PCEP_NODE)
- && (vty->node != PCEP_PCE_CONFIG_NODE)
- && (vty->node != PCEP_PCE_NODE)
- && (vty->node != PCEP_PCC_NODE);
- if (ending)
- tried--;
- while (tried-- > 0)
- vty_out(vty, "exit\n");
- if (ending)
- vty_out(vty, "end\n");
- } else if (tried) {
- vty_out(vty, "end\n");
- }
}
/*
* If command didn't succeed in any node, continue with return
return CMD_SUCCESS;
}
-DEFSH(VTYSH_ZEBRA, vtysh_vrf_netns_cmd,
- "netns NAME",
- "Attach VRF to a Namespace\n"
- "The file name in " NS_RUN_DIR ", or a full pathname\n")
-
-DEFSH(VTYSH_ZEBRA, vtysh_no_vrf_netns_cmd,
- "no netns [NAME]",
- NO_STR
- "Detach VRF from a Namespace\n"
- "The file name in " NS_RUN_DIR ", or a full pathname\n")
-
DEFUNSH(VTYSH_VRF, vtysh_exit_vrf, vtysh_exit_vrf_cmd, "exit",
"Exit current mode and down to previous mode\n")
{
install_node(&vrf_node);
install_element(CONFIG_NODE, &vtysh_vrf_cmd);
- install_element(VRF_NODE, &vtysh_vrf_netns_cmd);
- install_element(VRF_NODE, &vtysh_no_vrf_netns_cmd);
install_element(VRF_NODE, &exit_vrf_config_cmd);
install_element(VRF_NODE, &vtysh_end_all_cmd);
install_element(VRF_NODE, &vtysh_exit_vrf_cmd);
strlen(" ip igmp query-interval")) == 0) {
config_add_line_uniq_end(config->line, line);
} else if (config->index == LINK_PARAMS_NODE
- && strncmp(line, " exit-link-params",
- strlen(" exit"))
+ && strncmp(line, " exit-link-params",
+ strlen(" exit"))
== 0) {
config_add_line(config->line, line);
config->index = INTERFACE_NODE;
- } else if (config->index == VRF_NODE
- && strncmp(line, " exit-vrf",
- strlen(" exit-vrf"))
- == 0) {
- config_add_line_uniq_end(config->line, line);
} else if (!strncmp(line, " vrrp", strlen(" vrrp"))
|| !strncmp(line, " no vrrp",
strlen(" no vrrp"))) {
config_add_line(config_top, line);
break;
default:
- if (strncmp(line, "interface", strlen("interface")) == 0)
+ if (strncmp(line, "exit", strlen("exit")) == 0) {
+ if (config)
+ config_add_line_uniq_end(config->line, line);
+ } else if (strncmp(line, "interface", strlen("interface")) == 0)
config = config_get(INTERFACE_NODE, line);
else if (strncmp(line, "pseudowire", strlen("pseudowire")) == 0)
config = config_get(PW_NODE, line);
* are not under the VRF node.
*/
if (config->index == INTERFACE_NODE
- && list_isempty(config->line)) {
+ && (listcount(config->line) == 1)
+ && (line = listnode_head(config->line))
+ && strmatch(line, "exit")) {
config_del(config);
continue;
}
#include <sys/file.h>
#include <unistd.h>
+/* readline carries some ancient definitions around */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstrict-prototypes"
#include <readline/readline.h>
#include <readline/history.h>
+#pragma GCC diagnostic pop
/*
* The append_history function only appears in newer versions
"Set BGP administrative distance to use";
}
+ identity set-extcommunity-none {
+ base frr-route-map:rmap-set-type;
+ description
+ "Set BGP extended community attribute";
+ }
+
identity set-extcommunity-rt {
base frr-route-map:rmap-set-type;
description
}
}
+ case extcommunity-none {
+ when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:set-extcommunity-none')";
+ description
+ "Value of the BGP extended community attribute";
+ leaf extcommunity-none {
+ type boolean;
+ description "No extended community attribute";
+ }
+ }
+
case extcommunity-rt {
when "derived-from-or-self(/frr-route-map:lib/frr-route-map:route-map/frr-route-map:entry/frr-route-map:set-action/frr-route-map:action, 'frr-bgp-route-map:set-extcommunity-rt')";
description
static int fpm_lsp_send_cb(struct hash_bucket *bucket, void *arg)
{
- zebra_lsp_t *lsp = bucket->data;
+ struct zebra_lsp *lsp = bucket->data;
struct fpm_lsp_arg *fla = arg;
/* Skip entries which have already been sent */
struct fpm_rmac_arg {
struct zebra_dplane_ctx *ctx;
struct fpm_nl_ctx *fnc;
- zebra_l3vni_t *zl3vni;
+ struct zebra_l3vni *zl3vni;
bool complete;
};
static void fpm_enqueue_rmac_table(struct hash_bucket *bucket, void *arg)
{
struct fpm_rmac_arg *fra = arg;
- zebra_mac_t *zrmac = bucket->data;
+ struct zebra_mac *zrmac = bucket->data;
struct zebra_if *zif = fra->zl3vni->vxlan_if->info;
const struct zebra_l2info_vxlan *vxl = &zif->l2info.vxl;
struct zebra_if *br_zif;
static void fpm_enqueue_l3vni_table(struct hash_bucket *bucket, void *arg)
{
struct fpm_rmac_arg *fra = arg;
- zebra_l3vni_t *zl3vni = bucket->data;
+ struct zebra_l3vni *zl3vni = bucket->data;
fra->zl3vni = zl3vni;
hash_iterate(zl3vni->rmac_table, fpm_enqueue_rmac_table, zl3vni);
*/
static void fpm_lsp_reset_cb(struct hash_bucket *bucket, void *arg)
{
- zebra_lsp_t *lsp = bucket->data;
+ struct zebra_lsp *lsp = bucket->data;
UNSET_FLAG(lsp->flags, LSP_FLAG_FPM);
}
*/
static void fpm_unset_rmac_table(struct hash_bucket *bucket, void *arg)
{
- zebra_mac_t *zrmac = bucket->data;
+ struct zebra_mac *zrmac = bucket->data;
UNSET_FLAG(zrmac->flags, ZEBRA_MAC_FPM_SENT);
}
static void fpm_unset_l3vni_table(struct hash_bucket *bucket, void *arg)
{
- zebra_l3vni_t *zl3vni = bucket->data;
+ struct zebra_l3vni *zl3vni = bucket->data;
hash_iterate(zl3vni->rmac_table, fpm_unset_rmac_table, zl3vni);
}
}
static inline void zebra_if_set_ziftype(struct interface *ifp,
- zebra_iftype_t zif_type,
- zebra_slave_iftype_t zif_slave_type)
+ enum zebra_iftype zif_type,
+ enum zebra_slave_iftype zif_slave_type)
{
struct zebra_if *zif;
}
static void netlink_determine_zebra_iftype(const char *kind,
- zebra_iftype_t *zif_type)
+ enum zebra_iftype *zif_type)
{
*zif_type = ZEBRA_IF_OTHER;
char *slave_kind = NULL;
struct zebra_ns *zns = NULL;
vrf_id_t vrf_id = VRF_DEFAULT;
- zebra_iftype_t zif_type = ZEBRA_IF_OTHER;
- zebra_slave_iftype_t zif_slave_type = ZEBRA_IF_SLAVE_NONE;
+ enum zebra_iftype zif_type = ZEBRA_IF_OTHER;
+ enum zebra_slave_iftype zif_slave_type = ZEBRA_IF_SLAVE_NONE;
ifindex_t bridge_ifindex = IFINDEX_INTERNAL;
ifindex_t link_ifindex = IFINDEX_INTERNAL;
ifindex_t bond_ifindex = IFINDEX_INTERNAL;
char *slave_kind = NULL;
struct zebra_ns *zns;
vrf_id_t vrf_id = VRF_DEFAULT;
- zebra_iftype_t zif_type = ZEBRA_IF_OTHER;
- zebra_slave_iftype_t zif_slave_type = ZEBRA_IF_SLAVE_NONE;
+ enum zebra_iftype zif_type = ZEBRA_IF_OTHER;
+ enum zebra_slave_iftype zif_slave_type = ZEBRA_IF_SLAVE_NONE;
ifindex_t bridge_ifindex = IFINDEX_INTERNAL;
ifindex_t bond_ifindex = IFINDEX_INTERNAL;
ifindex_t link_ifindex = IFINDEX_INTERNAL;
vty_out(vty, " %s %pFX\n", prefix_family_str(p), p);
}
-static const char *zebra_zifslavetype_2str(zebra_slave_iftype_t zif_slave_type)
+static const char *
+zebra_zifslavetype_2str(enum zebra_slave_iftype zif_slave_type)
{
switch (zif_slave_type) {
case ZEBRA_IF_SLAVE_BRIDGE:
return "None";
}
-static const char *zebra_ziftype_2str(zebra_iftype_t zif_type)
+static const char *zebra_ziftype_2str(enum zebra_iftype zif_type)
{
switch (zif_type) {
case ZEBRA_IF_OTHER:
if (IS_PARAM_SET(iflp, LP_RMT_AS))
vty_out(vty, " neighbor %pI4 as %u\n", &iflp->rmt_ip,
iflp->rmt_as);
- vty_out(vty, " exit-link-params\n");
+ vty_out(vty, " exit-link-params\n");
return 0;
}
zebra_evpn_mh_if_write(vty, ifp);
link_params_config_write(vty, ifp);
- vty_endframe(vty, "!\n");
+ vty_endframe(vty, "exit\n!\n");
}
return 0;
}
#endif /* HAVE_RTADV */
/* Zebra interface type - ones of interest. */
-typedef enum {
+enum zebra_iftype {
ZEBRA_IF_OTHER = 0, /* Anything else */
ZEBRA_IF_VXLAN, /* VxLAN interface */
ZEBRA_IF_VRF, /* VRF device */
ZEBRA_IF_BOND, /* Bond */
ZEBRA_IF_BOND_SLAVE, /* Bond */
ZEBRA_IF_GRE, /* GRE interface */
-} zebra_iftype_t;
+};
/* Zebra "slave" interface type */
-typedef enum {
+enum zebra_slave_iftype {
ZEBRA_IF_SLAVE_NONE, /* Not a slave */
ZEBRA_IF_SLAVE_VRF, /* Member of a VRF */
ZEBRA_IF_SLAVE_BRIDGE, /* Member of a bridge */
ZEBRA_IF_SLAVE_BOND, /* Bond member */
ZEBRA_IF_SLAVE_OTHER, /* Something else - e.g., bond slave */
-} zebra_slave_iftype_t;
+};
struct irdp_interface;
uint8_t ptm_enable;
/* Zebra interface and "slave" interface type */
- zebra_iftype_t zif_type;
- zebra_slave_iftype_t zif_slave_type;
+ enum zebra_iftype zif_type;
+ enum zebra_slave_iftype zif_slave_type;
/* Additional L2 info, depends on zif_type */
union zebra_l2if_info l2info;
#endif /* HAVE_NETLINK */
default:
frr_help_exit(1);
- break;
}
}
static int router_id_set(afi_t afi, struct prefix *p, struct zebra_vrf *zvrf)
{
- struct prefix p2;
+ struct prefix after, before;
struct listnode *node;
struct zserv *client;
+ router_id_get(afi, &before, zvrf);
+
switch (afi) {
case AFI_IP:
zvrf->rid_user_assigned.u.prefix4.s_addr = p->u.prefix4.s_addr;
return -1;
}
- router_id_get(afi, &p2, zvrf);
+ router_id_get(afi, &after, zvrf);
+
+ /*
+ * If we've been told that the router-id is exactly the same
+ * do we need to really do anything here?
+ */
+ if (prefix_same(&before, &after))
+ return 0;
for (ALL_LIST_ELEMENTS_RO(zrouter.client_list, node, client))
- zsend_router_id_update(client, afi, &p2, zvrf->vrf->vrf_id);
+ zsend_router_id_update(client, afi, &after, zvrf->vrf->vrf_id);
return 0;
}
return true;
}
-static inline bool _netlink_mpls_build_singlepath(const struct prefix *p,
- const char *routedesc,
- const zebra_nhlfe_t *nhlfe,
- struct nlmsghdr *nlmsg,
- struct rtmsg *rtmsg,
- size_t req_size, int cmd)
+static inline bool
+_netlink_mpls_build_singlepath(const struct prefix *p, const char *routedesc,
+ const struct zebra_nhlfe *nhlfe,
+ struct nlmsghdr *nlmsg, struct rtmsg *rtmsg,
+ size_t req_size, int cmd)
{
int bytelen;
uint8_t family;
static inline bool
_netlink_mpls_build_multipath(const struct prefix *p, const char *routedesc,
- const zebra_nhlfe_t *nhlfe,
+ const struct zebra_nhlfe *nhlfe,
struct nlmsghdr *nlmsg, size_t req_size,
struct rtmsg *rtmsg, const union g_addr **src)
{
#define NUD_LOCAL_ACTIVE \
(NUD_PERMANENT | NUD_NOARP | NUD_REACHABLE)
+static int netlink_nbr_entry_state_to_zclient(int nbr_state)
+{
+ /* an exact match is done between
+ * - netlink neighbor state values: NDM_XXX (see in linux/neighbour.h)
+ * - zclient neighbor state values: ZEBRA_NEIGH_STATE_XXX
+ * (see in lib/zclient.h)
+ */
+ return nbr_state;
+}
static int netlink_ipneigh_change(struct nlmsghdr *h, int len, ns_id_t ns_id)
{
struct ndmsg *ndm;
&mac, l2_len);
} else
sockunion_family(&link_layer_ipv4) = AF_UNSPEC;
- zsend_nhrp_neighbor_notify(cmd, ifp, &ip, ndm->ndm_state,
- &link_layer_ipv4);
+ zsend_nhrp_neighbor_notify(
+ cmd, ifp, &ip,
+ netlink_nbr_entry_state_to_zclient(ndm->ndm_state),
+ &link_layer_ipv4);
}
if (h->nlmsg_type == RTM_GETNEIGH)
{
mpls_lse_t lse;
const struct nhlfe_list_head *head;
- const zebra_nhlfe_t *nhlfe;
+ const struct zebra_nhlfe *nhlfe;
struct nexthop *nexthop = NULL;
unsigned int nexthop_num;
const char *routedesc;
zebra/zebra_routemap.c \
zebra/zebra_vty.c \
zebra/zserv.c \
+ zebra/zebra_vrf.c \
# end
# can be loaded as DSO - always include for vtysh
zebra/zebra_routemap.c \
zebra/zebra_vty.c \
zebra/zebra_srv6_vty.c \
+ zebra/zebra_vrf.c \
# end
noinst_HEADERS += \
continue;
s = stream_new(ZEBRA_MAX_PACKET_SIZ);
- zclient_neigh_ip_encode(s, cmd, &ip, link_layer_ipv4, ifp);
+ zclient_neigh_ip_encode(s, cmd, &ip, link_layer_ipv4, ifp,
+ ndm_state);
stream_putw_at(s, 0, stream_get_endp(s));
zserv_send_message(client, s);
}
zlog_debug("%s: nh blackhole %d",
__func__, api_nh->bh_type);
- nexthop = nexthop_from_blackhole(api_nh->bh_type);
+ nexthop =
+ nexthop_from_blackhole(api_nh->bh_type, api_nh->vrf_id);
break;
}
static void zread_router_id_add(ZAPI_HANDLER_ARGS)
{
afi_t afi;
-
struct prefix p;
+ struct prefix zero;
STREAM_GETW(msg, afi);
router_id_get(afi, &p, zvrf);
+ /*
+ * If we have not officially setup a router-id let's not
+ * tell the upper level protocol about it yet.
+ */
+ memset(&zero, 0, sizeof(zero));
+ if ((p.family == AF_INET && p.u.prefix4.s_addr == INADDR_ANY)
+ || (p.family == AF_INET6
+ && memcmp(&p.u.prefix6, &zero.u.prefix6,
+ sizeof(struct in6_addr))
+ == 0))
+ return;
+
zsend_router_id_update(client, afi, &p, zvrf_id(zvrf));
stream_failure:
/* Support info for different kinds of updates */
union {
struct dplane_route_info rinfo;
- zebra_lsp_t lsp;
+ struct zebra_lsp lsp;
struct dplane_pw_info pw;
struct dplane_br_port_info br_port;
struct dplane_intf_info intf;
static int dplane_thread_loop(struct thread *event);
static void dplane_info_from_zns(struct zebra_dplane_info *ns_info,
struct zebra_ns *zns);
-static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
+static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
enum dplane_op_e op);
static enum zebra_dplane_result pw_update_internal(struct zebra_pw *pw,
enum dplane_op_e op);
case DPLANE_OP_LSP_DELETE:
case DPLANE_OP_LSP_NOTIFY:
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
/* Unlink and free allocated NHLFEs */
frr_each_safe(nhlfe_list, &ctx->u.lsp.nhlfe_list, nhlfe) {
return &(ctx->u.lsp.backup_nhlfe_list);
}
-zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t nh_type,
- const union g_addr *gate,
- ifindex_t ifindex,
- uint8_t num_labels,
- mpls_label_t *out_labels)
+struct zebra_nhlfe *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
+ enum lsp_types_t lsp_type,
+ enum nexthop_types_t nh_type,
+ const union g_addr *gate,
+ ifindex_t ifindex, uint8_t num_labels,
+ mpls_label_t *out_labels)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
DPLANE_CTX_VALID(ctx);
return nhlfe;
}
-zebra_nhlfe_t *dplane_ctx_add_backup_nhlfe(struct zebra_dplane_ctx *ctx,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t nh_type,
- const union g_addr *gate,
- ifindex_t ifindex,
- uint8_t num_labels,
- mpls_label_t *out_labels)
+struct zebra_nhlfe *dplane_ctx_add_backup_nhlfe(
+ struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type,
+ enum nexthop_types_t nh_type, const union g_addr *gate,
+ ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
DPLANE_CTX_VALID(ctx);
return nhlfe;
}
-const zebra_nhlfe_t *
+const struct zebra_nhlfe *
dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx)
{
DPLANE_CTX_VALID(ctx);
return ctx->u.lsp.best_nhlfe;
}
-const zebra_nhlfe_t *
+const struct zebra_nhlfe *
dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
- zebra_nhlfe_t *nhlfe)
+ struct zebra_nhlfe *nhlfe)
{
DPLANE_CTX_VALID(ctx);
struct zebra_ns *zns;
struct zebra_vrf *zvrf;
struct nexthop *nexthop;
- zebra_l3vni_t *zl3vni;
+ struct zebra_l3vni *zl3vni;
const struct interface *ifp;
struct dplane_intf_extra *if_extra;
* Capture information for an LSP update in a dplane context.
*/
int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
- zebra_lsp_t *lsp)
+ struct zebra_lsp *lsp)
{
int ret = AOK;
- zebra_nhlfe_t *nhlfe, *new_nhlfe;
+ struct zebra_nhlfe *nhlfe, *new_nhlfe;
ctx->zd_op = op;
ctx->zd_status = ZEBRA_DPLANE_REQUEST_SUCCESS;
/*
* Enqueue LSP add for the dataplane.
*/
-enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp)
+enum zebra_dplane_result dplane_lsp_add(struct zebra_lsp *lsp)
{
enum zebra_dplane_result ret =
lsp_update_internal(lsp, DPLANE_OP_LSP_INSTALL);
/*
* Enqueue LSP update for the dataplane.
*/
-enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp)
+enum zebra_dplane_result dplane_lsp_update(struct zebra_lsp *lsp)
{
enum zebra_dplane_result ret =
lsp_update_internal(lsp, DPLANE_OP_LSP_UPDATE);
/*
* Enqueue LSP delete for the dataplane.
*/
-enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp)
+enum zebra_dplane_result dplane_lsp_delete(struct zebra_lsp *lsp)
{
enum zebra_dplane_result ret =
lsp_update_internal(lsp, DPLANE_OP_LSP_DELETE);
/* Update or un-install resulting from an async notification */
enum zebra_dplane_result
-dplane_lsp_notif_update(zebra_lsp_t *lsp,
- enum dplane_op_e op,
+dplane_lsp_notif_update(struct zebra_lsp *lsp, enum dplane_op_e op,
struct zebra_dplane_ctx *notif_ctx)
{
enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
int ret = EINVAL;
struct zebra_dplane_ctx *ctx = NULL;
struct nhlfe_list_head *head;
- zebra_nhlfe_t *nhlfe, *new_nhlfe;
+ struct zebra_nhlfe *nhlfe, *new_nhlfe;
/* Obtain context block */
ctx = dplane_ctx_alloc();
/*
* Common internal LSP update utility
*/
-static enum zebra_dplane_result lsp_update_internal(zebra_lsp_t *lsp,
+static enum zebra_dplane_result lsp_update_internal(struct zebra_lsp *lsp,
enum dplane_op_e op)
{
enum zebra_dplane_result result = ZEBRA_DPLANE_REQUEST_FAILURE;
* context data area.
*/
int dplane_ctx_lsp_init(struct zebra_dplane_ctx *ctx, enum dplane_op_e op,
- zebra_lsp_t *lsp);
+ struct zebra_lsp *lsp);
mpls_label_t dplane_ctx_get_in_label(const struct zebra_dplane_ctx *ctx);
void dplane_ctx_set_in_label(struct zebra_dplane_ctx *ctx,
const struct nhlfe_list_head *dplane_ctx_get_backup_nhlfe_list(
const struct zebra_dplane_ctx *ctx);
-zebra_nhlfe_t *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t nh_type,
- const union g_addr *gate,
- ifindex_t ifindex,
- uint8_t num_labels,
- mpls_label_t *out_labels);
-
-zebra_nhlfe_t *dplane_ctx_add_backup_nhlfe(struct zebra_dplane_ctx *ctx,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t nh_type,
- const union g_addr *gate,
- ifindex_t ifindex,
- uint8_t num_labels,
- mpls_label_t *out_labels);
-
-const zebra_nhlfe_t *dplane_ctx_get_best_nhlfe(
- const struct zebra_dplane_ctx *ctx);
-const zebra_nhlfe_t *dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
- zebra_nhlfe_t *nhlfe);
+struct zebra_nhlfe *dplane_ctx_add_nhlfe(struct zebra_dplane_ctx *ctx,
+ enum lsp_types_t lsp_type,
+ enum nexthop_types_t nh_type,
+ const union g_addr *gate,
+ ifindex_t ifindex, uint8_t num_labels,
+ mpls_label_t *out_labels);
+
+struct zebra_nhlfe *dplane_ctx_add_backup_nhlfe(
+ struct zebra_dplane_ctx *ctx, enum lsp_types_t lsp_type,
+ enum nexthop_types_t nh_type, const union g_addr *gate,
+ ifindex_t ifindex, uint8_t num_labels, mpls_label_t *out_labels);
+
+const struct zebra_nhlfe *
+dplane_ctx_get_best_nhlfe(const struct zebra_dplane_ctx *ctx);
+const struct zebra_nhlfe *
+dplane_ctx_set_best_nhlfe(struct zebra_dplane_ctx *ctx,
+ struct zebra_nhlfe *nhlfe);
uint32_t dplane_ctx_get_lsp_num_ecmp(const struct zebra_dplane_ctx *ctx);
/* Accessors for pseudowire information */
/*
* Enqueue LSP change operations for the dataplane.
*/
-enum zebra_dplane_result dplane_lsp_add(zebra_lsp_t *lsp);
-enum zebra_dplane_result dplane_lsp_update(zebra_lsp_t *lsp);
-enum zebra_dplane_result dplane_lsp_delete(zebra_lsp_t *lsp);
+enum zebra_dplane_result dplane_lsp_add(struct zebra_lsp *lsp);
+enum zebra_dplane_result dplane_lsp_update(struct zebra_lsp *lsp);
+enum zebra_dplane_result dplane_lsp_delete(struct zebra_lsp *lsp);
/* Update or un-install resulting from an async notification */
-enum zebra_dplane_result dplane_lsp_notif_update(zebra_lsp_t *lsp,
+enum zebra_dplane_result dplane_lsp_notif_update(struct zebra_lsp *lsp,
enum dplane_op_e op,
struct zebra_dplane_ctx *ctx);
{0}
};
-int advertise_gw_macip_enabled(zebra_evpn_t *zevpn)
+int advertise_gw_macip_enabled(struct zebra_evpn *zevpn)
{
struct zebra_vrf *zvrf;
return 0;
}
-int advertise_svi_macip_enabled(zebra_evpn_t *zevpn)
+int advertise_svi_macip_enabled(struct zebra_evpn *zevpn)
{
struct zebra_vrf *zvrf;
/*
* Print a specific EVPN entry.
*/
-void zebra_evpn_print(zebra_evpn_t *zevpn, void **ctxt)
+void zebra_evpn_print(struct zebra_evpn *zevpn, void **ctxt)
{
struct vty *vty;
- zebra_vtep_t *zvtep;
+ struct zebra_vtep *zvtep;
uint32_t num_macs;
uint32_t num_neigh;
json_object *json = NULL;
void zebra_evpn_print_hash(struct hash_bucket *bucket, void *ctxt[])
{
struct vty *vty;
- zebra_evpn_t *zevpn;
- zebra_vtep_t *zvtep;
+ struct zebra_evpn *zevpn;
+ struct zebra_vtep *zvtep;
uint32_t num_vteps = 0;
uint32_t num_macs = 0;
uint32_t num_neigh = 0;
vty = ctxt[0];
json = ctxt[1];
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
zvtep = zevpn->vteps;
while (zvtep) {
void zebra_evpn_print_hash_detail(struct hash_bucket *bucket, void *data)
{
struct vty *vty;
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
json_object *json_array = NULL;
bool use_json = false;
struct zebra_evpn_show *zes = data;
json_array = zes->json;
use_json = zes->use_json;
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
zebra_vxlan_print_vni(vty, zes->zvrf, zevpn->vni, use_json, json_array);
vty_out(vty, "\n");
}
-int zebra_evpn_del_macip_for_intf(struct interface *ifp, zebra_evpn_t *zevpn)
+int zebra_evpn_del_macip_for_intf(struct interface *ifp,
+ struct zebra_evpn *zevpn)
{
struct listnode *cnode = NULL, *cnnode = NULL;
struct connected *c = NULL;
return 0;
}
-int zebra_evpn_add_macip_for_intf(struct interface *ifp, zebra_evpn_t *zevpn)
+int zebra_evpn_add_macip_for_intf(struct interface *ifp,
+ struct zebra_evpn *zevpn)
{
struct listnode *cnode = NULL, *cnnode = NULL;
struct connected *c = NULL;
return zserv_send_message(client, s);
}
-int zebra_evpn_advertise_subnet(zebra_evpn_t *zevpn, struct interface *ifp,
+int zebra_evpn_advertise_subnet(struct zebra_evpn *zevpn, struct interface *ifp,
int advertise)
{
struct listnode *cnode = NULL, *cnnode = NULL;
/*
* zebra_evpn_gw_macip_add_to_client
*/
-int zebra_evpn_gw_macip_add(struct interface *ifp, zebra_evpn_t *zevpn,
+int zebra_evpn_gw_macip_add(struct interface *ifp, struct zebra_evpn *zevpn,
struct ethaddr *macaddr, struct ipaddr *ip)
{
- zebra_mac_t *mac = NULL;
+ struct zebra_mac *mac = NULL;
struct zebra_if *zif = NULL;
struct zebra_l2info_vxlan *vxl = NULL;
/*
* zebra_evpn_gw_macip_del_from_client
*/
-int zebra_evpn_gw_macip_del(struct interface *ifp, zebra_evpn_t *zevpn,
+int zebra_evpn_gw_macip_del(struct interface *ifp, struct zebra_evpn *zevpn,
struct ipaddr *ip)
{
- zebra_neigh_t *n = NULL;
- zebra_mac_t *mac = NULL;
+ struct zebra_neigh *n = NULL;
+ struct zebra_mac *mac = NULL;
/* If the neigh entry is not present nothing to do*/
n = zebra_evpn_neigh_lookup(zevpn, ip);
void zebra_evpn_gw_macip_del_for_evpn_hash(struct hash_bucket *bucket,
void *ctxt)
{
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
struct zebra_if *zif = NULL;
struct zebra_l2info_vxlan zl2_info;
struct interface *vlan_if = NULL;
struct interface *ifp;
/* Add primary SVI MAC*/
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
/* Global (Zvrf) advertise-default-gw is disabled,
* but zevpn advertise-default-gw is enabled
void zebra_evpn_gw_macip_add_for_evpn_hash(struct hash_bucket *bucket,
void *ctxt)
{
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
struct zebra_if *zif = NULL;
struct zebra_l2info_vxlan zl2_info;
struct interface *vlan_if = NULL;
struct interface *vrr_if = NULL;
struct interface *ifp = NULL;
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
ifp = zevpn->vxlan_if;
if (!ifp)
void zebra_evpn_svi_macip_del_for_evpn_hash(struct hash_bucket *bucket,
void *ctxt)
{
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
struct zebra_if *zif = NULL;
struct zebra_l2info_vxlan zl2_info;
struct interface *vlan_if = NULL;
struct interface *ifp;
/* Add primary SVI MAC*/
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
if (!zevpn)
return;
struct zebra_ns *zns = ns->info;
struct route_node *rn;
struct interface *br_if;
- zebra_evpn_t **p_zevpn = (zebra_evpn_t **)_p_zevpn;
- zebra_evpn_t *zevpn;
+ struct zebra_evpn **p_zevpn = (struct zebra_evpn **)_p_zevpn;
+ struct zebra_evpn *zevpn;
struct interface *tmp_if = NULL;
struct zebra_if *zif;
struct zebra_l2info_vxlan *vxl = NULL;
* Map port or (port, VLAN) to an EVPN. This is invoked upon getting MAC
* notifications, to see if they are of interest.
*/
-zebra_evpn_t *zebra_evpn_map_vlan(struct interface *ifp,
- struct interface *br_if, vlanid_t vid)
+struct zebra_evpn *zebra_evpn_map_vlan(struct interface *ifp,
+ struct interface *br_if, vlanid_t vid)
{
struct zebra_if *zif;
struct zebra_l2info_bridge *br;
- zebra_evpn_t **p_zevpn;
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn **p_zevpn;
+ struct zebra_evpn *zevpn = NULL;
struct zebra_from_svi_param in_param;
/* Determine if bridge is VLAN-aware or not */
struct zebra_ns *zns = ns->info;
struct route_node *rn;
struct interface *br_if;
- zebra_evpn_t **p_zevpn = (zebra_evpn_t **)_p_zevpn;
- zebra_evpn_t *zevpn;
+ struct zebra_evpn **p_zevpn = (struct zebra_evpn **)_p_zevpn;
+ struct zebra_evpn *zevpn;
struct interface *tmp_if = NULL;
struct zebra_if *zif;
struct zebra_l2info_vxlan *vxl = NULL;
* Map SVI and associated bridge to an EVPN. This is invoked upon getting
* neighbor notifications, to see if they are of interest.
*/
-zebra_evpn_t *zebra_evpn_from_svi(struct interface *ifp,
- struct interface *br_if)
+struct zebra_evpn *zebra_evpn_from_svi(struct interface *ifp,
+ struct interface *br_if)
{
struct zebra_l2info_bridge *br;
- zebra_evpn_t *zevpn = NULL;
- zebra_evpn_t **p_zevpn;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_evpn **p_zevpn;
struct zebra_if *zif;
struct zebra_from_svi_param in_param;
*/
void zebra_evpn_install_mac_hash(struct hash_bucket *bucket, void *ctxt)
{
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
struct mac_walk_ctx *wctx = ctxt;
- mac = (zebra_mac_t *)bucket->data;
+ mac = (struct zebra_mac *)bucket->data;
if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE))
zebra_evpn_rem_mac_install(wctx->zevpn, mac, false);
/*
* Read and populate local MACs and neighbors corresponding to this EVPN.
*/
-void zebra_evpn_read_mac_neigh(zebra_evpn_t *zevpn, struct interface *ifp)
+void zebra_evpn_read_mac_neigh(struct zebra_evpn *zevpn, struct interface *ifp)
{
struct zebra_ns *zns;
struct zebra_vrf *zvrf;
*/
unsigned int zebra_evpn_hash_keymake(const void *p)
{
- const zebra_evpn_t *zevpn = p;
+ const struct zebra_evpn *zevpn = p;
return (jhash_1word(zevpn->vni, 0));
}
*/
bool zebra_evpn_hash_cmp(const void *p1, const void *p2)
{
- const zebra_evpn_t *zevpn1 = p1;
- const zebra_evpn_t *zevpn2 = p2;
+ const struct zebra_evpn *zevpn1 = p1;
+ const struct zebra_evpn *zevpn2 = p2;
return (zevpn1->vni == zevpn2->vni);
}
int zebra_evpn_list_cmp(void *p1, void *p2)
{
- const zebra_evpn_t *zevpn1 = p1;
- const zebra_evpn_t *zevpn2 = p2;
+ const struct zebra_evpn *zevpn1 = p1;
+ const struct zebra_evpn *zevpn2 = p2;
if (zevpn1->vni == zevpn2->vni)
return 0;
*/
void *zebra_evpn_alloc(void *p)
{
- const zebra_evpn_t *tmp_vni = p;
- zebra_evpn_t *zevpn;
+ const struct zebra_evpn *tmp_vni = p;
+ struct zebra_evpn *zevpn;
- zevpn = XCALLOC(MTYPE_ZEVPN, sizeof(zebra_evpn_t));
+ zevpn = XCALLOC(MTYPE_ZEVPN, sizeof(struct zebra_evpn));
zevpn->vni = tmp_vni->vni;
return ((void *)zevpn);
}
/*
* Look up EVPN hash entry.
*/
-zebra_evpn_t *zebra_evpn_lookup(vni_t vni)
+struct zebra_evpn *zebra_evpn_lookup(vni_t vni)
{
struct zebra_vrf *zvrf;
- zebra_evpn_t tmp_vni;
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn tmp_vni;
+ struct zebra_evpn *zevpn = NULL;
zvrf = zebra_vrf_get_evpn();
assert(zvrf);
- memset(&tmp_vni, 0, sizeof(zebra_evpn_t));
+ memset(&tmp_vni, 0, sizeof(struct zebra_evpn));
tmp_vni.vni = vni;
zevpn = hash_lookup(zvrf->evpn_table, &tmp_vni);
/*
* Add EVPN hash entry.
*/
-zebra_evpn_t *zebra_evpn_add(vni_t vni)
+struct zebra_evpn *zebra_evpn_add(vni_t vni)
{
char buffer[80];
struct zebra_vrf *zvrf;
- zebra_evpn_t tmp_zevpn;
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn tmp_zevpn;
+ struct zebra_evpn *zevpn = NULL;
zvrf = zebra_vrf_get_evpn();
assert(zvrf);
- memset(&tmp_zevpn, 0, sizeof(zebra_evpn_t));
+ memset(&tmp_zevpn, 0, sizeof(struct zebra_evpn));
tmp_zevpn.vni = vni;
zevpn = hash_get(zvrf->evpn_table, &tmp_zevpn, zebra_evpn_alloc);
assert(zevpn);
/*
* Delete EVPN hash entry.
*/
-int zebra_evpn_del(zebra_evpn_t *zevpn)
+int zebra_evpn_del(struct zebra_evpn *zevpn)
{
struct zebra_vrf *zvrf;
- zebra_evpn_t *tmp_zevpn;
+ struct zebra_evpn *tmp_zevpn;
zvrf = zebra_vrf_get_evpn();
assert(zvrf);
/*
* Inform BGP about local EVPN addition.
*/
-int zebra_evpn_send_add_to_client(zebra_evpn_t *zevpn)
+int zebra_evpn_send_add_to_client(struct zebra_evpn *zevpn)
{
struct zserv *client;
struct stream *s;
/*
* Inform BGP about local EVPN deletion.
*/
-int zebra_evpn_send_del_to_client(zebra_evpn_t *zevpn)
+int zebra_evpn_send_del_to_client(struct zebra_evpn *zevpn)
{
struct zserv *client;
struct stream *s;
/*
* See if remote VTEP matches with prefix.
*/
-static int zebra_evpn_vtep_match(struct in_addr *vtep_ip, zebra_vtep_t *zvtep)
+static int zebra_evpn_vtep_match(struct in_addr *vtep_ip,
+ struct zebra_vtep *zvtep)
{
return (IPV4_ADDR_SAME(vtep_ip, &zvtep->vtep_ip));
}
/*
* Locate remote VTEP in EVPN hash table.
*/
-zebra_vtep_t *zebra_evpn_vtep_find(zebra_evpn_t *zevpn, struct in_addr *vtep_ip)
+struct zebra_vtep *zebra_evpn_vtep_find(struct zebra_evpn *zevpn,
+ struct in_addr *vtep_ip)
{
- zebra_vtep_t *zvtep;
+ struct zebra_vtep *zvtep;
if (!zevpn)
return NULL;
/*
* Add remote VTEP to EVPN hash table.
*/
-zebra_vtep_t *zebra_evpn_vtep_add(zebra_evpn_t *zevpn, struct in_addr *vtep_ip,
- int flood_control)
+struct zebra_vtep *zebra_evpn_vtep_add(struct zebra_evpn *zevpn,
+ struct in_addr *vtep_ip,
+ int flood_control)
{
- zebra_vtep_t *zvtep;
+ struct zebra_vtep *zvtep;
- zvtep = XCALLOC(MTYPE_ZEVPN_VTEP, sizeof(zebra_vtep_t));
+ zvtep = XCALLOC(MTYPE_ZEVPN_VTEP, sizeof(struct zebra_vtep));
zvtep->vtep_ip = *vtep_ip;
zvtep->flood_control = flood_control;
/*
* Remove remote VTEP from EVPN hash table.
*/
-int zebra_evpn_vtep_del(zebra_evpn_t *zevpn, zebra_vtep_t *zvtep)
+int zebra_evpn_vtep_del(struct zebra_evpn *zevpn, struct zebra_vtep *zvtep)
{
if (zvtep->next)
zvtep->next->prev = zvtep->prev;
* Delete all remote VTEPs for this EVPN (upon VNI delete). Also
* uninstall from kernel if asked to.
*/
-int zebra_evpn_vtep_del_all(zebra_evpn_t *zevpn, int uninstall)
+int zebra_evpn_vtep_del_all(struct zebra_evpn *zevpn, int uninstall)
{
- zebra_vtep_t *zvtep, *zvtep_next;
+ struct zebra_vtep *zvtep, *zvtep_next;
if (!zevpn)
return -1;
* Install remote VTEP into the kernel if the remote VTEP has asked
* for head-end-replication.
*/
-int zebra_evpn_vtep_install(zebra_evpn_t *zevpn, zebra_vtep_t *zvtep)
+int zebra_evpn_vtep_install(struct zebra_evpn *zevpn, struct zebra_vtep *zvtep)
{
if (is_vxlan_flooding_head_end() &&
(zvtep->flood_control == VXLAN_FLOOD_HEAD_END_REPL)) {
/*
* Uninstall remote VTEP from the kernel.
*/
-int zebra_evpn_vtep_uninstall(zebra_evpn_t *zevpn, struct in_addr *vtep_ip)
+int zebra_evpn_vtep_uninstall(struct zebra_evpn *zevpn, struct in_addr *vtep_ip)
{
if (!zevpn->vxlan_if) {
zlog_debug("VNI %u hash %p couldn't be uninstalled - no intf",
void zebra_evpn_handle_flooding_remote_vteps(struct hash_bucket *bucket,
void *zvrf)
{
- zebra_evpn_t *zevpn;
- zebra_vtep_t *zvtep;
+ struct zebra_evpn *zevpn;
+ struct zebra_vtep *zvtep;
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
if (!zevpn)
return;
*/
void zebra_evpn_cleanup_all(struct hash_bucket *bucket, void *arg)
{
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
/* Free up all neighbors and MACs, if any. */
zebra_evpn_neigh_del_all(zevpn, 1, 0, DEL_ALL_NEIGH);
zebra_evpn_del(zevpn);
}
-static void zebra_evpn_process_sync_macip_add(zebra_evpn_t *zevpn,
+static void zebra_evpn_process_sync_macip_add(struct zebra_evpn *zevpn,
const struct ethaddr *macaddr,
uint16_t ipa_len,
const struct ipaddr *ipaddr,
char ipbuf[INET6_ADDRSTRLEN];
bool sticky;
bool remote_gw;
- zebra_neigh_t *n = NULL;
+ struct zebra_neigh *n = NULL;
sticky = !!CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_STICKY);
remote_gw = !!CHECK_FLAG(flags, ZEBRA_MACIP_TYPE_GW);
uint8_t flags, uint32_t seq,
struct in_addr vtep_ip, const esi_t *esi)
{
- zebra_evpn_t *zevpn;
- zebra_vtep_t *zvtep;
- zebra_mac_t *mac = NULL;
+ struct zebra_evpn *zevpn;
+ struct zebra_vtep *zvtep;
+ struct zebra_mac *mac = NULL;
struct interface *ifp = NULL;
struct zebra_if *zif = NULL;
struct zebra_vrf *zvrf;
uint16_t ipa_len, const struct ipaddr *ipaddr,
struct in_addr vtep_ip)
{
- zebra_evpn_t *zevpn;
- zebra_mac_t *mac = NULL;
- zebra_neigh_t *n = NULL;
+ struct zebra_evpn *zevpn;
+ struct zebra_mac *mac = NULL;
+ struct zebra_neigh *n = NULL;
struct interface *ifp = NULL;
struct zebra_if *zif = NULL;
struct zebra_ns *zns;
/************************** EVPN BGP config management ************************/
void zebra_evpn_cfg_cleanup(struct hash_bucket *bucket, void *ctxt)
{
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
zevpn->advertise_gw_macip = 0;
zevpn->advertise_svi_macip = 0;
zevpn->advertise_subnet = 0;
extern "C" {
#endif
-typedef struct zebra_evpn_t_ zebra_evpn_t;
-typedef struct zebra_vtep_t_ zebra_vtep_t;
-
RB_HEAD(zebra_es_evi_rb_head, zebra_evpn_es_evi);
RB_PROTOTYPE(zebra_es_evi_rb_head, zebra_evpn_es_evi, rb_node,
zebra_es_evi_rb_cmp);
*
* Right now, this just has each remote VTEP's IP address.
*/
-struct zebra_vtep_t_ {
+struct zebra_vtep {
/* Remote IP. */
/* NOTE: Can only be IPv4 right now. */
struct in_addr vtep_ip;
int flood_control;
/* Links. */
- struct zebra_vtep_t_ *next;
- struct zebra_vtep_t_ *prev;
+ struct zebra_vtep *next;
+ struct zebra_vtep *prev;
};
/*
* Contains information pertaining to a VNI:
* - the list of remote VTEPs (with this VNI)
*/
-struct zebra_evpn_t_ {
+struct zebra_evpn {
/* VNI - key */
vni_t vni;
struct interface *svi_if;
/* List of remote VTEPs */
- zebra_vtep_t *vteps;
+ struct zebra_vtep *vteps;
/* Local IP */
struct in_addr local_vtep_ip;
struct interface *zvni_map_to_svi(vlanid_t vid, struct interface *br_if);
-static inline struct interface *zevpn_map_to_svi(zebra_evpn_t *zevpn)
+static inline struct interface *zevpn_map_to_svi(struct zebra_evpn *zevpn)
{
struct interface *ifp;
struct zebra_if *zif = NULL;
return zvni_map_to_svi(zl2_info.access_vlan, zif->brslave_info.br_if);
}
-int advertise_gw_macip_enabled(zebra_evpn_t *zevpn);
-int advertise_svi_macip_enabled(zebra_evpn_t *zevpn);
-void zebra_evpn_print(zebra_evpn_t *zevpn, void **ctxt);
+int advertise_gw_macip_enabled(struct zebra_evpn *zevpn);
+int advertise_svi_macip_enabled(struct zebra_evpn *zevpn);
+void zebra_evpn_print(struct zebra_evpn *zevpn, void **ctxt);
void zebra_evpn_print_hash(struct hash_bucket *bucket, void *ctxt[]);
void zebra_evpn_print_hash_detail(struct hash_bucket *bucket, void *data);
-int zebra_evpn_add_macip_for_intf(struct interface *ifp, zebra_evpn_t *zevpn);
-int zebra_evpn_del_macip_for_intf(struct interface *ifp, zebra_evpn_t *zevpn);
-int zebra_evpn_advertise_subnet(zebra_evpn_t *zevpn, struct interface *ifp,
+int zebra_evpn_add_macip_for_intf(struct interface *ifp,
+ struct zebra_evpn *zevpn);
+int zebra_evpn_del_macip_for_intf(struct interface *ifp,
+ struct zebra_evpn *zevpn);
+int zebra_evpn_advertise_subnet(struct zebra_evpn *zevpn, struct interface *ifp,
int advertise);
-int zebra_evpn_gw_macip_add(struct interface *ifp, zebra_evpn_t *zevpn,
+int zebra_evpn_gw_macip_add(struct interface *ifp, struct zebra_evpn *zevpn,
struct ethaddr *macaddr, struct ipaddr *ip);
-int zebra_evpn_gw_macip_del(struct interface *ifp, zebra_evpn_t *zevpn,
+int zebra_evpn_gw_macip_del(struct interface *ifp, struct zebra_evpn *zevpn,
struct ipaddr *ip);
void zebra_evpn_gw_macip_del_for_evpn_hash(struct hash_bucket *bucket,
void *ctxt);
void *ctxt);
void zebra_evpn_svi_macip_del_for_evpn_hash(struct hash_bucket *bucket,
void *ctxt);
-zebra_evpn_t *zebra_evpn_map_vlan(struct interface *ifp,
- struct interface *br_if, vlanid_t vid);
-zebra_evpn_t *zebra_evpn_from_svi(struct interface *ifp,
- struct interface *br_if);
+struct zebra_evpn *zebra_evpn_map_vlan(struct interface *ifp,
+ struct interface *br_if, vlanid_t vid);
+struct zebra_evpn *zebra_evpn_from_svi(struct interface *ifp,
+ struct interface *br_if);
struct interface *zebra_evpn_map_to_macvlan(struct interface *br_if,
struct interface *svi_if);
void zebra_evpn_install_mac_hash(struct hash_bucket *bucket, void *ctxt);
-void zebra_evpn_read_mac_neigh(zebra_evpn_t *zevpn, struct interface *ifp);
+void zebra_evpn_read_mac_neigh(struct zebra_evpn *zevpn, struct interface *ifp);
unsigned int zebra_evpn_hash_keymake(const void *p);
bool zebra_evpn_hash_cmp(const void *p1, const void *p2);
int zebra_evpn_list_cmp(void *p1, void *p2);
void *zebra_evpn_alloc(void *p);
-zebra_evpn_t *zebra_evpn_lookup(vni_t vni);
-zebra_evpn_t *zebra_evpn_add(vni_t vni);
-int zebra_evpn_del(zebra_evpn_t *zevpn);
-int zebra_evpn_send_add_to_client(zebra_evpn_t *zevpn);
-int zebra_evpn_send_del_to_client(zebra_evpn_t *zevpn);
-zebra_vtep_t *zebra_evpn_vtep_find(zebra_evpn_t *zevpn,
- struct in_addr *vtep_ip);
-zebra_vtep_t *zebra_evpn_vtep_add(zebra_evpn_t *zevpn, struct in_addr *vtep_ip,
- int flood_control);
-int zebra_evpn_vtep_del(zebra_evpn_t *zevpn, zebra_vtep_t *zvtep);
-int zebra_evpn_vtep_del_all(zebra_evpn_t *zevpn, int uninstall);
-int zebra_evpn_vtep_install(zebra_evpn_t *zevpn, zebra_vtep_t *zvtep);
-int zebra_evpn_vtep_uninstall(zebra_evpn_t *zevpn, struct in_addr *vtep_ip);
+struct zebra_evpn *zebra_evpn_lookup(vni_t vni);
+struct zebra_evpn *zebra_evpn_add(vni_t vni);
+int zebra_evpn_del(struct zebra_evpn *zevpn);
+int zebra_evpn_send_add_to_client(struct zebra_evpn *zevpn);
+int zebra_evpn_send_del_to_client(struct zebra_evpn *zevpn);
+struct zebra_vtep *zebra_evpn_vtep_find(struct zebra_evpn *zevpn,
+ struct in_addr *vtep_ip);
+struct zebra_vtep *zebra_evpn_vtep_add(struct zebra_evpn *zevpn,
+ struct in_addr *vtep_ip,
+ int flood_control);
+int zebra_evpn_vtep_del(struct zebra_evpn *zevpn, struct zebra_vtep *zvtep);
+int zebra_evpn_vtep_del_all(struct zebra_evpn *zevpn, int uninstall);
+int zebra_evpn_vtep_install(struct zebra_evpn *zevpn, struct zebra_vtep *zvtep);
+int zebra_evpn_vtep_uninstall(struct zebra_evpn *zevpn,
+ struct in_addr *vtep_ip);
void zebra_evpn_handle_flooding_remote_vteps(struct hash_bucket *bucket,
void *zvrf);
void zebra_evpn_cleanup_all(struct hash_bucket *bucket, void *arg);
* Return number of valid MACs in an EVPN's MAC hash table - all
* remote MACs and non-internal (auto) local MACs count.
*/
-uint32_t num_valid_macs(zebra_evpn_t *zevpn)
+uint32_t num_valid_macs(struct zebra_evpn *zevpn)
{
unsigned int i;
uint32_t num_macs = 0;
struct hash *hash;
struct hash_bucket *hb;
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
hash = zevpn->mac_table;
if (!hash)
return num_macs;
for (i = 0; i < hash->size; i++) {
for (hb = hash->index[i]; hb; hb = hb->next) {
- mac = (zebra_mac_t *)hb->data;
+ mac = (struct zebra_mac *)hb->data;
if (CHECK_FLAG(mac->flags, ZEBRA_MAC_REMOTE)
|| CHECK_FLAG(mac->flags, ZEBRA_MAC_LOCAL)
|| !CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO))
return num_macs;
}
-uint32_t num_dup_detected_macs(zebra_evpn_t *zevpn)
+uint32_t num_dup_detected_macs(struct zebra_evpn *zevpn)
{
unsigned int i;
uint32_t num_macs = 0;
struct hash *hash;
struct hash_bucket *hb;
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
hash = zevpn->mac_table;
if (!hash)
return num_macs;
for (i = 0; i < hash->size; i++) {
for (hb = hash->index[i]; hb; hb = hb->next) {
- mac = (zebra_mac_t *)hb->data;
+ mac = (struct zebra_mac *)hb->data;
if (CHECK_FLAG(mac->flags, ZEBRA_MAC_DUPLICATE))
num_macs++;
}
}
/* Unlink local mac from a destination access port */
-static void zebra_evpn_mac_ifp_unlink(zebra_mac_t *zmac)
+static void zebra_evpn_mac_ifp_unlink(struct zebra_mac *zmac)
{
struct zebra_if *zif;
struct interface *ifp = zmac->ifp;
* local mac is associated with a zero ESI i.e. single attach or lacp-bypass
* bridge port member
*/
-static void zebra_evpn_mac_ifp_link(zebra_mac_t *zmac, struct interface *ifp)
+static void zebra_evpn_mac_ifp_link(struct zebra_mac *zmac,
+ struct interface *ifp)
{
struct zebra_if *zif;
}
/* If the mac is a local mac clear links to destination access port */
-void zebra_evpn_mac_clear_fwd_info(zebra_mac_t *zmac)
+void zebra_evpn_mac_clear_fwd_info(struct zebra_mac *zmac)
{
zebra_evpn_mac_ifp_unlink(zmac);
memset(&zmac->fwd_info, 0, sizeof(zmac->fwd_info));
/*
* Install remote MAC into the forwarding plane.
*/
-int zebra_evpn_rem_mac_install(zebra_evpn_t *zevpn, zebra_mac_t *mac,
+int zebra_evpn_rem_mac_install(struct zebra_evpn *zevpn, struct zebra_mac *mac,
bool was_static)
{
const struct zebra_if *zif, *br_zif;
/*
* Uninstall remote MAC from the forwarding plane.
*/
-int zebra_evpn_rem_mac_uninstall(zebra_evpn_t *zevpn, zebra_mac_t *mac,
- bool force)
+int zebra_evpn_rem_mac_uninstall(struct zebra_evpn *zevpn,
+ struct zebra_mac *mac, bool force)
{
const struct zebra_if *zif, *br_zif;
const struct zebra_l2info_vxlan *vxl;
* Decrement neighbor refcount of MAC; uninstall and free it if
* appropriate.
*/
-void zebra_evpn_deref_ip2mac(zebra_evpn_t *zevpn, zebra_mac_t *mac)
+void zebra_evpn_deref_ip2mac(struct zebra_evpn *zevpn, struct zebra_mac *mac)
{
if (!CHECK_FLAG(mac->flags, ZEBRA_MAC_AUTO))
return;
zebra_evpn_mac_del(zevpn, mac);
}
-static void zebra_evpn_mac_get_access_info(zebra_mac_t *mac,
+static void zebra_evpn_mac_get_access_info(struct zebra_mac *mac,
struct interface **ifpP,
vlanid_t *vid)
{
}
#define MAC_BUF_SIZE 256
-static char *zebra_evpn_zebra_mac_flag_dump(struct zebra_mac_t_ *mac, char *buf,
+static char *zebra_evpn_zebra_mac_flag_dump(struct zebra_mac *mac, char *buf,
size_t len)
{
if (mac->flags == 0) {
static int zebra_evpn_dad_mac_auto_recovery_exp(struct thread *t)
{
struct zebra_vrf *zvrf = NULL;
- zebra_mac_t *mac = NULL;
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_mac *mac = NULL;
+ struct zebra_evpn *zevpn = NULL;
struct listnode *node = NULL;
- zebra_neigh_t *nbr = NULL;
+ struct zebra_neigh *nbr = NULL;
mac = THREAD_ARG(t);
}
static void zebra_evpn_dup_addr_detect_for_mac(struct zebra_vrf *zvrf,
- zebra_mac_t *mac,
+ struct zebra_mac *mac,
struct in_addr vtep_ip,
bool do_dad, bool *is_dup_detect,
bool is_local)
{
- zebra_neigh_t *nbr;
+ struct zebra_neigh *nbr;
struct listnode *node = NULL;
struct timeval elapsed = {0, 0};
bool reset_params = false;
/*
* Print a specific MAC entry.
*/
-void zebra_evpn_print_mac(zebra_mac_t *mac, void *ctxt, json_object *json)
+void zebra_evpn_print_mac(struct zebra_mac *mac, void *ctxt, json_object *json)
{
struct vty *vty;
- zebra_neigh_t *n = NULL;
+ struct zebra_neigh *n = NULL;
struct listnode *node = NULL;
char buf1[ETHER_ADDR_STRLEN];
char buf2[INET6_ADDRSTRLEN];
}
}
-static char *zebra_evpn_print_mac_flags(zebra_mac_t *mac, char *flags_buf,
+static char *zebra_evpn_print_mac_flags(struct zebra_mac *mac, char *flags_buf,
size_t flags_buf_sz)
{
snprintf(flags_buf, flags_buf_sz, "%s%s%s%s",
{
struct vty *vty;
json_object *json_mac_hdr = NULL, *json_mac = NULL;
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
char buf1[ETHER_ADDR_STRLEN];
char addr_buf[PREFIX_STRLEN];
struct mac_walk_ctx *wctx = ctxt;
vty = wctx->vty;
json_mac_hdr = wctx->json;
- mac = (zebra_mac_t *)bucket->data;
+ mac = (struct zebra_mac *)bucket->data;
prefix_mac2str(&mac->macaddr, buf1, sizeof(buf1));
{
struct vty *vty;
json_object *json_mac_hdr = NULL;
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
struct mac_walk_ctx *wctx = ctxt;
char buf1[ETHER_ADDR_STRLEN];
vty = wctx->vty;
json_mac_hdr = wctx->json;
- mac = (zebra_mac_t *)bucket->data;
+ mac = (struct zebra_mac *)bucket->data;
if (!mac)
return;
static unsigned int mac_hash_keymake(const void *p)
{
- const zebra_mac_t *pmac = p;
+ const struct zebra_mac *pmac = p;
const void *pnt = (void *)pmac->macaddr.octet;
return jhash(pnt, ETH_ALEN, 0xa5a5a55a);
*/
static bool mac_cmp(const void *p1, const void *p2)
{
- const zebra_mac_t *pmac1 = p1;
- const zebra_mac_t *pmac2 = p2;
+ const struct zebra_mac *pmac1 = p1;
+ const struct zebra_mac *pmac2 = p2;
if (pmac1 == NULL && pmac2 == NULL)
return true;
*/
static void *zebra_evpn_mac_alloc(void *p)
{
- const zebra_mac_t *tmp_mac = p;
- zebra_mac_t *mac;
+ const struct zebra_mac *tmp_mac = p;
+ struct zebra_mac *mac;
- mac = XCALLOC(MTYPE_MAC, sizeof(zebra_mac_t));
+ mac = XCALLOC(MTYPE_MAC, sizeof(struct zebra_mac));
*mac = *tmp_mac;
return ((void *)mac);
/*
* Add MAC entry.
*/
-zebra_mac_t *zebra_evpn_mac_add(zebra_evpn_t *zevpn,
- const struct ethaddr *macaddr)
+struct zebra_mac *zebra_evpn_mac_add(struct zebra_evpn *zevpn,
+ const struct ethaddr *macaddr)
{
- zebra_mac_t tmp_mac;
- zebra_mac_t *mac = NULL;
+ struct zebra_mac tmp_mac;
+ struct zebra_mac *mac = NULL;
- memset(&tmp_mac, 0, sizeof(zebra_mac_t));
+ memset(&tmp_mac, 0, sizeof(struct zebra_mac));
memcpy(&tmp_mac.macaddr, macaddr, ETH_ALEN);
mac = hash_get(zevpn->mac_table, &tmp_mac, zebra_evpn_mac_alloc);
assert(mac);
/*
* Delete MAC entry.
*/
-int zebra_evpn_mac_del(zebra_evpn_t *zevpn, zebra_mac_t *mac)
+int zebra_evpn_mac_del(struct zebra_evpn *zevpn, struct zebra_mac *mac)
{
- zebra_mac_t *tmp_mac;
+ struct zebra_mac *tmp_mac;
if (IS_ZEBRA_DEBUG_VXLAN || IS_ZEBRA_DEBUG_EVPN_MH_MAC) {
char mac_buf[MAC_BUF_SIZE];
}
static bool zebra_evpn_check_mac_del_from_db(struct mac_walk_ctx *wctx,
- zebra_mac_t *mac)
+ struct zebra_mac *mac)
{
if ((wctx->flags & DEL_LOCAL_MAC) && (mac->flags & ZEBRA_MAC_LOCAL))
return true;
static void zebra_evpn_mac_del_hash_entry(struct hash_bucket *bucket, void *arg)
{
struct mac_walk_ctx *wctx = arg;
- zebra_mac_t *mac = bucket->data;
+ struct zebra_mac *mac = bucket->data;
if (zebra_evpn_check_mac_del_from_db(wctx, mac)) {
if (wctx->upd_client && (mac->flags & ZEBRA_MAC_LOCAL)) {
/*
* Delete all MAC entries for this EVPN.
*/
-void zebra_evpn_mac_del_all(zebra_evpn_t *zevpn, int uninstall, int upd_client,
- uint32_t flags)
+void zebra_evpn_mac_del_all(struct zebra_evpn *zevpn, int uninstall,
+ int upd_client, uint32_t flags)
{
struct mac_walk_ctx wctx;
/*
* Look up MAC hash entry.
*/
-zebra_mac_t *zebra_evpn_mac_lookup(zebra_evpn_t *zevpn,
- const struct ethaddr *mac)
+struct zebra_mac *zebra_evpn_mac_lookup(struct zebra_evpn *zevpn,
+ const struct ethaddr *mac)
{
- zebra_mac_t tmp;
- zebra_mac_t *pmac;
+ struct zebra_mac tmp;
+ struct zebra_mac *pmac;
memset(&tmp, 0, sizeof(tmp));
memcpy(&tmp.macaddr, mac, ETH_ALEN);
}
/* program sync mac flags in the dataplane */
-int zebra_evpn_sync_mac_dp_install(zebra_mac_t *mac, bool set_inactive,
+int zebra_evpn_sync_mac_dp_install(struct zebra_mac *mac, bool set_inactive,
bool force_clear_static, const char *caller)
{
struct interface *ifp;
bool sticky;
bool set_static;
- zebra_evpn_t *zevpn = mac->zevpn;
+ struct zebra_evpn *zevpn = mac->zevpn;
vlanid_t vid;
struct zebra_if *zif;
struct interface *br_ifp;
return 0;
}
-void zebra_evpn_mac_send_add_del_to_client(zebra_mac_t *mac, bool old_bgp_ready,
+void zebra_evpn_mac_send_add_del_to_client(struct zebra_mac *mac,
+ bool old_bgp_ready,
bool new_bgp_ready)
{
if (new_bgp_ready)
*/
static int zebra_evpn_mac_hold_exp_cb(struct thread *t)
{
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
bool old_bgp_ready;
bool new_bgp_ready;
bool old_static;
return 0;
}
-static inline void zebra_evpn_mac_start_hold_timer(zebra_mac_t *mac)
+static inline void zebra_evpn_mac_start_hold_timer(struct zebra_mac *mac)
{
if (mac->hold_timer)
return;
zmh_info->mac_hold_time, &mac->hold_timer);
}
-void zebra_evpn_mac_stop_hold_timer(zebra_mac_t *mac)
+void zebra_evpn_mac_stop_hold_timer(struct zebra_mac *mac)
{
if (!mac->hold_timer)
return;
THREAD_OFF(mac->hold_timer);
}
-void zebra_evpn_sync_mac_del(zebra_mac_t *mac)
+void zebra_evpn_sync_mac_del(struct zebra_mac *mac)
{
bool old_static;
bool new_static;
__func__);
}
-static inline bool zebra_evpn_mac_is_bgp_seq_ok(zebra_evpn_t *zevpn,
- zebra_mac_t *mac, uint32_t seq,
- uint16_t ipa_len,
+static inline bool zebra_evpn_mac_is_bgp_seq_ok(struct zebra_evpn *zevpn,
+ struct zebra_mac *mac,
+ uint32_t seq, uint16_t ipa_len,
const struct ipaddr *ipaddr,
bool sync)
{
return true;
}
-zebra_mac_t *zebra_evpn_proc_sync_mac_update(
- zebra_evpn_t *zevpn, const struct ethaddr *macaddr, uint16_t ipa_len,
- const struct ipaddr *ipaddr, uint8_t flags, uint32_t seq,
- const esi_t *esi, struct sync_mac_ip_ctx *ctx)
+struct zebra_mac *zebra_evpn_proc_sync_mac_update(
+ struct zebra_evpn *zevpn, const struct ethaddr *macaddr,
+ uint16_t ipa_len, const struct ipaddr *ipaddr, uint8_t flags,
+ uint32_t seq, const esi_t *esi, struct sync_mac_ip_ctx *ctx)
{
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
bool inform_bgp = false;
bool inform_dataplane = false;
bool seq_change = false;
if (IS_ZEBRA_DEBUG_EVPN_MH_MAC && (old_flags != new_flags)) {
char mac_buf[MAC_BUF_SIZE], omac_buf[MAC_BUF_SIZE];
- struct zebra_mac_t_ omac;
+ struct zebra_mac omac;
omac.flags = old_flags;
zlog_debug(
/* update local fowarding info. return true if a dest-ES change
* is detected
*/
-static bool zebra_evpn_local_mac_update_fwd_info(zebra_mac_t *mac,
+static bool zebra_evpn_local_mac_update_fwd_info(struct zebra_mac *mac,
struct interface *ifp,
vlanid_t vid)
{
void *arg)
{
struct mac_walk_ctx *wctx = arg;
- zebra_mac_t *zmac = bucket->data;
+ struct zebra_mac *zmac = bucket->data;
if (CHECK_FLAG(zmac->flags, ZEBRA_MAC_DEF_GW))
return;
}
/* Iterator to Notify Local MACs of a EVPN */
-void zebra_evpn_send_mac_list_to_client(zebra_evpn_t *zevpn)
+void zebra_evpn_send_mac_list_to_client(struct zebra_evpn *zevpn)
{
struct mac_walk_ctx wctx;
&wctx);
}
-void zebra_evpn_rem_mac_del(zebra_evpn_t *zevpn, zebra_mac_t *mac)
+void zebra_evpn_rem_mac_del(struct zebra_evpn *zevpn, struct zebra_mac *mac)
{
zebra_evpn_process_neigh_on_remote_mac_del(zevpn, mac);
/* the remote sequence number in the auto mac entry
/* Print Duplicate MAC */
void zebra_evpn_print_dad_mac_hash(struct hash_bucket *bucket, void *ctxt)
{
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
- mac = (zebra_mac_t *)bucket->data;
+ mac = (struct zebra_mac *)bucket->data;
if (!mac)
return;
void zebra_evpn_print_dad_mac_hash_detail(struct hash_bucket *bucket,
void *ctxt)
{
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
- mac = (zebra_mac_t *)bucket->data;
+ mac = (struct zebra_mac *)bucket->data;
if (!mac)
return;
zebra_evpn_print_mac_hash_detail(bucket, ctxt);
}
-int zebra_evpn_mac_remote_macip_add(zebra_evpn_t *zevpn, struct zebra_vrf *zvrf,
- const struct ethaddr *macaddr,
- uint16_t ipa_len,
- const struct ipaddr *ipaddr,
- zebra_mac_t **macp, struct in_addr vtep_ip,
- uint8_t flags, uint32_t seq,
- const esi_t *esi)
+int zebra_evpn_mac_remote_macip_add(
+ struct zebra_evpn *zevpn, struct zebra_vrf *zvrf,
+ const struct ethaddr *macaddr, uint16_t ipa_len,
+ const struct ipaddr *ipaddr, struct zebra_mac **macp,
+ struct in_addr vtep_ip, uint8_t flags, uint32_t seq, const esi_t *esi)
{
char buf1[INET6_ADDRSTRLEN];
bool sticky;
bool is_dup_detect = false;
esi_t *old_esi;
bool old_static = false;
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
bool old_es_present;
bool new_es_present;
return 0;
}
-int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, zebra_evpn_t *zevpn,
+int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf,
+ struct zebra_evpn *zevpn,
struct interface *ifp,
const struct ethaddr *macaddr, vlanid_t vid,
bool sticky, bool local_inactive,
- bool dp_static, zebra_mac_t *mac)
+ bool dp_static, struct zebra_mac *mac)
{
bool mac_sticky = false;
bool inform_client = false;
return 0;
}
-int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, zebra_mac_t *mac,
+int zebra_evpn_del_local_mac(struct zebra_evpn *zevpn, struct zebra_mac *mac,
bool clear_static)
{
bool old_bgp_ready;
return 0;
}
-int zebra_evpn_mac_gw_macip_add(struct interface *ifp, zebra_evpn_t *zevpn,
- const struct ipaddr *ip, zebra_mac_t **macp,
+int zebra_evpn_mac_gw_macip_add(struct interface *ifp, struct zebra_evpn *zevpn,
+ const struct ipaddr *ip,
+ struct zebra_mac **macp,
const struct ethaddr *macaddr, vlanid_t vlan_id,
bool def_gw)
{
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
ns_id_t local_ns_id = NS_DEFAULT;
struct zebra_vrf *zvrf;
return 0;
}
-void zebra_evpn_mac_svi_del(struct interface *ifp, zebra_evpn_t *zevpn)
+void zebra_evpn_mac_svi_del(struct interface *ifp, struct zebra_evpn *zevpn)
{
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
struct ethaddr macaddr;
bool old_bgp_ready;
}
}
-void zebra_evpn_mac_svi_add(struct interface *ifp, zebra_evpn_t *zevpn)
+void zebra_evpn_mac_svi_add(struct interface *ifp, struct zebra_evpn *zevpn)
{
- zebra_mac_t *mac = NULL;
+ struct zebra_mac *mac = NULL;
struct ethaddr macaddr;
struct zebra_if *zif = ifp->info;
bool old_bgp_ready;
extern "C" {
#endif
-typedef struct zebra_mac_t_ zebra_mac_t;
struct host_rb_entry {
RB_ENTRY(host_rb_entry) hl_entry;
* information. The correct VNI will be obtained as zebra maintains
* the mapping (of VLAN to VNI).
*/
-struct zebra_mac_t_ {
+struct zebra_mac {
/* MAC address. */
struct ethaddr macaddr;
(ZEBRA_MAC_ES_PEER_PROXY | ZEBRA_MAC_ES_PEER_ACTIVE)
/* back pointer to zevpn */
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
/* Local or remote info.
* Note: fwd_info is only relevant if mac->es is NULL.
* Context for MAC hash walk - used by callbacks.
*/
struct mac_walk_ctx {
- zebra_evpn_t *zevpn; /* EVPN hash */
+ struct zebra_evpn *zevpn; /* EVPN hash */
struct zebra_vrf *zvrf; /* VRF - for client notification. */
int uninstall; /* uninstall from kernel? */
int upd_client; /* uninstall from client? */
bool mac_created;
bool mac_inactive;
bool mac_dp_update_deferred;
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
};
/**************************** SYNC MAC handling *****************************/
* peer we cannot let it age out i.e. we set the static bit
* in the dataplane
*/
-static inline bool zebra_evpn_mac_is_static(zebra_mac_t *mac)
+static inline bool zebra_evpn_mac_is_static(struct zebra_mac *mac)
{
return ((mac->flags & ZEBRA_MAC_ALL_PEER_FLAGS) || mac->sync_neigh_cnt);
}
|| (flags & ZEBRA_MAC_ES_PEER_ACTIVE));
}
-void zebra_evpn_mac_stop_hold_timer(zebra_mac_t *mac);
+void zebra_evpn_mac_stop_hold_timer(struct zebra_mac *mac);
-static inline void zebra_evpn_mac_clear_sync_info(zebra_mac_t *mac)
+static inline void zebra_evpn_mac_clear_sync_info(struct zebra_mac *mac)
{
UNSET_FLAG(mac->flags, ZEBRA_MAC_ALL_PEER_FLAGS);
zebra_evpn_mac_stop_hold_timer(mac);
}
-static inline bool zebra_evpn_mac_in_use(zebra_mac_t *mac)
+static inline bool zebra_evpn_mac_in_use(struct zebra_mac *mac)
{
return !list_isempty(mac->neigh_list)
|| CHECK_FLAG(mac->flags, ZEBRA_MAC_SVI);
}
struct hash *zebra_mac_db_create(const char *desc);
-uint32_t num_valid_macs(zebra_evpn_t *zevi);
-uint32_t num_dup_detected_macs(zebra_evpn_t *zevi);
-int zebra_evpn_rem_mac_uninstall(zebra_evpn_t *zevi, zebra_mac_t *mac,
+uint32_t num_valid_macs(struct zebra_evpn *zevi);
+uint32_t num_dup_detected_macs(struct zebra_evpn *zevi);
+int zebra_evpn_rem_mac_uninstall(struct zebra_evpn *zevi, struct zebra_mac *mac,
bool force);
-int zebra_evpn_rem_mac_install(zebra_evpn_t *zevi, zebra_mac_t *mac,
+int zebra_evpn_rem_mac_install(struct zebra_evpn *zevi, struct zebra_mac *mac,
bool was_static);
-void zebra_evpn_deref_ip2mac(zebra_evpn_t *zevi, zebra_mac_t *mac);
-zebra_mac_t *zebra_evpn_mac_lookup(zebra_evpn_t *zevi,
- const struct ethaddr *mac);
-zebra_mac_t *zebra_evpn_mac_add(zebra_evpn_t *zevi,
- const struct ethaddr *macaddr);
-int zebra_evpn_mac_del(zebra_evpn_t *zevi, zebra_mac_t *mac);
+void zebra_evpn_deref_ip2mac(struct zebra_evpn *zevi, struct zebra_mac *mac);
+struct zebra_mac *zebra_evpn_mac_lookup(struct zebra_evpn *zevi,
+ const struct ethaddr *mac);
+struct zebra_mac *zebra_evpn_mac_add(struct zebra_evpn *zevi,
+ const struct ethaddr *macaddr);
+int zebra_evpn_mac_del(struct zebra_evpn *zevi, struct zebra_mac *mac);
int zebra_evpn_macip_send_msg_to_client(uint32_t id,
const struct ethaddr *macaddr,
const struct ipaddr *ip, uint8_t flags,
uint32_t seq, int state,
struct zebra_evpn_es *es, uint16_t cmd);
-void zebra_evpn_print_mac(zebra_mac_t *mac, void *ctxt, json_object *json);
+void zebra_evpn_print_mac(struct zebra_mac *mac, void *ctxt, json_object *json);
void zebra_evpn_print_mac_hash(struct hash_bucket *bucket, void *ctxt);
void zebra_evpn_print_mac_hash_detail(struct hash_bucket *bucket, void *ctxt);
-int zebra_evpn_sync_mac_dp_install(zebra_mac_t *mac, bool set_inactive,
+int zebra_evpn_sync_mac_dp_install(struct zebra_mac *mac, bool set_inactive,
bool force_clear_static, const char *caller);
-void zebra_evpn_mac_send_add_del_to_client(zebra_mac_t *mac, bool old_bgp_ready,
+void zebra_evpn_mac_send_add_del_to_client(struct zebra_mac *mac,
+ bool old_bgp_ready,
bool new_bgp_ready);
-void zebra_evpn_mac_del_all(zebra_evpn_t *zevi, int uninstall, int upd_client,
- uint32_t flags);
+void zebra_evpn_mac_del_all(struct zebra_evpn *zevi, int uninstall,
+ int upd_client, uint32_t flags);
int zebra_evpn_mac_send_add_to_client(vni_t vni, const struct ethaddr *macaddr,
uint32_t mac_flags, uint32_t seq,
struct zebra_evpn_es *es);
int zebra_evpn_mac_send_del_to_client(vni_t vni, const struct ethaddr *macaddr,
uint32_t flags, bool force);
-void zebra_evpn_send_mac_list_to_client(zebra_evpn_t *zevi);
-zebra_mac_t *zebra_evpn_proc_sync_mac_update(
- zebra_evpn_t *zevi, const struct ethaddr *macaddr, uint16_t ipa_len,
- const struct ipaddr *ipaddr, uint8_t flags, uint32_t seq,
- const esi_t *esi, struct sync_mac_ip_ctx *ctx);
-void zebra_evpn_sync_mac_del(zebra_mac_t *mac);
-void zebra_evpn_rem_mac_del(zebra_evpn_t *zevi, zebra_mac_t *mac);
+void zebra_evpn_send_mac_list_to_client(struct zebra_evpn *zevi);
+struct zebra_mac *zebra_evpn_proc_sync_mac_update(
+ struct zebra_evpn *zevi, const struct ethaddr *macaddr,
+ uint16_t ipa_len, const struct ipaddr *ipaddr, uint8_t flags,
+ uint32_t seq, const esi_t *esi, struct sync_mac_ip_ctx *ctx);
+void zebra_evpn_sync_mac_del(struct zebra_mac *mac);
+void zebra_evpn_rem_mac_del(struct zebra_evpn *zevi, struct zebra_mac *mac);
void zebra_evpn_print_dad_mac_hash(struct hash_bucket *bucket, void *ctxt);
void zebra_evpn_print_dad_mac_hash_detail(struct hash_bucket *bucket,
void *ctxt);
-int zebra_evpn_mac_remote_macip_add(zebra_evpn_t *zevpn, struct zebra_vrf *zvrf,
- const struct ethaddr *macaddr,
- uint16_t ipa_len,
- const struct ipaddr *ipaddr,
- zebra_mac_t **macp, struct in_addr vtep_ip,
- uint8_t flags, uint32_t seq,
- const esi_t *esi);
-
-int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf, zebra_evpn_t *zevpn,
+int zebra_evpn_mac_remote_macip_add(
+ struct zebra_evpn *zevpn, struct zebra_vrf *zvrf,
+ const struct ethaddr *macaddr, uint16_t ipa_len,
+ const struct ipaddr *ipaddr, struct zebra_mac **macp,
+ struct in_addr vtep_ip, uint8_t flags, uint32_t seq, const esi_t *esi);
+
+int zebra_evpn_add_update_local_mac(struct zebra_vrf *zvrf,
+ struct zebra_evpn *zevpn,
struct interface *ifp,
const struct ethaddr *macaddr, vlanid_t vid,
bool sticky, bool local_inactive,
- bool dp_static, zebra_mac_t *mac);
-int zebra_evpn_del_local_mac(zebra_evpn_t *zevpn, zebra_mac_t *mac,
+ bool dp_static, struct zebra_mac *mac);
+int zebra_evpn_del_local_mac(struct zebra_evpn *zevpn, struct zebra_mac *mac,
bool clear_static);
-int zebra_evpn_mac_gw_macip_add(struct interface *ifp, zebra_evpn_t *zevpn,
- const struct ipaddr *ip, zebra_mac_t **macp,
+int zebra_evpn_mac_gw_macip_add(struct interface *ifp, struct zebra_evpn *zevpn,
+ const struct ipaddr *ip,
+ struct zebra_mac **macp,
const struct ethaddr *macaddr, vlanid_t vlan_id,
bool def_gw);
-void zebra_evpn_mac_svi_add(struct interface *ifp, zebra_evpn_t *zevpn);
-void zebra_evpn_mac_svi_del(struct interface *ifp, zebra_evpn_t *zevpn);
+void zebra_evpn_mac_svi_add(struct interface *ifp, struct zebra_evpn *zevpn);
+void zebra_evpn_mac_svi_del(struct interface *ifp, struct zebra_evpn *zevpn);
void zebra_evpn_mac_ifp_del(struct interface *ifp);
-void zebra_evpn_mac_clear_fwd_info(zebra_mac_t *zmac);
+void zebra_evpn_mac_clear_fwd_info(struct zebra_mac *zmac);
#ifdef __cplusplus
}
static void zebra_evpn_es_get_one_base_evpn(void);
static int zebra_evpn_es_evi_send_to_client(struct zebra_evpn_es *es,
- zebra_evpn_t *zevpn, bool add);
+ struct zebra_evpn *zevpn, bool add);
static void zebra_evpn_local_es_del(struct zebra_evpn_es **esp);
static int zebra_evpn_local_es_update(struct zebra_if *zif, esi_t *esi);
static bool zebra_evpn_es_br_port_dplane_update(struct zebra_evpn_es *es,
/*****************************************************************************/
/* Ethernet Segment to EVI association -
* 1. The ES-EVI entry is maintained as a RB tree per L2-VNI
- * (zebra_evpn_t.es_evi_rb_tree).
+ * (struct zebra_evpn.es_evi_rb_tree).
* 2. Each local ES-EVI entry is sent to BGP which advertises it as an
* EAD-EVI (Type-1 EVPN) route
* 3. Local ES-EVI setup is re-evaluated on the following triggers -
* tables.
*/
static struct zebra_evpn_es_evi *zebra_evpn_es_evi_new(struct zebra_evpn_es *es,
- zebra_evpn_t *zevpn)
+ struct zebra_evpn *zevpn)
{
struct zebra_evpn_es_evi *es_evi;
static void zebra_evpn_es_evi_free(struct zebra_evpn_es_evi *es_evi)
{
struct zebra_evpn_es *es = es_evi->es;
- zebra_evpn_t *zevpn = es_evi->zevpn;
+ struct zebra_evpn *zevpn = es_evi->zevpn;
if (IS_ZEBRA_DEBUG_EVPN_MH_ES)
zlog_debug("es %s evi %d free",
}
/* find the ES-EVI in the per-L2-VNI RB tree */
-static struct zebra_evpn_es_evi *zebra_evpn_es_evi_find(
- struct zebra_evpn_es *es, zebra_evpn_t *zevpn)
+static struct zebra_evpn_es_evi *
+zebra_evpn_es_evi_find(struct zebra_evpn_es *es, struct zebra_evpn *zevpn)
{
struct zebra_evpn_es_evi es_evi;
zebra_evpn_es_evi_free(es_evi);
}
static void zebra_evpn_local_es_evi_del(struct zebra_evpn_es *es,
- zebra_evpn_t *zevpn)
+ struct zebra_evpn *zevpn)
{
struct zebra_evpn_es_evi *es_evi;
/* Create an ES-EVI if it doesn't already exist and tell BGP */
static void zebra_evpn_local_es_evi_add(struct zebra_evpn_es *es,
- zebra_evpn_t *zevpn)
+ struct zebra_evpn *zevpn)
{
struct zebra_evpn_es_evi *es_evi;
}
}
-static void zebra_evpn_es_evi_show_one_evpn(zebra_evpn_t *zevpn,
+static void zebra_evpn_es_evi_show_one_evpn(struct zebra_evpn *zevpn,
struct vty *vty,
json_object *json_array, int detail)
{
static void zebra_evpn_es_evi_show_one_evpn_hash_cb(struct hash_bucket *bucket,
void *ctxt)
{
- zebra_evpn_t *zevpn = (zebra_evpn_t *)bucket->data;
+ struct zebra_evpn *zevpn = (struct zebra_evpn *)bucket->data;
struct evpn_mh_show_ctx *wctx = (struct evpn_mh_show_ctx *)ctxt;
zebra_evpn_es_evi_show_one_evpn(zevpn, wctx->vty,
void zebra_evpn_es_evi_show_vni(struct vty *vty, bool uj, vni_t vni, int detail)
{
json_object *json_array = NULL;
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
zevpn = zebra_evpn_lookup(vni);
if (uj)
}
/* Initialize the ES tables maintained per-L2_VNI */
-void zebra_evpn_es_evi_init(zebra_evpn_t *zevpn)
+void zebra_evpn_es_evi_init(struct zebra_evpn *zevpn)
{
/* Initialize the ES-EVI RB tree */
RB_INIT(zebra_es_evi_rb_head, &zevpn->es_evi_rb_tree);
}
/* Cleanup the ES info maintained per- EVPN */
-void zebra_evpn_es_evi_cleanup(zebra_evpn_t *zevpn)
+void zebra_evpn_es_evi_cleanup(struct zebra_evpn *zevpn)
{
struct zebra_evpn_es_evi *es_evi;
struct zebra_evpn_es_evi *es_evi_next;
/* called when the oper state or bridge membership changes for the
* vxlan device
*/
-void zebra_evpn_update_all_es(zebra_evpn_t *zevpn)
+void zebra_evpn_update_all_es(struct zebra_evpn *zevpn)
{
struct zebra_evpn_es_evi *es_evi;
struct listnode *node;
/* called when a EVPN-L2VNI is set or cleared against a BD */
static void zebra_evpn_acc_bd_evpn_set(struct zebra_evpn_access_bd *acc_bd,
- zebra_evpn_t *zevpn, zebra_evpn_t *old_zevpn)
+ struct zebra_evpn *zevpn,
+ struct zebra_evpn *old_zevpn)
{
struct zebra_if *zif;
struct listnode *node;
{
struct zebra_evpn_access_bd *acc_bd;
struct zebra_if *old_vxlan_zif;
- zebra_evpn_t *old_zevpn;
+ struct zebra_evpn *old_zevpn;
if (!vid)
return;
}
/* handle EVPN add/del */
-void zebra_evpn_vxl_evpn_set(struct zebra_if *zif, zebra_evpn_t *zevpn,
- bool set)
+void zebra_evpn_vxl_evpn_set(struct zebra_if *zif, struct zebra_evpn *zevpn,
+ bool set)
{
struct zebra_l2info_vxlan *vxl;
struct zebra_evpn_access_bd *acc_bd;
}
} else {
if (acc_bd->zevpn) {
- zebra_evpn_t *old_zevpn = acc_bd->zevpn;
+ struct zebra_evpn *old_zevpn = acc_bd->zevpn;
acc_bd->zevpn = NULL;
zebra_evpn_acc_bd_evpn_set(acc_bd, NULL, old_zevpn);
}
/* update remote macs associated with the ES */
static void zebra_evpn_nhg_mac_update(struct zebra_evpn_es *es)
{
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
struct listnode *node;
bool local_via_nw;
}
}
-static void zebra_evpn_flush_local_mac(zebra_mac_t *mac, struct interface *ifp)
+static void zebra_evpn_flush_local_mac(struct zebra_mac *mac,
+ struct interface *ifp)
{
struct zebra_if *zif;
struct interface *br_ifp;
static void zebra_evpn_es_flush_local_macs(struct zebra_evpn_es *es,
struct interface *ifp, bool add)
{
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
struct listnode *node;
struct listnode *nnode;
return;
}
-void zebra_evpn_es_mac_deref_entry(zebra_mac_t *mac)
+void zebra_evpn_es_mac_deref_entry(struct zebra_mac *mac)
{
struct zebra_evpn_es *es = mac->es;
/* Associate a MAC entry with a local or remote ES. Returns false if there
* was no ES change.
*/
-bool zebra_evpn_es_mac_ref_entry(zebra_mac_t *mac, struct zebra_evpn_es *es)
+bool zebra_evpn_es_mac_ref_entry(struct zebra_mac *mac,
+ struct zebra_evpn_es *es)
{
if (mac->es == es)
return false;
return true;
}
-bool zebra_evpn_es_mac_ref(zebra_mac_t *mac, const esi_t *esi)
+bool zebra_evpn_es_mac_ref(struct zebra_mac *mac, const esi_t *esi)
{
struct zebra_evpn_es *es;
/* Inform BGP about local ES-EVI add or del */
static int zebra_evpn_es_evi_send_to_client(struct zebra_evpn_es *es,
- zebra_evpn_t *zevpn, bool add)
+ struct zebra_evpn *zevpn, bool add)
{
struct zserv *client;
struct stream *s;
static void zebra_evpn_es_bypass_update_macs(struct zebra_evpn_es *es,
struct interface *ifp, bool bypass)
{
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
struct listnode *node;
struct listnode *nnode;
struct zebra_if *zif;
static void zebra_evpn_local_mac_oper_state_change(struct zebra_evpn_es *es)
{
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
struct listnode *node;
/* If fast-failover is supported by the dataplane via the use
* necessary
*/
/* called when a new vni is added or becomes oper up or becomes a bridge port */
-void zebra_evpn_es_set_base_evpn(zebra_evpn_t *zevpn)
+void zebra_evpn_es_set_base_evpn(struct zebra_evpn *zevpn)
{
struct listnode *node;
struct zebra_evpn_es *es;
/* called when a vni is removed or becomes oper down or is removed from a
* bridge
*/
-void zebra_evpn_es_clear_base_evpn(zebra_evpn_t *zevpn)
+void zebra_evpn_es_clear_base_evpn(struct zebra_evpn *zevpn)
{
struct listnode *node;
struct zebra_evpn_es *es;
/* Locate an "eligible" L2-VNI to follow */
static int zebra_evpn_es_get_one_base_evpn_cb(struct hash_bucket *b, void *data)
{
- zebra_evpn_t *zevpn = b->data;
+ struct zebra_evpn *zevpn = b->data;
zebra_evpn_es_set_base_evpn(zevpn);
*/
struct zebra_evpn_es_evi {
struct zebra_evpn_es *es;
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
/* ES-EVI flags */
uint32_t flags;
uint8_t df_alg;
uint32_t df_pref;
- /* XXX - maintain a backpointer to zebra_vtep_t */
+ /* XXX - maintain a backpointer to struct zebra_vtep */
};
/* Local/access-side broadcast domain - zebra_evpn_access_bd is added to -
/* list of members associated with the BD i.e. (potential) ESs */
struct list *mbr_zifs;
/* presence of zevpn activates the EVI on all the ESs in mbr_zifs */
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
/* SVI associated with the VLAN */
struct zebra_if *vlan_zif;
};
* XXX: once single vxlan device model becomes available this will
* not be necessary
*/
- zebra_evpn_t *es_base_evpn;
+ struct zebra_evpn *es_base_evpn;
struct in_addr es_originator_ip;
/* L2 NH and NHG ids -
};
/* returns TRUE if the EVPN is ready to be sent to BGP */
-static inline bool zebra_evpn_send_to_client_ok(zebra_evpn_t *zevpn)
+static inline bool zebra_evpn_send_to_client_ok(struct zebra_evpn *zevpn)
{
return !!(zevpn->flags & ZEVPN_READY_FOR_BGP);
}
-static inline bool zebra_evpn_mac_is_es_local(zebra_mac_t *mac)
+static inline bool zebra_evpn_mac_is_es_local(struct zebra_mac *mac)
{
return mac->es && (mac->es->flags & ZEBRA_EVPNES_LOCAL);
}
extern bool zebra_evpn_is_if_es_capable(struct zebra_if *zif);
extern void zebra_evpn_if_init(struct zebra_if *zif);
extern void zebra_evpn_if_cleanup(struct zebra_if *zif);
-extern void zebra_evpn_es_evi_init(zebra_evpn_t *zevpn);
-extern void zebra_evpn_es_evi_cleanup(zebra_evpn_t *zevpn);
-extern void zebra_evpn_vxl_evpn_set(struct zebra_if *zif, zebra_evpn_t *zevpn,
- bool set);
-extern void zebra_evpn_es_set_base_evpn(zebra_evpn_t *zevpn);
-extern void zebra_evpn_es_clear_base_evpn(zebra_evpn_t *zevpn);
+extern void zebra_evpn_es_evi_init(struct zebra_evpn *zevpn);
+extern void zebra_evpn_es_evi_cleanup(struct zebra_evpn *zevpn);
+extern void zebra_evpn_vxl_evpn_set(struct zebra_if *zif,
+ struct zebra_evpn *zevpn, bool set);
+extern void zebra_evpn_es_set_base_evpn(struct zebra_evpn *zevpn);
+extern void zebra_evpn_es_clear_base_evpn(struct zebra_evpn *zevpn);
extern void zebra_evpn_vl_vxl_ref(uint16_t vid, struct zebra_if *vxlan_zif);
extern void zebra_evpn_vl_vxl_deref(uint16_t vid, struct zebra_if *vxlan_zif);
extern void zebra_evpn_vl_mbr_ref(uint16_t vid, struct zebra_if *zif);
extern void zebra_evpn_es_show(struct vty *vty, bool uj);
extern void zebra_evpn_es_show_detail(struct vty *vty, bool uj);
extern void zebra_evpn_es_show_esi(struct vty *vty, bool uj, esi_t *esi);
-extern void zebra_evpn_update_all_es(zebra_evpn_t *zevpn);
+extern void zebra_evpn_update_all_es(struct zebra_evpn *zevpn);
extern void zebra_evpn_proc_remote_es(ZAPI_HANDLER_ARGS);
int zebra_evpn_remote_es_add(const esi_t *esi, struct in_addr vtep_ip,
bool esr_rxed, uint8_t df_alg, uint16_t df_pref);
extern void zebra_evpn_es_evi_show(struct vty *vty, bool uj, int detail);
extern void zebra_evpn_es_evi_show_vni(struct vty *vty, bool uj,
vni_t vni, int detail);
-extern void zebra_evpn_es_mac_deref_entry(zebra_mac_t *mac);
-extern bool zebra_evpn_es_mac_ref_entry(zebra_mac_t *mac,
+extern void zebra_evpn_es_mac_deref_entry(struct zebra_mac *mac);
+extern bool zebra_evpn_es_mac_ref_entry(struct zebra_mac *mac,
struct zebra_evpn_es *es);
-extern bool zebra_evpn_es_mac_ref(zebra_mac_t *mac, const esi_t *esi);
+extern bool zebra_evpn_es_mac_ref(struct zebra_mac *mac, const esi_t *esi);
extern struct zebra_evpn_es *zebra_evpn_es_find(const esi_t *esi);
extern void zebra_evpn_interface_init(void);
extern int zebra_evpn_mh_if_write(struct vty *vty, struct interface *ifp);
*/
static unsigned int neigh_hash_keymake(const void *p)
{
- const zebra_neigh_t *n = p;
+ const struct zebra_neigh *n = p;
const struct ipaddr *ip = &n->ip;
if (IS_IPADDR_V4(ip))
*/
static bool neigh_cmp(const void *p1, const void *p2)
{
- const zebra_neigh_t *n1 = p1;
- const zebra_neigh_t *n2 = p2;
+ const struct zebra_neigh *n1 = p1;
+ const struct zebra_neigh *n2 = p2;
if (n1 == NULL && n2 == NULL)
return true;
int neigh_list_cmp(void *p1, void *p2)
{
- const zebra_neigh_t *n1 = p1;
- const zebra_neigh_t *n2 = p2;
+ const struct zebra_neigh *n1 = p1;
+ const struct zebra_neigh *n2 = p2;
return memcmp(&n1->ip, &n2->ip, sizeof(struct ipaddr));
}
return hash_create_size(8, neigh_hash_keymake, neigh_cmp, desc);
}
-uint32_t num_dup_detected_neighs(zebra_evpn_t *zevpn)
+uint32_t num_dup_detected_neighs(struct zebra_evpn *zevpn)
{
unsigned int i;
uint32_t num_neighs = 0;
struct hash *hash;
struct hash_bucket *hb;
- zebra_neigh_t *nbr;
+ struct zebra_neigh *nbr;
hash = zevpn->neigh_table;
if (!hash)
return num_neighs;
for (i = 0; i < hash->size; i++) {
for (hb = hash->index[i]; hb; hb = hb->next) {
- nbr = (zebra_neigh_t *)hb->data;
+ nbr = (struct zebra_neigh *)hb->data;
if (CHECK_FLAG(nbr->flags, ZEBRA_NEIGH_DUPLICATE))
num_neighs++;
}
*/
void zebra_evpn_find_neigh_addr_width(struct hash_bucket *bucket, void *ctxt)
{
- zebra_neigh_t *n;
+ struct zebra_neigh *n;
char buf[INET6_ADDRSTRLEN];
struct neigh_walk_ctx *wctx = ctxt;
int width;
- n = (zebra_neigh_t *)bucket->data;
+ n = (struct zebra_neigh *)bucket->data;
ipaddr2str(&n->ip, buf, sizeof(buf));
width = strlen(buf);
/*
* Count of remote neighbors referencing this MAC.
*/
-int remote_neigh_count(zebra_mac_t *zmac)
+int remote_neigh_count(struct zebra_mac *zmac)
{
- zebra_neigh_t *n = NULL;
+ struct zebra_neigh *n = NULL;
struct listnode *node = NULL;
int count = 0;
/*
* Install remote neighbor into the kernel.
*/
-int zebra_evpn_rem_neigh_install(zebra_evpn_t *zevpn, zebra_neigh_t *n,
- bool was_static)
+int zebra_evpn_rem_neigh_install(struct zebra_evpn *zevpn,
+ struct zebra_neigh *n, bool was_static)
{
struct interface *vlan_if;
int flags;
*/
void zebra_evpn_install_neigh_hash(struct hash_bucket *bucket, void *ctxt)
{
- zebra_neigh_t *n;
+ struct zebra_neigh *n;
struct neigh_walk_ctx *wctx = ctxt;
- n = (zebra_neigh_t *)bucket->data;
+ n = (struct zebra_neigh *)bucket->data;
if (CHECK_FLAG(n->flags, ZEBRA_NEIGH_REMOTE))
zebra_evpn_rem_neigh_install(wctx->zevpn, n,
*/
static void *zebra_evpn_neigh_alloc(void *p)
{
- const zebra_neigh_t *tmp_n = p;
- zebra_neigh_t *n;
+ const struct zebra_neigh *tmp_n = p;
+ struct zebra_neigh *n;
- n = XCALLOC(MTYPE_NEIGH, sizeof(zebra_neigh_t));
+ n = XCALLOC(MTYPE_NEIGH, sizeof(struct zebra_neigh));
*n = *tmp_n;
return ((void *)n);
}
-static void zebra_evpn_local_neigh_ref_mac(zebra_neigh_t *n,
+static void zebra_evpn_local_neigh_ref_mac(struct zebra_neigh *n,
const struct ethaddr *macaddr,
- zebra_mac_t *mac,
+ struct zebra_mac *mac,
bool send_mac_update)
{
bool old_static;
}
/* sync-path that is active on an ES peer */
-static void zebra_evpn_sync_neigh_dp_install(zebra_neigh_t *n,
+static void zebra_evpn_sync_neigh_dp_install(struct zebra_neigh *n,
bool set_inactive,
bool force_clear_static,
const char *caller)
*/
int zebra_evpn_neigh_send_add_to_client(vni_t vni, const struct ipaddr *ip,
const struct ethaddr *macaddr,
- zebra_mac_t *zmac, uint32_t neigh_flags,
- uint32_t seq)
+ struct zebra_mac *zmac,
+ uint32_t neigh_flags, uint32_t seq)
{
uint8_t flags = 0;
vni, macaddr, ip, flags, 0, state, NULL, ZEBRA_MACIP_DEL);
}
-static void zebra_evpn_neigh_send_add_del_to_client(zebra_neigh_t *n,
+static void zebra_evpn_neigh_send_add_del_to_client(struct zebra_neigh *n,
bool old_bgp_ready,
bool new_bgp_ready)
{
* to update the sync-neigh references against the MAC
* and inform the dataplane about the static flag changes.
*/
-void zebra_evpn_sync_neigh_static_chg(zebra_neigh_t *n, bool old_n_static,
+void zebra_evpn_sync_neigh_static_chg(struct zebra_neigh *n, bool old_n_static,
bool new_n_static, bool defer_n_dp,
bool defer_mac_dp, const char *caller)
{
- zebra_mac_t *mac = n->mac;
+ struct zebra_mac *mac = n->mac;
bool old_mac_static;
bool new_mac_static;
*/
static int zebra_evpn_neigh_hold_exp_cb(struct thread *t)
{
- zebra_neigh_t *n;
+ struct zebra_neigh *n;
bool old_bgp_ready;
bool new_bgp_ready;
bool old_n_static;
return 0;
}
-static inline void zebra_evpn_neigh_start_hold_timer(zebra_neigh_t *n)
+static inline void zebra_evpn_neigh_start_hold_timer(struct zebra_neigh *n)
{
if (n->hold_timer)
return;
zmh_info->neigh_hold_time, &n->hold_timer);
}
-static void zebra_evpn_local_neigh_deref_mac(zebra_neigh_t *n,
+static void zebra_evpn_local_neigh_deref_mac(struct zebra_neigh *n,
bool send_mac_update)
{
- zebra_mac_t *mac = n->mac;
- zebra_evpn_t *zevpn = n->zevpn;
+ struct zebra_mac *mac = n->mac;
+ struct zebra_evpn *zevpn = n->zevpn;
bool old_static;
bool new_static;
zebra_evpn_deref_ip2mac(zevpn, mac);
}
-bool zebra_evpn_neigh_is_bgp_seq_ok(zebra_evpn_t *zevpn, zebra_neigh_t *n,
+bool zebra_evpn_neigh_is_bgp_seq_ok(struct zebra_evpn *zevpn,
+ struct zebra_neigh *n,
const struct ethaddr *macaddr, uint32_t seq,
bool sync)
{
/*
* Add neighbor entry.
*/
-static zebra_neigh_t *zebra_evpn_neigh_add(zebra_evpn_t *zevpn,
- const struct ipaddr *ip,
- const struct ethaddr *mac,
- zebra_mac_t *zmac, uint32_t n_flags)
+static struct zebra_neigh *zebra_evpn_neigh_add(struct zebra_evpn *zevpn,
+ const struct ipaddr *ip,
+ const struct ethaddr *mac,
+ struct zebra_mac *zmac,
+ uint32_t n_flags)
{
- zebra_neigh_t tmp_n;
- zebra_neigh_t *n = NULL;
+ struct zebra_neigh tmp_n;
+ struct zebra_neigh *n = NULL;
- memset(&tmp_n, 0, sizeof(zebra_neigh_t));
+ memset(&tmp_n, 0, sizeof(struct zebra_neigh));
memcpy(&tmp_n.ip, ip, sizeof(struct ipaddr));
n = hash_get(zevpn->neigh_table, &tmp_n, zebra_evpn_neigh_alloc);
assert(n);
/*
* Delete neighbor entry.
*/
-int zebra_evpn_neigh_del(zebra_evpn_t *zevpn, zebra_neigh_t *n)
+int zebra_evpn_neigh_del(struct zebra_evpn *zevpn, struct zebra_neigh *n)
{
- zebra_neigh_t *tmp_n;
+ struct zebra_neigh *tmp_n;
if (n->mac)
listnode_delete(n->mac->neigh_list, n);
return 0;
}
-void zebra_evpn_sync_neigh_del(zebra_neigh_t *n)
+void zebra_evpn_sync_neigh_del(struct zebra_neigh *n)
{
bool old_n_static;
bool new_n_static;
false /*defer_mac_dp*/, __func__);
}
-zebra_neigh_t *
-zebra_evpn_proc_sync_neigh_update(zebra_evpn_t *zevpn, zebra_neigh_t *n,
- uint16_t ipa_len, const struct ipaddr *ipaddr,
- uint8_t flags, uint32_t seq, const esi_t *esi,
- struct sync_mac_ip_ctx *ctx)
+struct zebra_neigh *zebra_evpn_proc_sync_neigh_update(
+ struct zebra_evpn *zevpn, struct zebra_neigh *n, uint16_t ipa_len,
+ const struct ipaddr *ipaddr, uint8_t flags, uint32_t seq,
+ const esi_t *esi, struct sync_mac_ip_ctx *ctx)
{
struct interface *ifp = NULL;
bool is_router;
- zebra_mac_t *mac = ctx->mac;
+ struct zebra_mac *mac = ctx->mac;
uint32_t tmp_seq;
bool old_router = false;
bool old_bgp_ready = false;
/*
* Uninstall remote neighbor from the kernel.
*/
-static int zebra_evpn_neigh_uninstall(zebra_evpn_t *zevpn, zebra_neigh_t *n)
+static int zebra_evpn_neigh_uninstall(struct zebra_evpn *zevpn,
+ struct zebra_neigh *n)
{
struct interface *vlan_if;
void *arg)
{
struct neigh_walk_ctx *wctx = arg;
- zebra_neigh_t *n = bucket->data;
+ struct zebra_neigh *n = bucket->data;
if (((wctx->flags & DEL_LOCAL_NEIGH) && (n->flags & ZEBRA_NEIGH_LOCAL))
|| ((wctx->flags & DEL_REMOTE_NEIGH)
/*
* Delete all neighbor entries for this EVPN.
*/
-void zebra_evpn_neigh_del_all(zebra_evpn_t *zevpn, int uninstall,
+void zebra_evpn_neigh_del_all(struct zebra_evpn *zevpn, int uninstall,
int upd_client, uint32_t flags)
{
struct neigh_walk_ctx wctx;
/*
* Look up neighbor hash entry.
*/
-zebra_neigh_t *zebra_evpn_neigh_lookup(zebra_evpn_t *zevpn,
- const struct ipaddr *ip)
+struct zebra_neigh *zebra_evpn_neigh_lookup(struct zebra_evpn *zevpn,
+ const struct ipaddr *ip)
{
- zebra_neigh_t tmp;
- zebra_neigh_t *n;
+ struct zebra_neigh tmp;
+ struct zebra_neigh *n;
memset(&tmp, 0, sizeof(tmp));
memcpy(&tmp.ip, ip, sizeof(struct ipaddr));
* Process all neighbors associated with a MAC upon the MAC being learnt
* locally or undergoing any other change (such as sequence number).
*/
-void zebra_evpn_process_neigh_on_local_mac_change(zebra_evpn_t *zevpn,
- zebra_mac_t *zmac,
+void zebra_evpn_process_neigh_on_local_mac_change(struct zebra_evpn *zevpn,
+ struct zebra_mac *zmac,
bool seq_change,
bool es_change)
{
- zebra_neigh_t *n = NULL;
+ struct zebra_neigh *n = NULL;
struct listnode *node = NULL;
struct zebra_vrf *zvrf = NULL;
* Process all neighbors associated with a local MAC upon the MAC being
* deleted.
*/
-void zebra_evpn_process_neigh_on_local_mac_del(zebra_evpn_t *zevpn,
- zebra_mac_t *zmac)
+void zebra_evpn_process_neigh_on_local_mac_del(struct zebra_evpn *zevpn,
+ struct zebra_mac *zmac)
{
- zebra_neigh_t *n = NULL;
+ struct zebra_neigh *n = NULL;
struct listnode *node = NULL;
if (IS_ZEBRA_DEBUG_VXLAN)
* Process all neighbors associated with a MAC upon the MAC being remotely
* learnt.
*/
-void zebra_evpn_process_neigh_on_remote_mac_add(zebra_evpn_t *zevpn,
- zebra_mac_t *zmac)
+void zebra_evpn_process_neigh_on_remote_mac_add(struct zebra_evpn *zevpn,
+ struct zebra_mac *zmac)
{
- zebra_neigh_t *n = NULL;
+ struct zebra_neigh *n = NULL;
struct listnode *node = NULL;
if (IS_ZEBRA_DEBUG_VXLAN)
* Process all neighbors associated with a remote MAC upon the MAC being
* deleted.
*/
-void zebra_evpn_process_neigh_on_remote_mac_del(zebra_evpn_t *zevpn,
- zebra_mac_t *zmac)
+void zebra_evpn_process_neigh_on_remote_mac_del(struct zebra_evpn *zevpn,
+ struct zebra_mac *zmac)
{
/* NOTE: Currently a NO-OP. */
}
static inline void zebra_evpn_local_neigh_update_log(
- const char *pfx, zebra_neigh_t *n, bool is_router, bool local_inactive,
- bool old_bgp_ready, bool new_bgp_ready, bool inform_dataplane,
- bool inform_bgp, const char *sfx)
+ const char *pfx, struct zebra_neigh *n, bool is_router,
+ bool local_inactive, bool old_bgp_ready, bool new_bgp_ready,
+ bool inform_dataplane, bool inform_bgp, const char *sfx)
{
if (!IS_ZEBRA_DEBUG_EVPN_MH_NEIGH)
return;
* from MAC.
*/
static int zebra_evpn_ip_inherit_dad_from_mac(struct zebra_vrf *zvrf,
- zebra_mac_t *old_zmac,
- zebra_mac_t *new_zmac,
- zebra_neigh_t *nbr)
+ struct zebra_mac *old_zmac,
+ struct zebra_mac *new_zmac,
+ struct zebra_neigh *nbr)
{
bool is_old_mac_dup = false;
bool is_new_mac_dup = false;
static int zebra_evpn_dad_ip_auto_recovery_exp(struct thread *t)
{
struct zebra_vrf *zvrf = NULL;
- zebra_neigh_t *nbr = NULL;
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_neigh *nbr = NULL;
+ struct zebra_evpn *zevpn = NULL;
nbr = THREAD_ARG(t);
return 0;
}
-static void
-zebra_evpn_dup_addr_detect_for_neigh(struct zebra_vrf *zvrf, zebra_neigh_t *nbr,
- struct in_addr vtep_ip, bool do_dad,
- bool *is_dup_detect, bool is_local)
+static void zebra_evpn_dup_addr_detect_for_neigh(
+ struct zebra_vrf *zvrf, struct zebra_neigh *nbr, struct in_addr vtep_ip,
+ bool do_dad, bool *is_dup_detect, bool is_local)
{
struct timeval elapsed = {0, 0};
}
}
-int zebra_evpn_local_neigh_update(zebra_evpn_t *zevpn, struct interface *ifp,
+int zebra_evpn_local_neigh_update(struct zebra_evpn *zevpn,
+ struct interface *ifp,
const struct ipaddr *ip,
const struct ethaddr *macaddr, bool is_router,
bool local_inactive, bool dp_static)
{
struct zebra_vrf *zvrf;
- zebra_neigh_t *n = NULL;
- zebra_mac_t *zmac = NULL, *old_zmac = NULL;
+ struct zebra_neigh *n = NULL;
+ struct zebra_mac *zmac = NULL, *old_zmac = NULL;
uint32_t old_mac_seq = 0, mac_new_seq = 0;
bool upd_mac_seq = false;
bool neigh_mac_change = false;
return 0;
}
-int zebra_evpn_remote_neigh_update(zebra_evpn_t *zevpn, struct interface *ifp,
+int zebra_evpn_remote_neigh_update(struct zebra_evpn *zevpn,
+ struct interface *ifp,
const struct ipaddr *ip,
const struct ethaddr *macaddr,
uint16_t state)
{
- zebra_neigh_t *n = NULL;
- zebra_mac_t *zmac = NULL;
+ struct zebra_neigh *n = NULL;
+ struct zebra_mac *zmac = NULL;
/* If the neighbor is unknown, there is no further action. */
n = zebra_evpn_neigh_lookup(zevpn, ip);
void *arg)
{
struct mac_walk_ctx *wctx = arg;
- zebra_neigh_t *zn = bucket->data;
- zebra_mac_t *zmac = NULL;
+ struct zebra_neigh *zn = bucket->data;
+ struct zebra_mac *zmac = NULL;
if (CHECK_FLAG(zn->flags, ZEBRA_NEIGH_DEF_GW))
return;
}
/* Iterator of a specific EVPN */
-void zebra_evpn_send_neigh_to_client(zebra_evpn_t *zevpn)
+void zebra_evpn_send_neigh_to_client(struct zebra_evpn *zevpn)
{
struct neigh_walk_ctx wctx;
void zebra_evpn_clear_dup_neigh_hash(struct hash_bucket *bucket, void *ctxt)
{
struct neigh_walk_ctx *wctx = ctxt;
- zebra_neigh_t *nbr;
- zebra_evpn_t *zevpn;
+ struct zebra_neigh *nbr;
+ struct zebra_evpn *zevpn;
char buf[INET6_ADDRSTRLEN];
- nbr = (zebra_neigh_t *)bucket->data;
+ nbr = (struct zebra_neigh *)bucket->data;
if (!nbr)
return;
/*
* Print a specific neighbor entry.
*/
-void zebra_evpn_print_neigh(zebra_neigh_t *n, void *ctxt, json_object *json)
+void zebra_evpn_print_neigh(struct zebra_neigh *n, void *ctxt,
+ json_object *json)
{
struct vty *vty;
char buf1[ETHER_ADDR_STRLEN];
"Seq #'s");
}
-static char *zebra_evpn_print_neigh_flags(zebra_neigh_t *n, char *flags_buf,
- uint32_t flags_buf_sz)
+static char *zebra_evpn_print_neigh_flags(struct zebra_neigh *n,
+ char *flags_buf,
+ uint32_t flags_buf_sz)
{
snprintf(flags_buf, flags_buf_sz, "%s%s%s",
(n->flags & ZEBRA_NEIGH_ES_PEER_ACTIVE) ?
{
struct vty *vty;
json_object *json_evpn = NULL, *json_row = NULL;
- zebra_neigh_t *n;
+ struct zebra_neigh *n;
char buf1[ETHER_ADDR_STRLEN];
char buf2[INET6_ADDRSTRLEN];
char addr_buf[PREFIX_STRLEN];
vty = wctx->vty;
json_evpn = wctx->json;
- n = (zebra_neigh_t *)bucket->data;
+ n = (struct zebra_neigh *)bucket->data;
if (json_evpn)
json_row = json_object_new_object();
{
struct vty *vty;
json_object *json_evpn = NULL, *json_row = NULL;
- zebra_neigh_t *n;
+ struct zebra_neigh *n;
char buf[INET6_ADDRSTRLEN];
struct neigh_walk_ctx *wctx = ctxt;
vty = wctx->vty;
json_evpn = wctx->json;
- n = (zebra_neigh_t *)bucket->data;
+ n = (struct zebra_neigh *)bucket->data;
if (!n)
return;
void zebra_evpn_print_dad_neigh_hash(struct hash_bucket *bucket, void *ctxt)
{
- zebra_neigh_t *nbr;
+ struct zebra_neigh *nbr;
- nbr = (zebra_neigh_t *)bucket->data;
+ nbr = (struct zebra_neigh *)bucket->data;
if (!nbr)
return;
void zebra_evpn_print_dad_neigh_hash_detail(struct hash_bucket *bucket,
void *ctxt)
{
- zebra_neigh_t *nbr;
+ struct zebra_neigh *nbr;
- nbr = (zebra_neigh_t *)bucket->data;
+ nbr = (struct zebra_neigh *)bucket->data;
if (!nbr)
return;
zebra_evpn_print_neigh_hash_detail(bucket, ctxt);
}
-void zebra_evpn_neigh_remote_macip_add(zebra_evpn_t *zevpn,
+void zebra_evpn_neigh_remote_macip_add(struct zebra_evpn *zevpn,
struct zebra_vrf *zvrf,
const struct ipaddr *ipaddr,
- zebra_mac_t *mac, struct in_addr vtep_ip,
- uint8_t flags, uint32_t seq)
+ struct zebra_mac *mac,
+ struct in_addr vtep_ip, uint8_t flags,
+ uint32_t seq)
{
- zebra_neigh_t *n;
+ struct zebra_neigh *n;
int update_neigh = 0;
- zebra_mac_t *old_mac = NULL;
+ struct zebra_mac *old_mac = NULL;
bool old_static = false;
bool do_dad = false;
bool is_dup_detect = false;
"sync->remote neigh vni %u ip %pIA mac %pEA seq %d f0x%x",
n->zevpn->vni, &n->ip, &n->emac,
seq, n->flags);
- zebra_evpn_neigh_clear_sync_info(n);
if (IS_ZEBRA_NEIGH_ACTIVE(n))
zebra_evpn_neigh_send_del_to_client(
zevpn->vni, &n->ip, &n->emac,
n->flags, n->state,
false /*force*/);
+ zebra_evpn_neigh_clear_sync_info(n);
}
if (memcmp(&n->emac, &mac->macaddr,
sizeof(struct ethaddr))
n->rem_seq = seq;
}
-int zebra_evpn_neigh_gw_macip_add(struct interface *ifp, zebra_evpn_t *zevpn,
- struct ipaddr *ip, zebra_mac_t *mac)
+int zebra_evpn_neigh_gw_macip_add(struct interface *ifp,
+ struct zebra_evpn *zevpn, struct ipaddr *ip,
+ struct zebra_mac *mac)
{
- zebra_neigh_t *n;
+ struct zebra_neigh *n;
assert(mac);
return 0;
}
-void zebra_evpn_neigh_remote_uninstall(zebra_evpn_t *zevpn,
- struct zebra_vrf *zvrf, zebra_neigh_t *n,
- zebra_mac_t *mac,
+void zebra_evpn_neigh_remote_uninstall(struct zebra_evpn *zevpn,
+ struct zebra_vrf *zvrf,
+ struct zebra_neigh *n,
+ struct zebra_mac *mac,
const struct ipaddr *ipaddr)
{
if (zvrf->dad_freeze && CHECK_FLAG(n->flags, ZEBRA_NEIGH_DUPLICATE)
}
}
-int zebra_evpn_neigh_del_ip(zebra_evpn_t *zevpn, const struct ipaddr *ip)
+int zebra_evpn_neigh_del_ip(struct zebra_evpn *zevpn, const struct ipaddr *ip)
{
- zebra_neigh_t *n;
- zebra_mac_t *zmac;
+ struct zebra_neigh *n;
+ struct zebra_mac *zmac;
bool old_bgp_ready;
bool new_bgp_ready;
struct zebra_vrf *zvrf;
extern "C" {
#endif
-typedef struct zebra_neigh_t_ zebra_neigh_t;
-
#define IS_ZEBRA_NEIGH_ACTIVE(n) (n->state == ZEBRA_NEIGH_ACTIVE)
#define IS_ZEBRA_NEIGH_INACTIVE(n) (n->state == ZEBRA_NEIGH_INACTIVE)
* it is sufficient for zebra to maintain against the VNI. The correct
* VNI will be obtained as zebra maintains the mapping (of VLAN to VNI).
*/
-struct zebra_neigh_t_ {
+struct zebra_neigh {
/* IP address. */
struct ipaddr ip;
struct ethaddr emac;
/* Back pointer to MAC. Only applicable to hosts in a L2-VNI. */
- zebra_mac_t *mac;
+ struct zebra_mac *mac;
/* Underlying interface. */
ifindex_t ifindex;
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
uint32_t flags;
#define ZEBRA_NEIGH_LOCAL 0x01
* Context for neighbor hash walk - used by callbacks.
*/
struct neigh_walk_ctx {
- zebra_evpn_t *zevpn; /* VNI hash */
+ struct zebra_evpn *zevpn; /* VNI hash */
struct zebra_vrf *zvrf; /* VRF - for client notification. */
int uninstall; /* uninstall from kernel? */
int upd_client; /* uninstall from client? */
};
/**************************** SYNC neigh handling **************************/
-static inline bool zebra_evpn_neigh_is_static(zebra_neigh_t *neigh)
+static inline bool zebra_evpn_neigh_is_static(struct zebra_neigh *neigh)
{
return !!(neigh->flags & ZEBRA_NEIGH_ALL_PEER_FLAGS);
}
-static inline bool zebra_evpn_neigh_is_ready_for_bgp(zebra_neigh_t *n)
+static inline bool zebra_evpn_neigh_is_ready_for_bgp(struct zebra_neigh *n)
{
bool mac_ready;
bool neigh_ready;
return mac_ready && neigh_ready;
}
-static inline void zebra_evpn_neigh_stop_hold_timer(zebra_neigh_t *n)
+static inline void zebra_evpn_neigh_stop_hold_timer(struct zebra_neigh *n)
{
if (!n->hold_timer)
return;
THREAD_OFF(n->hold_timer);
}
-void zebra_evpn_sync_neigh_static_chg(zebra_neigh_t *n, bool old_n_static,
+void zebra_evpn_sync_neigh_static_chg(struct zebra_neigh *n, bool old_n_static,
bool new_n_static, bool defer_n_dp,
bool defer_mac_dp, const char *caller);
-static inline bool zebra_evpn_neigh_clear_sync_info(zebra_neigh_t *n)
+static inline bool zebra_evpn_neigh_clear_sync_info(struct zebra_neigh *n)
{
bool old_n_static = false;
bool new_n_static = false;
return old_n_static != new_n_static;
}
-int remote_neigh_count(zebra_mac_t *zmac);
+int remote_neigh_count(struct zebra_mac *zmac);
int neigh_list_cmp(void *p1, void *p2);
struct hash *zebra_neigh_db_create(const char *desc);
-uint32_t num_dup_detected_neighs(zebra_evpn_t *zevpn);
+uint32_t num_dup_detected_neighs(struct zebra_evpn *zevpn);
void zebra_evpn_find_neigh_addr_width(struct hash_bucket *bucket, void *ctxt);
-int remote_neigh_count(zebra_mac_t *zmac);
-int zebra_evpn_rem_neigh_install(zebra_evpn_t *zevpn, zebra_neigh_t *n,
- bool was_static);
+int remote_neigh_count(struct zebra_mac *zmac);
+int zebra_evpn_rem_neigh_install(struct zebra_evpn *zevpn,
+ struct zebra_neigh *n, bool was_static);
void zebra_evpn_install_neigh_hash(struct hash_bucket *bucket, void *ctxt);
int zebra_evpn_neigh_send_add_to_client(vni_t vni, const struct ipaddr *ip,
const struct ethaddr *macaddr,
- zebra_mac_t *zmac, uint32_t neigh_flags,
- uint32_t seq);
+ struct zebra_mac *zmac,
+ uint32_t neigh_flags, uint32_t seq);
int zebra_evpn_neigh_send_del_to_client(vni_t vni, struct ipaddr *ip,
struct ethaddr *macaddr, uint32_t flags,
int state, bool force);
-bool zebra_evpn_neigh_is_bgp_seq_ok(zebra_evpn_t *zevpn, zebra_neigh_t *n,
+bool zebra_evpn_neigh_is_bgp_seq_ok(struct zebra_evpn *zevpn,
+ struct zebra_neigh *n,
const struct ethaddr *macaddr, uint32_t seq,
bool sync);
-int zebra_evpn_neigh_del(zebra_evpn_t *zevpn, zebra_neigh_t *n);
-void zebra_evpn_sync_neigh_del(zebra_neigh_t *n);
-zebra_neigh_t *
-zebra_evpn_proc_sync_neigh_update(zebra_evpn_t *zevpn, zebra_neigh_t *n,
- uint16_t ipa_len, const struct ipaddr *ipaddr,
- uint8_t flags, uint32_t seq, const esi_t *esi,
- struct sync_mac_ip_ctx *ctx);
-void zebra_evpn_neigh_del_all(zebra_evpn_t *zevpn, int uninstall,
+int zebra_evpn_neigh_del(struct zebra_evpn *zevpn, struct zebra_neigh *n);
+void zebra_evpn_sync_neigh_del(struct zebra_neigh *n);
+struct zebra_neigh *zebra_evpn_proc_sync_neigh_update(
+ struct zebra_evpn *zevpn, struct zebra_neigh *n, uint16_t ipa_len,
+ const struct ipaddr *ipaddr, uint8_t flags, uint32_t seq,
+ const esi_t *esi, struct sync_mac_ip_ctx *ctx);
+void zebra_evpn_neigh_del_all(struct zebra_evpn *zevpn, int uninstall,
int upd_client, uint32_t flags);
-zebra_neigh_t *zebra_evpn_neigh_lookup(zebra_evpn_t *zevpn,
- const struct ipaddr *ip);
-
-int zebra_evpn_rem_neigh_install(zebra_evpn_t *zevpn, zebra_neigh_t *n,
- bool was_static);
-void zebra_evpn_process_neigh_on_remote_mac_add(zebra_evpn_t *zevpn,
- zebra_mac_t *zmac);
-void zebra_evpn_process_neigh_on_local_mac_del(zebra_evpn_t *zevpn,
- zebra_mac_t *zmac);
-void zebra_evpn_process_neigh_on_local_mac_change(zebra_evpn_t *zevpn,
- zebra_mac_t *zmac,
+struct zebra_neigh *zebra_evpn_neigh_lookup(struct zebra_evpn *zevpn,
+ const struct ipaddr *ip);
+
+int zebra_evpn_rem_neigh_install(struct zebra_evpn *zevpn,
+ struct zebra_neigh *n, bool was_static);
+void zebra_evpn_process_neigh_on_remote_mac_add(struct zebra_evpn *zevpn,
+ struct zebra_mac *zmac);
+void zebra_evpn_process_neigh_on_local_mac_del(struct zebra_evpn *zevpn,
+ struct zebra_mac *zmac);
+void zebra_evpn_process_neigh_on_local_mac_change(struct zebra_evpn *zevpn,
+ struct zebra_mac *zmac,
bool seq_change,
bool es_change);
-void zebra_evpn_process_neigh_on_remote_mac_del(zebra_evpn_t *zevpn,
- zebra_mac_t *zmac);
-int zebra_evpn_local_neigh_update(zebra_evpn_t *zevpn, struct interface *ifp,
+void zebra_evpn_process_neigh_on_remote_mac_del(struct zebra_evpn *zevpn,
+ struct zebra_mac *zmac);
+int zebra_evpn_local_neigh_update(struct zebra_evpn *zevpn,
+ struct interface *ifp,
const struct ipaddr *ip,
const struct ethaddr *macaddr, bool is_router,
bool local_inactive, bool dp_static);
-int zebra_evpn_remote_neigh_update(zebra_evpn_t *zevpn, struct interface *ifp,
+int zebra_evpn_remote_neigh_update(struct zebra_evpn *zevpn,
+ struct interface *ifp,
const struct ipaddr *ip,
const struct ethaddr *macaddr,
uint16_t state);
-void zebra_evpn_send_neigh_to_client(zebra_evpn_t *zevpn);
+void zebra_evpn_send_neigh_to_client(struct zebra_evpn *zevpn);
void zebra_evpn_clear_dup_neigh_hash(struct hash_bucket *bucket, void *ctxt);
-void zebra_evpn_print_neigh(zebra_neigh_t *n, void *ctxt, json_object *json);
+void zebra_evpn_print_neigh(struct zebra_neigh *n, void *ctxt,
+ json_object *json);
void zebra_evpn_print_neigh_hash(struct hash_bucket *bucket, void *ctxt);
void zebra_evpn_print_neigh_hdr(struct vty *vty, struct neigh_walk_ctx *wctx);
void zebra_evpn_print_neigh_hash_detail(struct hash_bucket *bucket, void *ctxt);
void zebra_evpn_print_dad_neigh_hash(struct hash_bucket *bucket, void *ctxt);
void zebra_evpn_print_dad_neigh_hash_detail(struct hash_bucket *bucket,
void *ctxt);
-void zebra_evpn_neigh_remote_macip_add(zebra_evpn_t *zevpn,
+void zebra_evpn_neigh_remote_macip_add(struct zebra_evpn *zevpn,
struct zebra_vrf *zvrf,
const struct ipaddr *ipaddr,
- zebra_mac_t *mac, struct in_addr vtep_ip,
- uint8_t flags, uint32_t seq);
-int zebra_evpn_neigh_gw_macip_add(struct interface *ifp, zebra_evpn_t *zevpn,
- struct ipaddr *ip, zebra_mac_t *mac);
-void zebra_evpn_neigh_remote_uninstall(zebra_evpn_t *zevpn,
- struct zebra_vrf *zvrf, zebra_neigh_t *n,
- zebra_mac_t *mac,
+ struct zebra_mac *mac,
+ struct in_addr vtep_ip, uint8_t flags,
+ uint32_t seq);
+int zebra_evpn_neigh_gw_macip_add(struct interface *ifp,
+ struct zebra_evpn *zevpn, struct ipaddr *ip,
+ struct zebra_mac *mac);
+void zebra_evpn_neigh_remote_uninstall(struct zebra_evpn *zevpn,
+ struct zebra_vrf *zvrf,
+ struct zebra_neigh *n,
+ struct zebra_mac *mac,
const struct ipaddr *ipaddr);
-int zebra_evpn_neigh_del_ip(zebra_evpn_t *zevpn, const struct ipaddr *ip);
+int zebra_evpn_neigh_del_ip(struct zebra_evpn *zevpn, const struct ipaddr *ip);
#ifdef __cplusplus
}
/* EVPN<=>vxlan_zif association */
-static inline void zevpn_vxlan_if_set(zebra_evpn_t *zevpn,
+static inline void zevpn_vxlan_if_set(struct zebra_evpn *zevpn,
struct interface *ifp, bool set)
{
struct zebra_if *zif;
static void zfpm_start_stats_timer(void);
static void zfpm_mac_info_del(struct fpm_mac_info_t *fpm_mac);
+static const char ipv4_ll_buf[16] = "169.254.0.1";
+union g_addr ipv4ll_gateway;
+
/*
* zfpm_thread_should_yield
*/
* This function checks if we already have enqueued an update for this RMAC,
* If yes, update the same fpm_mac_info_t. Else, create and enqueue an update.
*/
-static int zfpm_trigger_rmac_update(zebra_mac_t *rmac, zebra_l3vni_t *zl3vni,
- bool delete, const char *reason)
+static int zfpm_trigger_rmac_update(struct zebra_mac *rmac,
+ struct zebra_l3vni *zl3vni, bool delete,
+ const char *reason)
{
struct fpm_mac_info_t *fpm_mac, key;
struct interface *vxlan_if, *svi_if;
static void zfpm_trigger_rmac_update_wrapper(struct hash_bucket *bucket,
void *args)
{
- zebra_mac_t *zrmac = (zebra_mac_t *)bucket->data;
- zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)args;
+ struct zebra_mac *zrmac = (struct zebra_mac *)bucket->data;
+ struct zebra_l3vni *zl3vni = (struct zebra_l3vni *)args;
zfpm_trigger_rmac_update(zrmac, zl3vni, false, "RMAC added");
}
*/
static void zfpm_iterate_rmac_table(struct hash_bucket *bucket, void *args)
{
- zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)bucket->data;
+ struct zebra_l3vni *zl3vni = (struct zebra_l3vni *)bucket->data;
hash_iterate(zl3vni->rmac_table, zfpm_trigger_rmac_update_wrapper,
(void *)zl3vni);
zfpm_stats_init(&zfpm_g->last_ivl_stats);
zfpm_stats_init(&zfpm_g->cumulative_stats);
+ memset(&ipv4ll_gateway, 0, sizeof(ipv4ll_gateway));
+ inet_pton(AF_INET, ipv4_ll_buf, &ipv4ll_gateway.ipv4);
+
install_node(&zebra_node);
install_element(ENABLE_NODE, &show_zebra_fpm_stats_cmd);
install_element(ENABLE_NODE, &clear_zebra_fpm_stats_cmd);
if (nexthop->type == NEXTHOP_TYPE_IPV6
|| nexthop->type == NEXTHOP_TYPE_IPV6_IFINDEX) {
- nhi.gateway = &nexthop->gate;
+ /* Special handling for IPv4 route with IPv6 Link Local next hop
+ */
+ if (ri->af == AF_INET)
+ nhi.gateway = &ipv4ll_gateway;
+ else
+ nhi.gateway = &nexthop->gate;
}
if (nexthop->type == NEXTHOP_TYPE_IFINDEX) {
extern struct route_entry *zfpm_route_for_update(rib_dest_t *dest);
+extern union g_addr ipv4ll_gateway;
+
#ifdef __cplusplus
}
#endif
static void fec_evaluate(struct zebra_vrf *zvrf);
static uint32_t fec_derive_label_from_index(struct zebra_vrf *vrf,
- zebra_fec_t *fec);
+ struct zebra_fec *fec);
static int lsp_install(struct zebra_vrf *zvrf, mpls_label_t label,
struct route_node *rn, struct route_entry *re);
static int lsp_uninstall(struct zebra_vrf *zvrf, mpls_label_t label);
-static int fec_change_update_lsp(struct zebra_vrf *zvrf, zebra_fec_t *fec,
+static int fec_change_update_lsp(struct zebra_vrf *zvrf, struct zebra_fec *fec,
mpls_label_t old_label);
-static int fec_send(zebra_fec_t *fec, struct zserv *client);
-static void fec_update_clients(zebra_fec_t *fec);
-static void fec_print(zebra_fec_t *fec, struct vty *vty);
-static zebra_fec_t *fec_find(struct route_table *table, struct prefix *p);
-static zebra_fec_t *fec_add(struct route_table *table, struct prefix *p,
- mpls_label_t label, uint32_t flags,
- uint32_t label_index);
-static int fec_del(zebra_fec_t *fec);
+static int fec_send(struct zebra_fec *fec, struct zserv *client);
+static void fec_update_clients(struct zebra_fec *fec);
+static void fec_print(struct zebra_fec *fec, struct vty *vty);
+static struct zebra_fec *fec_find(struct route_table *table, struct prefix *p);
+static struct zebra_fec *fec_add(struct route_table *table, struct prefix *p,
+ mpls_label_t label, uint32_t flags,
+ uint32_t label_index);
+static int fec_del(struct zebra_fec *fec);
static unsigned int label_hash(const void *p);
static bool label_cmp(const void *p1, const void *p2);
-static int nhlfe_nexthop_active_ipv4(zebra_nhlfe_t *nhlfe,
+static int nhlfe_nexthop_active_ipv4(struct zebra_nhlfe *nhlfe,
struct nexthop *nexthop);
-static int nhlfe_nexthop_active_ipv6(zebra_nhlfe_t *nhlfe,
+static int nhlfe_nexthop_active_ipv6(struct zebra_nhlfe *nhlfe,
struct nexthop *nexthop);
-static int nhlfe_nexthop_active(zebra_nhlfe_t *nhlfe);
+static int nhlfe_nexthop_active(struct zebra_nhlfe *nhlfe);
-static void lsp_select_best_nhlfe(zebra_lsp_t *lsp);
+static void lsp_select_best_nhlfe(struct zebra_lsp *lsp);
static void lsp_uninstall_from_kernel(struct hash_bucket *bucket, void *ctxt);
static void lsp_schedule(struct hash_bucket *bucket, void *ctxt);
static wq_item_status lsp_process(struct work_queue *wq, void *data);
static void lsp_processq_del(struct work_queue *wq, void *data);
static void lsp_processq_complete(struct work_queue *wq);
-static int lsp_processq_add(zebra_lsp_t *lsp);
+static int lsp_processq_add(struct zebra_lsp *lsp);
static void *lsp_alloc(void *p);
/* Check whether lsp can be freed - no nhlfes, e.g., and call free api */
-static void lsp_check_free(struct hash *lsp_table, zebra_lsp_t **plsp);
+static void lsp_check_free(struct hash *lsp_table, struct zebra_lsp **plsp);
/* Free lsp; sets caller's pointer to NULL */
-static void lsp_free(struct hash *lsp_table, zebra_lsp_t **plsp);
+static void lsp_free(struct hash *lsp_table, struct zebra_lsp **plsp);
-static char *nhlfe2str(const zebra_nhlfe_t *nhlfe, char *buf, int size);
-static char *nhlfe_config_str(const zebra_nhlfe_t *nhlfe, char *buf, int size);
-static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype,
+static char *nhlfe2str(const struct zebra_nhlfe *nhlfe, char *buf, int size);
+static char *nhlfe_config_str(const struct zebra_nhlfe *nhlfe, char *buf,
+ int size);
+static int nhlfe_nhop_match(struct zebra_nhlfe *nhlfe,
+ enum nexthop_types_t gtype,
const union g_addr *gate, ifindex_t ifindex);
-static zebra_nhlfe_t *nhlfe_find(struct nhlfe_list_head *list,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate, ifindex_t ifindex);
-static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate, ifindex_t ifindex,
- uint8_t num_labels, const mpls_label_t *labels,
- bool is_backup);
-static int nhlfe_del(zebra_nhlfe_t *nhlfe);
-static void nhlfe_free(zebra_nhlfe_t *nhlfe);
-static void nhlfe_out_label_update(zebra_nhlfe_t *nhlfe,
+static struct zebra_nhlfe *nhlfe_find(struct nhlfe_list_head *list,
+ enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype,
+ const union g_addr *gate,
+ ifindex_t ifindex);
+static struct zebra_nhlfe *
+nhlfe_add(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype, const union g_addr *gate,
+ ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *labels,
+ bool is_backup);
+static int nhlfe_del(struct zebra_nhlfe *nhlfe);
+static void nhlfe_free(struct zebra_nhlfe *nhlfe);
+static void nhlfe_out_label_update(struct zebra_nhlfe *nhlfe,
struct mpls_label_stack *nh_label);
-static int mpls_lsp_uninstall_all(struct hash *lsp_table, zebra_lsp_t *lsp,
+static int mpls_lsp_uninstall_all(struct hash *lsp_table, struct zebra_lsp *lsp,
enum lsp_types_t type);
static int mpls_static_lsp_uninstall_all(struct zebra_vrf *zvrf,
mpls_label_t in_label);
-static void nhlfe_print(zebra_nhlfe_t *nhlfe, struct vty *vty,
+static void nhlfe_print(struct zebra_nhlfe *nhlfe, struct vty *vty,
const char *indent);
-static void lsp_print(struct vty *vty, zebra_lsp_t *lsp);
+static void lsp_print(struct vty *vty, struct zebra_lsp *lsp);
static void mpls_lsp_uninstall_all_type(struct hash_bucket *bucket, void *ctxt);
static void mpls_ftn_uninstall_all(struct zebra_vrf *zvrf,
int afi, enum lsp_types_t lsp_type);
-static int lsp_znh_install(zebra_lsp_t *lsp, enum lsp_types_t type,
+static int lsp_znh_install(struct zebra_lsp *lsp, enum lsp_types_t type,
const struct zapi_nexthop *znh);
-static int lsp_backup_znh_install(zebra_lsp_t *lsp, enum lsp_types_t type,
+static int lsp_backup_znh_install(struct zebra_lsp *lsp, enum lsp_types_t type,
const struct zapi_nexthop *znh);
/* Static functions */
/*
* Handle failure in LSP install, clear flags for NHLFE.
*/
-static void clear_nhlfe_installed(zebra_lsp_t *lsp)
+static void clear_nhlfe_installed(struct zebra_lsp *lsp)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
struct nexthop *nexthop;
frr_each_safe(nhlfe_list, &lsp->nhlfe_list, nhlfe) {
struct route_node *rn, struct route_entry *re)
{
struct hash *lsp_table;
- zebra_ile_t tmp_ile;
- zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_ile tmp_ile;
+ struct zebra_lsp *lsp;
+ struct zebra_nhlfe *nhlfe;
struct nexthop *nexthop;
enum lsp_types_t lsp_type;
char buf[BUFSIZ];
static int lsp_uninstall(struct zebra_vrf *zvrf, mpls_label_t label)
{
struct hash *lsp_table;
- zebra_ile_t tmp_ile;
- zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_ile tmp_ile;
+ struct zebra_lsp *lsp;
+ struct zebra_nhlfe *nhlfe;
char buf[BUFSIZ];
/* Lookup table. */
static void fec_evaluate(struct zebra_vrf *zvrf)
{
struct route_node *rn;
- zebra_fec_t *fec;
+ struct zebra_fec *fec;
uint32_t old_label, new_label;
int af;
* globally configured label block (SRGB).
*/
static uint32_t fec_derive_label_from_index(struct zebra_vrf *zvrf,
- zebra_fec_t *fec)
+ struct zebra_fec *fec)
{
uint32_t label;
* There is a change for this FEC. Install or uninstall label forwarding
* entries, as appropriate.
*/
-static int fec_change_update_lsp(struct zebra_vrf *zvrf, zebra_fec_t *fec,
+static int fec_change_update_lsp(struct zebra_vrf *zvrf, struct zebra_fec *fec,
mpls_label_t old_label)
{
struct route_table *table;
/*
* Inform about FEC to a registered client.
*/
-static int fec_send(zebra_fec_t *fec, struct zserv *client)
+static int fec_send(struct zebra_fec *fec, struct zserv *client)
{
struct stream *s;
struct route_node *rn;
* Update all registered clients about this FEC. Caller should've updated
* FEC and ensure no duplicate updates.
*/
-static void fec_update_clients(zebra_fec_t *fec)
+static void fec_update_clients(struct zebra_fec *fec)
{
struct listnode *node;
struct zserv *client;
/*
* Print a FEC-label binding entry.
*/
-static void fec_print(zebra_fec_t *fec, struct vty *vty)
+static void fec_print(struct zebra_fec *fec, struct vty *vty)
{
struct route_node *rn;
struct listnode *node;
/*
* Locate FEC-label binding that matches with passed info.
*/
-static zebra_fec_t *fec_find(struct route_table *table, struct prefix *p)
+static struct zebra_fec *fec_find(struct route_table *table, struct prefix *p)
{
struct route_node *rn;
* Add a FEC. This may be upon a client registering for a binding
* or when a binding is configured.
*/
-static zebra_fec_t *fec_add(struct route_table *table, struct prefix *p,
- mpls_label_t label, uint32_t flags,
- uint32_t label_index)
+static struct zebra_fec *fec_add(struct route_table *table, struct prefix *p,
+ mpls_label_t label, uint32_t flags,
+ uint32_t label_index)
{
struct route_node *rn;
- zebra_fec_t *fec;
+ struct zebra_fec *fec;
apply_mask(p);
fec = rn->info;
if (!fec) {
- fec = XCALLOC(MTYPE_FEC, sizeof(zebra_fec_t));
+ fec = XCALLOC(MTYPE_FEC, sizeof(struct zebra_fec));
rn->info = fec;
fec->rn = rn;
* a FEC and no binding exists or when the binding is deleted and there
* are no registered clients.
*/
-static int fec_del(zebra_fec_t *fec)
+static int fec_del(struct zebra_fec *fec)
{
list_delete(&fec->client_list);
fec->rn->info = NULL;
*/
static unsigned int label_hash(const void *p)
{
- const zebra_ile_t *ile = p;
+ const struct zebra_ile *ile = p;
return (jhash_1word(ile->in_label, 0));
}
*/
static bool label_cmp(const void *p1, const void *p2)
{
- const zebra_ile_t *ile1 = p1;
- const zebra_ile_t *ile2 = p2;
+ const struct zebra_ile *ile1 = p1;
+ const struct zebra_ile *ile2 = p2;
return (ile1->in_label == ile2->in_label);
}
* the passed flag.
* NOTE: Looking only for connected routes right now.
*/
-static int nhlfe_nexthop_active_ipv4(zebra_nhlfe_t *nhlfe,
+static int nhlfe_nexthop_active_ipv4(struct zebra_nhlfe *nhlfe,
struct nexthop *nexthop)
{
struct route_table *table;
* the passed flag.
* NOTE: Looking only for connected routes right now.
*/
-static int nhlfe_nexthop_active_ipv6(zebra_nhlfe_t *nhlfe,
+static int nhlfe_nexthop_active_ipv6(struct zebra_nhlfe *nhlfe,
struct nexthop *nexthop)
{
struct route_table *table;
* or not.
* NOTE: Each NHLFE points to only 1 nexthop.
*/
-static int nhlfe_nexthop_active(zebra_nhlfe_t *nhlfe)
+static int nhlfe_nexthop_active(struct zebra_nhlfe *nhlfe)
{
struct nexthop *nexthop;
struct interface *ifp;
* marked. This is invoked when an LSP scheduled for processing (due
* to some change) is examined.
*/
-static void lsp_select_best_nhlfe(zebra_lsp_t *lsp)
+static void lsp_select_best_nhlfe(struct zebra_lsp *lsp)
{
- zebra_nhlfe_t *nhlfe;
- zebra_nhlfe_t *best;
+ struct zebra_nhlfe *nhlfe;
+ struct zebra_nhlfe *best;
struct nexthop *nexthop;
int changed = 0;
*/
static void lsp_uninstall_from_kernel(struct hash_bucket *bucket, void *ctxt)
{
- zebra_lsp_t *lsp;
+ struct zebra_lsp *lsp;
- lsp = (zebra_lsp_t *)bucket->data;
+ lsp = (struct zebra_lsp *)bucket->data;
if (CHECK_FLAG(lsp->flags, LSP_FLAG_INSTALLED))
(void)dplane_lsp_delete(lsp);
}
*/
static void lsp_schedule(struct hash_bucket *bucket, void *ctxt)
{
- zebra_lsp_t *lsp;
+ struct zebra_lsp *lsp;
- lsp = (zebra_lsp_t *)bucket->data;
+ lsp = (struct zebra_lsp *)bucket->data;
/* In the common flow, this is used when external events occur. For
* LSPs with backup nhlfes, we'll assume that the forwarding
*/
static wq_item_status lsp_process(struct work_queue *wq, void *data)
{
- zebra_lsp_t *lsp;
- zebra_nhlfe_t *oldbest, *newbest;
+ struct zebra_lsp *lsp;
+ struct zebra_nhlfe *oldbest, *newbest;
char buf[BUFSIZ], buf2[BUFSIZ];
struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
enum zebra_dplane_result res;
- lsp = (zebra_lsp_t *)data;
+ lsp = (struct zebra_lsp *)data;
if (!lsp) // unexpected
return WQ_SUCCESS;
break;
}
} else if (CHECK_FLAG(lsp->flags, LSP_FLAG_CHANGED)) {
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
struct nexthop *nexthop;
UNSET_FLAG(lsp->flags, LSP_FLAG_CHANGED);
static void lsp_processq_del(struct work_queue *wq, void *data)
{
struct zebra_vrf *zvrf;
- zebra_lsp_t *lsp;
+ struct zebra_lsp *lsp;
struct hash *lsp_table;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
zvrf = vrf_info_lookup(VRF_DEFAULT);
assert(zvrf);
if (!lsp_table) // unexpected
return;
- lsp = (zebra_lsp_t *)data;
+ lsp = (struct zebra_lsp *)data;
if (!lsp) // unexpected
return;
/*
* Add LSP forwarding entry to queue for subsequent processing.
*/
-static int lsp_processq_add(zebra_lsp_t *lsp)
+static int lsp_processq_add(struct zebra_lsp *lsp)
{
/* If already scheduled, exit. */
if (CHECK_FLAG(lsp->flags, LSP_FLAG_SCHEDULED))
*/
static void *lsp_alloc(void *p)
{
- const zebra_ile_t *ile = p;
- zebra_lsp_t *lsp;
+ const struct zebra_ile *ile = p;
+ struct zebra_lsp *lsp;
- lsp = XCALLOC(MTYPE_LSP, sizeof(zebra_lsp_t));
+ lsp = XCALLOC(MTYPE_LSP, sizeof(struct zebra_lsp));
lsp->ile = *ile;
nhlfe_list_init(&lsp->nhlfe_list);
nhlfe_list_init(&lsp->backup_nhlfe_list);
/*
* Check whether lsp can be freed - no nhlfes, e.g., and call free api
*/
-static void lsp_check_free(struct hash *lsp_table, zebra_lsp_t **plsp)
+static void lsp_check_free(struct hash *lsp_table, struct zebra_lsp **plsp)
{
- zebra_lsp_t *lsp;
+ struct zebra_lsp *lsp;
if (plsp == NULL || *plsp == NULL)
return;
* Dtor for an LSP: remove from ile hash, release any internal allocations,
* free LSP object.
*/
-static void lsp_free(struct hash *lsp_table, zebra_lsp_t **plsp)
+static void lsp_free(struct hash *lsp_table, struct zebra_lsp **plsp)
{
- zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_lsp *lsp;
+ struct zebra_nhlfe *nhlfe;
if (plsp == NULL || *plsp == NULL)
return;
/*
* Create printable string for NHLFE entry.
*/
-static char *nhlfe2str(const zebra_nhlfe_t *nhlfe, char *buf, int size)
+static char *nhlfe2str(const struct zebra_nhlfe *nhlfe, char *buf, int size)
{
const struct nexthop *nexthop;
/*
* Check if NHLFE matches with search info passed.
*/
-static int nhlfe_nhop_match(zebra_nhlfe_t *nhlfe, enum nexthop_types_t gtype,
+static int nhlfe_nhop_match(struct zebra_nhlfe *nhlfe,
+ enum nexthop_types_t gtype,
const union g_addr *gate, ifindex_t ifindex)
{
struct nexthop *nhop;
/*
* Locate NHLFE that matches with passed info.
*/
-static zebra_nhlfe_t *nhlfe_find(struct nhlfe_list_head *list,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate, ifindex_t ifindex)
+static struct zebra_nhlfe *nhlfe_find(struct nhlfe_list_head *list,
+ enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype,
+ const union g_addr *gate,
+ ifindex_t ifindex)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
frr_each_safe(nhlfe_list, list, nhlfe) {
if (nhlfe->type != lsp_type)
/*
* Allocate and init new NHLFE.
*/
-static zebra_nhlfe_t *nhlfe_alloc(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate, ifindex_t ifindex,
- uint8_t num_labels,
- const mpls_label_t *labels)
+static struct zebra_nhlfe *
+nhlfe_alloc(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype, const union g_addr *gate,
+ ifindex_t ifindex, uint8_t num_labels, const mpls_label_t *labels)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
struct nexthop *nexthop;
assert(lsp);
- nhlfe = XCALLOC(MTYPE_NHLFE, sizeof(zebra_nhlfe_t));
+ nhlfe = XCALLOC(MTYPE_NHLFE, sizeof(struct zebra_nhlfe));
nhlfe->lsp = lsp;
nhlfe->type = lsp_type;
* Add primary or backup NHLFE. Base entry must have been created and
* duplicate check done.
*/
-static zebra_nhlfe_t *nhlfe_add(zebra_lsp_t *lsp, enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate, ifindex_t ifindex,
- uint8_t num_labels, const mpls_label_t *labels,
- bool is_backup)
+static struct zebra_nhlfe *nhlfe_add(struct zebra_lsp *lsp,
+ enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype,
+ const union g_addr *gate,
+ ifindex_t ifindex, uint8_t num_labels,
+ const mpls_label_t *labels, bool is_backup)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
if (!lsp)
return NULL;
/*
* Common delete for NHLFEs.
*/
-static void nhlfe_free(zebra_nhlfe_t *nhlfe)
+static void nhlfe_free(struct zebra_nhlfe *nhlfe)
{
if (!nhlfe)
return;
/*
* Disconnect NHLFE from LSP, and free. Entry must be present on LSP's list.
*/
-static int nhlfe_del(zebra_nhlfe_t *nhlfe)
+static int nhlfe_del(struct zebra_nhlfe *nhlfe)
{
- zebra_lsp_t *lsp;
+ struct zebra_lsp *lsp;
if (!nhlfe)
return -1;
/*
* Update label for NHLFE entry.
*/
-static void nhlfe_out_label_update(zebra_nhlfe_t *nhlfe,
+static void nhlfe_out_label_update(struct zebra_nhlfe *nhlfe,
struct mpls_label_stack *nh_label)
{
nhlfe->nexthop->nh_label->label[0] = nh_label->label[0];
}
-static int mpls_lsp_uninstall_all(struct hash *lsp_table, zebra_lsp_t *lsp,
+static int mpls_lsp_uninstall_all(struct hash *lsp_table, struct zebra_lsp *lsp,
enum lsp_types_t type)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
int schedule_lsp = 0;
char buf[BUFSIZ];
mpls_label_t in_label)
{
struct hash *lsp_table;
- zebra_ile_t tmp_ile;
- zebra_lsp_t *lsp;
+ struct zebra_ile tmp_ile;
+ struct zebra_lsp *lsp;
/* Lookup table. */
lsp_table = zvrf->lsp_table;
return mpls_lsp_uninstall_all(lsp_table, lsp, ZEBRA_LSP_STATIC);
}
-static json_object *nhlfe_json(zebra_nhlfe_t *nhlfe)
+static json_object *nhlfe_json(struct zebra_nhlfe *nhlfe)
{
char buf[BUFSIZ];
json_object *json_nhlfe = NULL;
/*
* Print the NHLFE for a LSP forwarding entry.
*/
-static void nhlfe_print(zebra_nhlfe_t *nhlfe, struct vty *vty,
+static void nhlfe_print(struct zebra_nhlfe *nhlfe, struct vty *vty,
const char *indent)
{
struct nexthop *nexthop;
/*
* Print an LSP forwarding entry.
*/
-static void lsp_print(struct vty *vty, zebra_lsp_t *lsp)
+static void lsp_print(struct vty *vty, struct zebra_lsp *lsp)
{
- zebra_nhlfe_t *nhlfe, *backup;
+ struct zebra_nhlfe *nhlfe, *backup;
int i, j;
vty_out(vty, "Local label: %u%s\n", lsp->ile.in_label,
/*
* JSON objects for an LSP forwarding entry.
*/
-static json_object *lsp_json(zebra_lsp_t *lsp)
+static json_object *lsp_json(struct zebra_lsp *lsp)
{
- zebra_nhlfe_t *nhlfe = NULL;
+ struct zebra_nhlfe *nhlfe = NULL;
json_object *json = json_object_new_object();
json_object *json_nhlfe_list = json_object_new_array();
/*
* Compare two LSPs based on their label values.
*/
-static int lsp_cmp(const zebra_lsp_t *lsp1, const zebra_lsp_t *lsp2)
+static int lsp_cmp(const struct zebra_lsp *lsp1, const struct zebra_lsp *lsp2)
{
if (lsp1->ile.in_label < lsp2->ile.in_label)
return -1;
{
struct zebra_vrf *zvrf;
mpls_label_t label;
- zebra_ile_t tmp_ile;
+ struct zebra_ile tmp_ile;
struct hash *lsp_table;
- zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_lsp *lsp;
+ struct zebra_nhlfe *nhlfe;
struct nexthop *nexthop;
enum dplane_op_e op;
enum zebra_dplane_result status;
struct nhlfe_list_head *nhlfe_head,
int *start_counter, int *end_counter)
{
- zebra_nhlfe_t *nhlfe;
- const zebra_nhlfe_t *ctx_nhlfe;
+ struct zebra_nhlfe *nhlfe;
+ const struct zebra_nhlfe *ctx_nhlfe;
struct nexthop *nexthop;
const struct nexthop *ctx_nexthop;
int start_count = 0, end_count = 0;
const struct nhlfe_list_head *ctx_head)
{
int ret = 0;
- zebra_nhlfe_t *nhlfe;
- const zebra_nhlfe_t *ctx_nhlfe;
+ struct zebra_nhlfe *nhlfe;
+ const struct zebra_nhlfe *ctx_nhlfe;
struct nexthop *nexthop;
const struct nexthop *ctx_nexthop;
bool is_debug = (IS_ZEBRA_DEBUG_DPLANE | IS_ZEBRA_DEBUG_MPLS);
void zebra_mpls_process_dplane_notify(struct zebra_dplane_ctx *ctx)
{
struct zebra_vrf *zvrf;
- zebra_ile_t tmp_ile;
+ struct zebra_ile tmp_ile;
struct hash *lsp_table;
- zebra_lsp_t *lsp;
+ struct zebra_lsp *lsp;
const struct nhlfe_list_head *ctx_list;
int start_count = 0, end_count = 0; /* Installed counts */
bool changed_p = false;
struct route_entry *re)
{
struct route_table *table;
- zebra_fec_t *fec;
+ struct zebra_fec *fec;
table = zvrf->fec_table[family2afi(PREFIX_FAMILY(&rn->p))];
if (!table)
struct route_entry *re)
{
struct route_table *table;
- zebra_fec_t *fec;
+ struct zebra_fec *fec;
table = zvrf->fec_table[family2afi(PREFIX_FAMILY(&rn->p))];
if (!table)
* Add an NHLFE to an LSP, return the newly-added object. This path only changes
* the LSP object - nothing is scheduled for processing, for example.
*/
-zebra_nhlfe_t *zebra_mpls_lsp_add_nhlfe(zebra_lsp_t *lsp,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate,
- ifindex_t ifindex,
- uint8_t num_labels,
- const mpls_label_t *out_labels)
+struct zebra_nhlfe *
+zebra_mpls_lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype, const union g_addr *gate,
+ ifindex_t ifindex, uint8_t num_labels,
+ const mpls_label_t *out_labels)
{
/* Just a public pass-through to the internal implementation */
return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
* This path only changes the LSP object - nothing is scheduled for
* processing, for example.
*/
-zebra_nhlfe_t *zebra_mpls_lsp_add_backup_nhlfe(zebra_lsp_t *lsp,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate,
- ifindex_t ifindex,
- uint8_t num_labels,
- const mpls_label_t *out_labels)
+struct zebra_nhlfe *zebra_mpls_lsp_add_backup_nhlfe(
+ struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype, const union g_addr *gate, ifindex_t ifindex,
+ uint8_t num_labels, const mpls_label_t *out_labels)
{
/* Just a public pass-through to the internal implementation */
return nhlfe_add(lsp, lsp_type, gtype, gate, ifindex, num_labels,
/*
* Add an NHLFE to an LSP based on a nexthop; return the newly-added object
*/
-zebra_nhlfe_t *zebra_mpls_lsp_add_nh(zebra_lsp_t *lsp,
- enum lsp_types_t lsp_type,
- const struct nexthop *nh)
+struct zebra_nhlfe *zebra_mpls_lsp_add_nh(struct zebra_lsp *lsp,
+ enum lsp_types_t lsp_type,
+ const struct nexthop *nh)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
return NULL;
* Add a backup NHLFE to an LSP based on a nexthop;
* return the newly-added object.
*/
-zebra_nhlfe_t *zebra_mpls_lsp_add_backup_nh(zebra_lsp_t *lsp,
- enum lsp_types_t lsp_type,
- const struct nexthop *nh)
+struct zebra_nhlfe *zebra_mpls_lsp_add_backup_nh(struct zebra_lsp *lsp,
+ enum lsp_types_t lsp_type,
+ const struct nexthop *nh)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
if (nh->nh_label == NULL || nh->nh_label->num_labels == 0)
return NULL;
/*
* Free an allocated NHLFE
*/
-void zebra_mpls_nhlfe_free(zebra_nhlfe_t *nhlfe)
+void zebra_mpls_nhlfe_free(struct zebra_nhlfe *nhlfe)
{
/* Just a pass-through to the internal implementation */
nhlfe_free(nhlfe);
struct zserv *client)
{
struct route_table *table;
- zebra_fec_t *fec;
+ struct zebra_fec *fec;
bool new_client;
bool label_change = false;
uint32_t old_label;
struct zserv *client)
{
struct route_table *table;
- zebra_fec_t *fec;
+ struct zebra_fec *fec;
table = zvrf->fec_table[family2afi(PREFIX_FAMILY(p))];
if (!table)
{
struct zebra_vrf *zvrf = vrf_info_lookup(VRF_DEFAULT);
struct route_node *rn;
- zebra_fec_t *fec;
+ struct zebra_fec *fec;
struct listnode *node;
struct zserv *fec_client;
int af;
* TODO: Currently walks entire table, can optimize later with another
* hash..
*/
-zebra_fec_t *zebra_mpls_fec_for_label(struct zebra_vrf *zvrf,
- mpls_label_t label)
+struct zebra_fec *zebra_mpls_fec_for_label(struct zebra_vrf *zvrf,
+ mpls_label_t label)
{
struct route_node *rn;
- zebra_fec_t *fec;
+ struct zebra_fec *fec;
int af;
for (af = AFI_IP; af < AFI_MAX; af++) {
mpls_label_t in_label)
{
struct route_table *table;
- zebra_fec_t *fec;
+ struct zebra_fec *fec;
mpls_label_t old_label;
int ret = 0;
int zebra_mpls_static_fec_del(struct zebra_vrf *zvrf, struct prefix *p)
{
struct route_table *table;
- zebra_fec_t *fec;
+ struct zebra_fec *fec;
mpls_label_t old_label;
table = zvrf->fec_table[family2afi(PREFIX_FAMILY(p))];
{
struct route_node *rn;
int af;
- zebra_fec_t *fec;
+ struct zebra_fec *fec;
int write = 0;
for (af = AFI_IP; af < AFI_MAX; af++) {
afi_t afi = AFI_IP;
const struct prefix *prefix = NULL;
struct hash *lsp_table;
- zebra_ile_t tmp_ile;
- zebra_lsp_t *lsp = NULL;
+ struct zebra_ile tmp_ile;
+ struct zebra_lsp *lsp = NULL;
/* Prep LSP for add case */
if (add_p) {
* a new LSP entry or a new NHLFE for an existing in-label or an update of
* the out-label for an existing NHLFE (update case).
*/
-static zebra_nhlfe_t *
-lsp_add_nhlfe(zebra_lsp_t *lsp, enum lsp_types_t type,
+static struct zebra_nhlfe *
+lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t type,
uint8_t num_out_labels, const mpls_label_t *out_labels,
enum nexthop_types_t gtype, const union g_addr *gate,
ifindex_t ifindex, bool is_backup)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
char buf[MPLS_LABEL_STRLEN];
const char *backup_str;
const union g_addr *gate, ifindex_t ifindex)
{
struct hash *lsp_table;
- zebra_ile_t tmp_ile;
- zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_ile tmp_ile;
+ struct zebra_lsp *lsp;
+ struct zebra_nhlfe *nhlfe;
/* Lookup table. */
lsp_table = zvrf->lsp_table;
/*
* Install or replace NHLFE, using info from zapi nexthop
*/
-static int lsp_znh_install(zebra_lsp_t *lsp, enum lsp_types_t type,
+static int lsp_znh_install(struct zebra_lsp *lsp, enum lsp_types_t type,
const struct zapi_nexthop *znh)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num, znh->labels,
znh->type, &znh->gate, znh->ifindex,
/*
* Install/update backup NHLFE for an LSP, using info from a zapi message.
*/
-static int lsp_backup_znh_install(zebra_lsp_t *lsp, enum lsp_types_t type,
+static int lsp_backup_znh_install(struct zebra_lsp *lsp, enum lsp_types_t type,
const struct zapi_nexthop *znh)
{
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
nhlfe = lsp_add_nhlfe(lsp, type, znh->label_num,
znh->labels, znh->type, &znh->gate,
return 0;
}
-zebra_lsp_t *mpls_lsp_find(struct zebra_vrf *zvrf, mpls_label_t in_label)
+struct zebra_lsp *mpls_lsp_find(struct zebra_vrf *zvrf, mpls_label_t in_label)
{
struct hash *lsp_table;
- zebra_ile_t tmp_ile;
+ struct zebra_ile tmp_ile;
/* Lookup table. */
lsp_table = zvrf->lsp_table;
bool backup_p)
{
struct hash *lsp_table;
- zebra_ile_t tmp_ile;
- zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_ile tmp_ile;
+ struct zebra_lsp *lsp;
+ struct zebra_nhlfe *nhlfe;
char buf[NEXTHOP_STRLEN];
bool schedule_lsp = false;
mpls_label_t in_label)
{
struct hash *lsp_table;
- zebra_ile_t tmp_ile;
- zebra_lsp_t *lsp;
+ struct zebra_ile tmp_ile;
+ struct zebra_lsp *lsp;
/* Lookup table. */
lsp_table = zvrf->lsp_table;
static void mpls_lsp_uninstall_all_type(struct hash_bucket *bucket, void *ctxt)
{
struct lsp_uninstall_args *args = ctxt;
- zebra_lsp_t *lsp;
+ struct zebra_lsp *lsp;
struct hash *lsp_table;
- lsp = (zebra_lsp_t *)bucket->data;
+ lsp = (struct zebra_lsp *)bucket->data;
if (nhlfe_list_first(&lsp->nhlfe_list) == NULL)
return;
union g_addr *gate, ifindex_t ifindex)
{
struct hash *slsp_table;
- zebra_ile_t tmp_ile;
- zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_ile tmp_ile;
+ struct zebra_lsp *lsp;
+ struct zebra_nhlfe *nhlfe;
const struct nexthop *nh;
/* Lookup table. */
ifindex_t ifindex)
{
struct hash *slsp_table;
- zebra_ile_t tmp_ile;
- zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_ile tmp_ile;
+ struct zebra_lsp *lsp;
+ struct zebra_nhlfe *nhlfe;
char buf[BUFSIZ];
/* Lookup table. */
ifindex_t ifindex)
{
struct hash *slsp_table;
- zebra_ile_t tmp_ile;
- zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_ile tmp_ile;
+ struct zebra_lsp *lsp;
+ struct zebra_nhlfe *nhlfe;
/* Lookup table. */
slsp_table = zvrf->slsp_table;
mpls_label_t label, bool use_json)
{
struct hash *lsp_table;
- zebra_lsp_t *lsp;
- zebra_ile_t tmp_ile;
+ struct zebra_lsp *lsp;
+ struct zebra_ile tmp_ile;
json_object *json = NULL;
/* Lookup table. */
{
char buf[BUFSIZ];
json_object *json = NULL;
- zebra_lsp_t *lsp = NULL;
- zebra_nhlfe_t *nhlfe = NULL;
+ struct zebra_lsp *lsp = NULL;
+ struct zebra_nhlfe *nhlfe = NULL;
struct listnode *node = NULL;
struct list *lsp_list = hash_get_sorted_list(zvrf->lsp_table, lsp_cmp);
/*
* Create printable string for static LSP configuration.
*/
-static char *nhlfe_config_str(const zebra_nhlfe_t *nhlfe, char *buf, int size)
+static char *nhlfe_config_str(const struct zebra_nhlfe *nhlfe, char *buf,
+ int size)
{
const struct nexthop *nh;
*/
int zebra_mpls_write_lsp_config(struct vty *vty, struct zebra_vrf *zvrf)
{
- zebra_lsp_t *lsp;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_lsp *lsp;
+ struct zebra_nhlfe *nhlfe;
struct nexthop *nh;
struct listnode *node;
struct list *slsp_list =
? AF_INET6 \
: AF_INET)
-/* Typedefs */
-
-typedef struct zebra_ile_t_ zebra_ile_t;
-typedef struct zebra_nhlfe_t_ zebra_nhlfe_t;
-typedef struct zebra_lsp_t_ zebra_lsp_t;
-typedef struct zebra_fec_t_ zebra_fec_t;
-
/* Declare LSP nexthop list types */
PREDECL_DLIST(nhlfe_list);
/*
* (Outgoing) nexthop label forwarding entry
*/
-struct zebra_nhlfe_t_ {
+struct zebra_nhlfe {
/* Type of entry - static etc. */
enum lsp_types_t type;
struct nexthop *nexthop;
/* Backpointer to base entry. */
- zebra_lsp_t *lsp;
+ struct zebra_lsp *lsp;
/* Runtime info - flags, pointers etc. */
uint32_t flags;
/*
* Incoming label entry
*/
-struct zebra_ile_t_ {
+struct zebra_ile {
mpls_label_t in_label;
};
/*
* Label swap entry (ile -> list of nhlfes)
*/
-struct zebra_lsp_t_ {
+struct zebra_lsp {
/* Incoming label */
- zebra_ile_t ile;
+ struct zebra_ile ile;
/* List of NHLFEs, pointer to best, and num equal-cost. */
struct nhlfe_list_head nhlfe_list;
- zebra_nhlfe_t *best_nhlfe;
+ struct zebra_nhlfe *best_nhlfe;
uint32_t num_ecmp;
/* Backup nhlfes, if present. The nexthop in a primary/active nhlfe
/*
* FEC to label binding.
*/
-struct zebra_fec_t_ {
+struct zebra_fec {
/* FEC (prefix) */
struct route_node *rn;
};
/* Declare typesafe list apis/macros */
-DECLARE_DLIST(nhlfe_list, struct zebra_nhlfe_t_, list);
+DECLARE_DLIST(nhlfe_list, struct zebra_nhlfe, list);
/* Function declarations. */
struct route_entry *re);
/* Add an NHLFE to an LSP, return the newly-added object */
-zebra_nhlfe_t *zebra_mpls_lsp_add_nhlfe(zebra_lsp_t *lsp,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate,
- ifindex_t ifindex,
- uint8_t num_labels,
- const mpls_label_t *out_labels);
+struct zebra_nhlfe *
+zebra_mpls_lsp_add_nhlfe(struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype, const union g_addr *gate,
+ ifindex_t ifindex, uint8_t num_labels,
+ const mpls_label_t *out_labels);
/* Add or update a backup NHLFE for an LSP; return the object */
-zebra_nhlfe_t *zebra_mpls_lsp_add_backup_nhlfe(zebra_lsp_t *lsp,
- enum lsp_types_t lsp_type,
- enum nexthop_types_t gtype,
- const union g_addr *gate,
- ifindex_t ifindex,
- uint8_t num_labels,
- const mpls_label_t *out_labels);
+struct zebra_nhlfe *zebra_mpls_lsp_add_backup_nhlfe(
+ struct zebra_lsp *lsp, enum lsp_types_t lsp_type,
+ enum nexthop_types_t gtype, const union g_addr *gate, ifindex_t ifindex,
+ uint8_t num_labels, const mpls_label_t *out_labels);
/*
* Add NHLFE or backup NHLFE to an LSP based on a nexthop. These just maintain
* the LSP and NHLFE objects; nothing is scheduled for processing.
* Return: the newly-added object
*/
-zebra_nhlfe_t *zebra_mpls_lsp_add_nh(zebra_lsp_t *lsp,
- enum lsp_types_t lsp_type,
- const struct nexthop *nh);
-zebra_nhlfe_t *zebra_mpls_lsp_add_backup_nh(zebra_lsp_t *lsp,
- enum lsp_types_t lsp_type,
- const struct nexthop *nh);
+struct zebra_nhlfe *zebra_mpls_lsp_add_nh(struct zebra_lsp *lsp,
+ enum lsp_types_t lsp_type,
+ const struct nexthop *nh);
+struct zebra_nhlfe *zebra_mpls_lsp_add_backup_nh(struct zebra_lsp *lsp,
+ enum lsp_types_t lsp_type,
+ const struct nexthop *nh);
/* Free an allocated NHLFE */
-void zebra_mpls_nhlfe_free(zebra_nhlfe_t *nhlfe);
+void zebra_mpls_nhlfe_free(struct zebra_nhlfe *nhlfe);
int zebra_mpls_fec_register(struct zebra_vrf *zvrf, struct prefix *p,
uint32_t label, uint32_t label_index,
* TODO: Currently walks entire table, can optimize later with another
* hash..
*/
-zebra_fec_t *zebra_mpls_fec_for_label(struct zebra_vrf *zvrf,
- mpls_label_t label);
+struct zebra_fec *zebra_mpls_fec_for_label(struct zebra_vrf *zvrf,
+ mpls_label_t label);
/*
* Inform if specified label is currently bound to a FEC or not.
/*
* Lookup LSP by its input label.
*/
-zebra_lsp_t *mpls_lsp_find(struct zebra_vrf *zvrf, mpls_label_t in_label);
+struct zebra_lsp *mpls_lsp_find(struct zebra_vrf *zvrf, mpls_label_t in_label);
/*
* Uninstall a particular NHLFE in the forwarding table. If this is
} kr_state;
static int kernel_send_rtmsg_v4(int action, mpls_label_t in_label,
- const zebra_nhlfe_t *nhlfe)
+ const struct zebra_nhlfe *nhlfe)
{
struct iovec iov[5];
struct rt_msghdr hdr;
#endif
static int kernel_send_rtmsg_v6(int action, mpls_label_t in_label,
- const zebra_nhlfe_t *nhlfe)
+ const struct zebra_nhlfe *nhlfe)
{
struct iovec iov[5];
struct rt_msghdr hdr;
static int kernel_lsp_cmd(struct zebra_dplane_ctx *ctx)
{
const struct nhlfe_list_head *head;
- const zebra_nhlfe_t *nhlfe;
+ const struct zebra_nhlfe *nhlfe;
const struct nexthop *nexthop = NULL;
unsigned int nexthop_num = 0;
int action;
struct vrf *vrf;
struct zebra_vrf *zvrf;
vni_t vni = 0;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
struct zebra_vrf *zvrf_evpn = NULL;
char err[ERR_STR_SZ];
bool pfx_only = false;
nexthop_add_labels(resolved_hop, label_type, num_labels,
labels);
+ if (nexthop->nh_srv6) {
+ nexthop_add_srv6_seg6local(resolved_hop,
+ nexthop->nh_srv6->seg6local_action,
+ &nexthop->nh_srv6->seg6local_ctx);
+ nexthop_add_srv6_seg6(resolved_hop,
+ &nexthop->nh_srv6->seg6_segs);
+ }
+
resolved_hop->rparent = nexthop;
_nexthop_add(&nexthop->resolved, resolved_hop);
struct route_node *rn;
struct route_entry *match = NULL;
int resolved;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
struct nexthop *newhop;
struct interface *ifp;
rib_dest_t *dest;
if (!strcmp(ZEBRA_PTM_INVALID_VRF, vrf_str) && ifp) {
vrf_id = ifp->vrf_id;
} else {
- vrf_id = vrf_name_to_id(vrf_str);
+ struct vrf *pVrf;
+
+ pVrf = vrf_lookup_by_name(vrf_str);
+ if (pVrf)
+ vrf_id = pVrf->vrf_id;
+ else
+ vrf_id = VRF_DEFAULT;
}
if (!strcmp(bfdst_str, ZEBRA_PTM_BFDSTATUS_DOWN_STR)) {
if (!(pw->flags & F_PSEUDOWIRE_CWORD))
vty_out(vty, " control-word exclude\n");
+ vty_out(vty, "exit\n");
vty_out(vty, "!\n");
write = 1;
}
static int zebra_sr_policy_notify_update_client(struct zebra_sr_policy *policy,
struct zserv *client)
{
- const zebra_nhlfe_t *nhlfe;
+ const struct zebra_nhlfe *nhlfe;
struct stream *s;
uint32_t message = 0;
unsigned long nump = 0;
}
static void zebra_sr_policy_activate(struct zebra_sr_policy *policy,
- zebra_lsp_t *lsp)
+ struct zebra_lsp *lsp)
{
policy->status = ZEBRA_SR_POLICY_UP;
policy->lsp = lsp;
}
static void zebra_sr_policy_update(struct zebra_sr_policy *policy,
- zebra_lsp_t *lsp,
+ struct zebra_lsp *lsp,
struct zapi_srte_tunnel *old_tunnel)
{
bool bsid_changed;
struct zapi_srte_tunnel *new_tunnel)
{
struct zapi_srte_tunnel old_tunnel = policy->segment_list;
- zebra_lsp_t *lsp;
+ struct zebra_lsp *lsp;
if (new_tunnel)
policy->segment_list = *new_tunnel;
int zebra_sr_policy_bsid_install(struct zebra_sr_policy *policy)
{
struct zapi_srte_tunnel *zt = &policy->segment_list;
- zebra_nhlfe_t *nhlfe;
+ struct zebra_nhlfe *nhlfe;
if (zt->local_label == MPLS_LABEL_NONE)
return 0;
char name[SRTE_POLICY_NAME_MAX_LENGTH];
enum zebra_sr_policy_status status;
struct zapi_srte_tunnel segment_list;
- zebra_lsp_t *lsp;
+ struct zebra_lsp *lsp;
struct zebra_vrf *zvrf;
};
RB_HEAD(zebra_sr_policy_instance_head, zebra_sr_policy);
vty_out(vty, " locator %s\n", locator->name);
vty_out(vty, " prefix %s/%u\n", str,
locator->prefix.prefixlen);
+ vty_out(vty, " exit\n");
vty_out(vty, " !\n");
}
+ vty_out(vty, " exit\n");
vty_out(vty, " !\n");
+ vty_out(vty, " exit\n");
vty_out(vty, " !\n");
+ vty_out(vty, "exit\n");
vty_out(vty, "!\n");
}
return 0;
#include "zebra/zebra_vxlan.h"
#include "zebra/zebra_netns_notify.h"
#include "zebra/zebra_routemap.h"
+#ifndef VTYSH_EXTRACT_PL
+#include "zebra/zebra_vrf_clippy.c"
+#endif
static void zebra_vrf_table_create(struct zebra_vrf *zvrf, afi_t afi,
safi_t safi);
router_id_write(vty, zvrf);
if (zvrf_id(zvrf) != VRF_DEFAULT)
- vty_endframe(vty, " exit-vrf\n!\n");
+ vty_endframe(vty, "exit-vrf\n!\n");
else
vty_out(vty, "!\n");
}
return 0;
}
+DEFPY (vrf_netns,
+ vrf_netns_cmd,
+ "netns NAME$netns_name",
+ "Attach VRF to a Namespace\n"
+ "The file name in " NS_RUN_DIR ", or a full pathname\n")
+{
+ char *pathname = ns_netns_pathname(vty, netns_name);
+ int ret;
+
+ VTY_DECLVAR_CONTEXT(vrf, vrf);
+
+ if (!pathname)
+ return CMD_WARNING_CONFIG_FAILED;
+
+ frr_with_privs(&zserv_privs) {
+ ret = vrf_netns_handler_create(vty, vrf, pathname,
+ NS_UNKNOWN,
+ NS_UNKNOWN,
+ NS_UNKNOWN);
+ }
+
+ return ret;
+}
+
+DEFUN (no_vrf_netns,
+ no_vrf_netns_cmd,
+ "no netns [NAME]",
+ NO_STR
+ "Detach VRF from a Namespace\n"
+ "The file name in " NS_RUN_DIR ", or a full pathname\n")
+{
+ struct ns *ns = NULL;
+
+ VTY_DECLVAR_CONTEXT(vrf, vrf);
+
+ if (!vrf_is_backend_netns()) {
+ vty_out(vty, "VRF backend is not Netns. Aborting\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+ if (!vrf->ns_ctxt) {
+ vty_out(vty, "VRF %s(%u) is not configured with NetNS\n",
+ vrf->name, vrf->vrf_id);
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ ns = (struct ns *)vrf->ns_ctxt;
+
+ ns->vrf_ctxt = NULL;
+ vrf_disable(vrf);
+ /* vrf ID from VRF is necessary for Zebra
+ * so that propagate to other clients is done
+ */
+ ns_delete(ns);
+ vrf->ns_ctxt = NULL;
+ return CMD_SUCCESS;
+}
+
/* Zebra VRF initialization. */
void zebra_vrf_init(void)
{
vrf_init(zebra_vrf_new, zebra_vrf_enable, zebra_vrf_disable,
zebra_vrf_delete, zebra_vrf_update);
- vrf_cmd_init(vrf_config_write, &zserv_privs);
+ vrf_cmd_init(vrf_config_write);
+
+ if (vrf_is_backend_netns() && ns_have_netns()) {
+ /* Install NS commands. */
+ install_element(VRF_NODE, &vrf_netns_cmd);
+ install_element(VRF_NODE, &no_vrf_netns_cmd);
+ }
}
break;
}
- if ((re->vrf_id != nexthop->vrf_id)
- && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE))
- vty_out(vty, "(vrf %s)", vrf_id_to_name(nexthop->vrf_id));
+ if (re->vrf_id != nexthop->vrf_id) {
+ struct vrf *vrf = vrf_lookup_by_id(nexthop->vrf_id);
+
+ vty_out(vty, "(vrf %s)", VRF_LOGNAME(vrf));
+ }
if (CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_DUPLICATE))
vty_out(vty, " (duplicate nexthop removed)");
break;
}
- if ((re == NULL || (nexthop->vrf_id != re->vrf_id))
- && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE))
+ if ((re == NULL || (nexthop->vrf_id != re->vrf_id)))
vty_out(vty, " (vrf %s)", vrf_id_to_name(nexthop->vrf_id));
if (!CHECK_FLAG(nexthop->flags, NEXTHOP_FLAG_ACTIVE))
break;
}
- if ((nexthop->vrf_id != re->vrf_id)
- && (nexthop->type != NEXTHOP_TYPE_BLACKHOLE))
+ if (nexthop->vrf_id != re->vrf_id)
json_object_string_add(json_nexthop, "vrf",
vrf_id_to_name(nexthop->vrf_id));
DEFINE_MTYPE_STATIC(ZEBRA, L3NEIGH, "EVPN Neighbor");
DEFINE_MTYPE_STATIC(ZEBRA, ZVXLAN_SG, "zebra VxLAN multicast group");
-DEFINE_HOOK(zebra_rmac_update, (zebra_mac_t *rmac, zebra_l3vni_t *zl3vni,
- bool delete, const char *reason), (rmac, zl3vni, delete, reason));
+DEFINE_HOOK(zebra_rmac_update,
+ (struct zebra_mac * rmac, struct zebra_l3vni *zl3vni, bool delete,
+ const char *reason),
+ (rmac, zl3vni, delete, reason));
/* static function declarations */
static void zevpn_print_neigh_hash_all_evpn(struct hash_bucket *bucket,
void **args);
-static void zl3vni_print_nh(zebra_neigh_t *n, struct vty *vty,
+static void zl3vni_print_nh(struct zebra_neigh *n, struct vty *vty,
json_object *json);
-static void zl3vni_print_rmac(zebra_mac_t *zrmac, struct vty *vty,
+static void zl3vni_print_rmac(struct zebra_mac *zrmac, struct vty *vty,
json_object *json);
static void zevpn_print_mac_hash_all_evpn(struct hash_bucket *bucket, void *ctxt);
/* l3-vni next-hop neigh related APIs */
-static zebra_neigh_t *zl3vni_nh_lookup(zebra_l3vni_t *zl3vni,
- const struct ipaddr *ip);
+static struct zebra_neigh *zl3vni_nh_lookup(struct zebra_l3vni *zl3vni,
+ const struct ipaddr *ip);
static void *zl3vni_nh_alloc(void *p);
-static zebra_neigh_t *zl3vni_nh_add(zebra_l3vni_t *zl3vni,
- const struct ipaddr *vtep_ip,
- const struct ethaddr *rmac);
-static int zl3vni_nh_del(zebra_l3vni_t *zl3vni, zebra_neigh_t *n);
-static int zl3vni_nh_install(zebra_l3vni_t *zl3vni, zebra_neigh_t *n);
-static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n);
+static struct zebra_neigh *zl3vni_nh_add(struct zebra_l3vni *zl3vni,
+ const struct ipaddr *vtep_ip,
+ const struct ethaddr *rmac);
+static int zl3vni_nh_del(struct zebra_l3vni *zl3vni, struct zebra_neigh *n);
+static int zl3vni_nh_install(struct zebra_l3vni *zl3vni, struct zebra_neigh *n);
+static int zl3vni_nh_uninstall(struct zebra_l3vni *zl3vni,
+ struct zebra_neigh *n);
/* l3-vni rmac related APIs */
static void zl3vni_print_rmac_hash(struct hash_bucket *, void *);
-static zebra_mac_t *zl3vni_rmac_lookup(zebra_l3vni_t *zl3vni,
- const struct ethaddr *rmac);
+static struct zebra_mac *zl3vni_rmac_lookup(struct zebra_l3vni *zl3vni,
+ const struct ethaddr *rmac);
static void *zl3vni_rmac_alloc(void *p);
-static zebra_mac_t *zl3vni_rmac_add(zebra_l3vni_t *zl3vni,
- const struct ethaddr *rmac);
-static int zl3vni_rmac_del(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac);
-static int zl3vni_rmac_install(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac);
-static int zl3vni_rmac_uninstall(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac);
+static struct zebra_mac *zl3vni_rmac_add(struct zebra_l3vni *zl3vni,
+ const struct ethaddr *rmac);
+static int zl3vni_rmac_del(struct zebra_l3vni *zl3vni, struct zebra_mac *zrmac);
+static int zl3vni_rmac_install(struct zebra_l3vni *zl3vni,
+ struct zebra_mac *zrmac);
+static int zl3vni_rmac_uninstall(struct zebra_l3vni *zl3vni,
+ struct zebra_mac *zrmac);
/* l3-vni related APIs*/
static void *zl3vni_alloc(void *p);
-static zebra_l3vni_t *zl3vni_add(vni_t vni, vrf_id_t vrf_id);
-static int zl3vni_del(zebra_l3vni_t *zl3vni);
-static void zebra_vxlan_process_l3vni_oper_up(zebra_l3vni_t *zl3vni);
-static void zebra_vxlan_process_l3vni_oper_down(zebra_l3vni_t *zl3vni);
+static struct zebra_l3vni *zl3vni_add(vni_t vni, vrf_id_t vrf_id);
+static int zl3vni_del(struct zebra_l3vni *zl3vni);
+static void zebra_vxlan_process_l3vni_oper_up(struct zebra_l3vni *zl3vni);
+static void zebra_vxlan_process_l3vni_oper_down(struct zebra_l3vni *zl3vni);
static void zevpn_build_hash_table(void);
static unsigned int zebra_vxlan_sg_hash_key_make(const void *p);
static bool zebra_vxlan_sg_hash_eq(const void *p1, const void *p2);
static void zebra_vxlan_sg_do_deref(struct zebra_vrf *zvrf,
struct in_addr sip, struct in_addr mcast_grp);
-static zebra_vxlan_sg_t *zebra_vxlan_sg_do_ref(struct zebra_vrf *vrf,
- struct in_addr sip, struct in_addr mcast_grp);
+static struct zebra_vxlan_sg *zebra_vxlan_sg_do_ref(struct zebra_vrf *vrf,
+ struct in_addr sip,
+ struct in_addr mcast_grp);
static void zebra_vxlan_sg_deref(struct in_addr local_vtep_ip,
struct in_addr mcast_grp);
static void zebra_vxlan_sg_ref(struct in_addr local_vtep_ip,
{
struct vty *vty;
json_object *json = NULL, *json_evpn = NULL;
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
uint32_t num_neigh;
struct neigh_walk_ctx wctx;
char vni_str[VNI_STR_LEN];
json = (json_object *)args[1];
print_dup = (uint32_t)(uintptr_t)args[2];
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
num_neigh = hashcount(zevpn->neigh_table);
{
struct vty *vty;
json_object *json = NULL, *json_evpn = NULL;
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
uint32_t num_neigh;
struct neigh_walk_ctx wctx;
char vni_str[VNI_STR_LEN];
json = (json_object *)args[1];
print_dup = (uint32_t)(uintptr_t)args[2];
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
if (!zevpn) {
if (json)
vty_out(vty, "{}\n");
}
/* print a specific next hop for an l3vni */
-static void zl3vni_print_nh(zebra_neigh_t *n, struct vty *vty,
+static void zl3vni_print_nh(struct zebra_neigh *n, struct vty *vty,
json_object *json)
{
char buf1[ETHER_ADDR_STRLEN];
}
/* Print a specific RMAC entry */
-static void zl3vni_print_rmac(zebra_mac_t *zrmac, struct vty *vty,
+static void zl3vni_print_rmac(struct zebra_mac *zrmac, struct vty *vty,
json_object *json)
{
char buf1[ETHER_ADDR_STRLEN];
struct vty *vty;
json_object *json = NULL, *json_evpn = NULL;
json_object *json_mac = NULL;
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
uint32_t num_macs;
struct mac_walk_ctx *wctx = ctxt;
char vni_str[VNI_STR_LEN];
vty = wctx->vty;
json = wctx->json;
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
wctx->zevpn = zevpn;
/*We are iterating over a new VNI, set the count to 0*/
struct vty *vty;
json_object *json = NULL, *json_evpn = NULL;
json_object *json_mac = NULL;
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
uint32_t num_macs;
struct mac_walk_ctx *wctx = ctxt;
char vni_str[VNI_STR_LEN];
vty = wctx->vty;
json = wctx->json;
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
if (!zevpn) {
if (json)
vty_out(vty, "{}\n");
struct vty *vty = NULL;
struct json_object *json_evpn = NULL;
struct json_object *json_nh = NULL;
- zebra_neigh_t *n = NULL;
+ struct zebra_neigh *n = NULL;
char buf1[ETHER_ADDR_STRLEN];
char buf2[INET6_ADDRSTRLEN];
json_evpn = wctx->json;
if (json_evpn)
json_nh = json_object_new_object();
- n = (zebra_neigh_t *)bucket->data;
+ n = (struct zebra_neigh *)bucket->data;
if (!json_evpn) {
vty_out(vty, "%-15s %-17s\n",
struct vty *vty = NULL;
json_object *json = NULL;
json_object *json_evpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
uint32_t num_nh = 0;
struct nh_walk_ctx wctx;
char vni_str[VNI_STR_LEN];
vty = (struct vty *)args[0];
json = (struct json_object *)args[1];
- zl3vni = (zebra_l3vni_t *)bucket->data;
+ zl3vni = (struct zebra_l3vni *)bucket->data;
num_nh = hashcount(zl3vni->nh_table);
if (!num_nh)
struct vty *vty = NULL;
json_object *json = NULL;
json_object *json_evpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
uint32_t num_rmacs;
struct rmac_walk_ctx wctx;
char vni_str[VNI_STR_LEN];
vty = (struct vty *)args[0];
json = (struct json_object *)args[1];
- zl3vni = (zebra_l3vni_t *)bucket->data;
+ zl3vni = (struct zebra_l3vni *)bucket->data;
num_rmacs = hashcount(zl3vni->rmac_table);
if (!num_rmacs)
static void zl3vni_print_rmac_hash(struct hash_bucket *bucket, void *ctx)
{
- zebra_mac_t *zrmac = NULL;
+ struct zebra_mac *zrmac = NULL;
struct rmac_walk_ctx *wctx = NULL;
struct vty *vty = NULL;
struct json_object *json = NULL;
json = wctx->json;
if (json)
json_rmac = json_object_new_object();
- zrmac = (zebra_mac_t *)bucket->data;
+ zrmac = (struct zebra_mac *)bucket->data;
if (!json) {
vty_out(vty, "%-17s %-21pI4\n",
}
/* print a specific L3 VNI entry */
-static void zl3vni_print(zebra_l3vni_t *zl3vni, void **ctx)
+static void zl3vni_print(struct zebra_l3vni *zl3vni, void **ctx)
{
char buf[PREFIX_STRLEN];
struct vty *vty = NULL;
json_object *json = NULL;
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
json_object *json_evpn_list = NULL;
struct listnode *node = NULL, *nnode = NULL;
struct vty *vty = NULL;
json_object *json = NULL;
json_object *json_evpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
vty = (struct vty *)ctx[0];
json = (json_object *)ctx[1];
- zl3vni = (zebra_l3vni_t *)bucket->data;
+ zl3vni = (struct zebra_l3vni *)bucket->data;
if (!json) {
vty_out(vty, "%-10u %-4s %-21s %-8lu %-8lu %-15s %-37s\n",
static void zl3vni_print_hash_detail(struct hash_bucket *bucket, void *data)
{
struct vty *vty = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
json_object *json_array = NULL;
bool use_json = false;
struct zebra_evpn_show *zes = data;
json_array = zes->json;
use_json = zes->use_json;
- zl3vni = (zebra_l3vni_t *)bucket->data;
+ zl3vni = (struct zebra_l3vni *)bucket->data;
zebra_vxlan_print_vni(vty, zes->zvrf, zl3vni->vni,
use_json, json_array);
return tmp_if;
}
-static int zebra_evpn_vxlan_del(zebra_evpn_t *zevpn)
+static int zebra_evpn_vxlan_del(struct zebra_evpn *zevpn)
{
zevpn_vxlan_if_set(zevpn, zevpn->vxlan_if, false /* set */);
/* Walk VxLAN interfaces and create EVPN hash. */
for (rn = route_top(zns->if_table); rn; rn = route_next(rn)) {
vni_t vni;
- zebra_evpn_t *zevpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
struct zebra_if *zif;
struct zebra_l2info_vxlan *vxl;
*/
static void zebra_evpn_vxlan_cleanup_all(struct hash_bucket *bucket, void *arg)
{
- zebra_evpn_t *zevpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
struct zebra_vrf *zvrf = (struct zebra_vrf *)arg;
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
/* remove from l3-vni list */
if (zvrf->l3vni)
/* cleanup L3VNI */
static void zl3vni_cleanup_all(struct hash_bucket *bucket, void *args)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
- zl3vni = (zebra_l3vni_t *)bucket->data;
+ zl3vni = (struct zebra_l3vni *)bucket->data;
zebra_vxlan_process_l3vni_oper_down(zl3vni);
}
/*
* Look up MAC hash entry.
*/
-static zebra_mac_t *zl3vni_rmac_lookup(zebra_l3vni_t *zl3vni,
- const struct ethaddr *rmac)
+static struct zebra_mac *zl3vni_rmac_lookup(struct zebra_l3vni *zl3vni,
+ const struct ethaddr *rmac)
{
- zebra_mac_t tmp;
- zebra_mac_t *pmac;
+ struct zebra_mac tmp;
+ struct zebra_mac *pmac;
memset(&tmp, 0, sizeof(tmp));
memcpy(&tmp.macaddr, rmac, ETH_ALEN);
*/
static void *zl3vni_rmac_alloc(void *p)
{
- const zebra_mac_t *tmp_rmac = p;
- zebra_mac_t *zrmac;
+ const struct zebra_mac *tmp_rmac = p;
+ struct zebra_mac *zrmac;
- zrmac = XCALLOC(MTYPE_L3VNI_MAC, sizeof(zebra_mac_t));
+ zrmac = XCALLOC(MTYPE_L3VNI_MAC, sizeof(struct zebra_mac));
*zrmac = *tmp_rmac;
return ((void *)zrmac);
/*
* Add RMAC entry to l3-vni
*/
-static zebra_mac_t *zl3vni_rmac_add(zebra_l3vni_t *zl3vni,
- const struct ethaddr *rmac)
+static struct zebra_mac *zl3vni_rmac_add(struct zebra_l3vni *zl3vni,
+ const struct ethaddr *rmac)
{
- zebra_mac_t tmp_rmac;
- zebra_mac_t *zrmac = NULL;
+ struct zebra_mac tmp_rmac;
+ struct zebra_mac *zrmac = NULL;
- memset(&tmp_rmac, 0, sizeof(zebra_mac_t));
+ memset(&tmp_rmac, 0, sizeof(struct zebra_mac));
memcpy(&tmp_rmac.macaddr, rmac, ETH_ALEN);
zrmac = hash_get(zl3vni->rmac_table, &tmp_rmac, zl3vni_rmac_alloc);
assert(zrmac);
/*
* Delete MAC entry.
*/
-static int zl3vni_rmac_del(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac)
+static int zl3vni_rmac_del(struct zebra_l3vni *zl3vni, struct zebra_mac *zrmac)
{
- zebra_mac_t *tmp_rmac;
+ struct zebra_mac *tmp_rmac;
struct host_rb_entry *hle;
while (!RB_EMPTY(host_rb_tree_entry, &zrmac->host_rb)) {
/*
* Install remote RMAC into the forwarding plane.
*/
-static int zl3vni_rmac_install(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac)
+static int zl3vni_rmac_install(struct zebra_l3vni *zl3vni,
+ struct zebra_mac *zrmac)
{
const struct zebra_if *zif = NULL, *br_zif = NULL;
const struct zebra_l2info_vxlan *vxl = NULL;
/*
* Uninstall remote RMAC from the forwarding plane.
*/
-static int zl3vni_rmac_uninstall(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac)
+static int zl3vni_rmac_uninstall(struct zebra_l3vni *zl3vni,
+ struct zebra_mac *zrmac)
{
const struct zebra_if *zif = NULL, *br_zif;
const struct zebra_l2info_vxlan *vxl = NULL;
}
/* handle rmac add */
-static int zl3vni_remote_rmac_add(zebra_l3vni_t *zl3vni,
+static int zl3vni_remote_rmac_add(struct zebra_l3vni *zl3vni,
const struct ethaddr *rmac,
const struct ipaddr *vtep_ip,
const struct prefix *host_prefix)
{
- zebra_mac_t *zrmac = NULL;
+ struct zebra_mac *zrmac = NULL;
zrmac = zl3vni_rmac_lookup(zl3vni, rmac);
if (!zrmac) {
/* handle rmac delete */
-static void zl3vni_remote_rmac_del(zebra_l3vni_t *zl3vni, zebra_mac_t *zrmac,
- struct prefix *host_prefix)
+static void zl3vni_remote_rmac_del(struct zebra_l3vni *zl3vni,
+ struct zebra_mac *zrmac,
+ struct prefix *host_prefix)
{
rb_delete_host(&zrmac->host_rb, host_prefix);
/*
* Look up nh hash entry on a l3-vni.
*/
-static zebra_neigh_t *zl3vni_nh_lookup(zebra_l3vni_t *zl3vni,
- const struct ipaddr *ip)
+static struct zebra_neigh *zl3vni_nh_lookup(struct zebra_l3vni *zl3vni,
+ const struct ipaddr *ip)
{
- zebra_neigh_t tmp;
- zebra_neigh_t *n;
+ struct zebra_neigh tmp;
+ struct zebra_neigh *n;
memset(&tmp, 0, sizeof(tmp));
memcpy(&tmp.ip, ip, sizeof(struct ipaddr));
*/
static void *zl3vni_nh_alloc(void *p)
{
- const zebra_neigh_t *tmp_n = p;
- zebra_neigh_t *n;
+ const struct zebra_neigh *tmp_n = p;
+ struct zebra_neigh *n;
- n = XCALLOC(MTYPE_L3NEIGH, sizeof(zebra_neigh_t));
+ n = XCALLOC(MTYPE_L3NEIGH, sizeof(struct zebra_neigh));
*n = *tmp_n;
return ((void *)n);
/*
* Add neighbor entry.
*/
-static zebra_neigh_t *zl3vni_nh_add(zebra_l3vni_t *zl3vni,
- const struct ipaddr *ip,
- const struct ethaddr *mac)
+static struct zebra_neigh *zl3vni_nh_add(struct zebra_l3vni *zl3vni,
+ const struct ipaddr *ip,
+ const struct ethaddr *mac)
{
- zebra_neigh_t tmp_n;
- zebra_neigh_t *n = NULL;
+ struct zebra_neigh tmp_n;
+ struct zebra_neigh *n = NULL;
- memset(&tmp_n, 0, sizeof(zebra_neigh_t));
+ memset(&tmp_n, 0, sizeof(struct zebra_neigh));
memcpy(&tmp_n.ip, ip, sizeof(struct ipaddr));
n = hash_get(zl3vni->nh_table, &tmp_n, zl3vni_nh_alloc);
assert(n);
/*
* Delete neighbor entry.
*/
-static int zl3vni_nh_del(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
+static int zl3vni_nh_del(struct zebra_l3vni *zl3vni, struct zebra_neigh *n)
{
- zebra_neigh_t *tmp_n;
+ struct zebra_neigh *tmp_n;
struct host_rb_entry *hle;
while (!RB_EMPTY(host_rb_tree_entry, &n->host_rb)) {
/*
* Install remote nh as neigh into the kernel.
*/
-static int zl3vni_nh_install(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
+static int zl3vni_nh_install(struct zebra_l3vni *zl3vni, struct zebra_neigh *n)
{
uint8_t flags;
int ret = 0;
/*
* Uninstall remote nh from the kernel.
*/
-static int zl3vni_nh_uninstall(zebra_l3vni_t *zl3vni, zebra_neigh_t *n)
+static int zl3vni_nh_uninstall(struct zebra_l3vni *zl3vni,
+ struct zebra_neigh *n)
{
if (!(n->flags & ZEBRA_NEIGH_REMOTE)
|| !(n->flags & ZEBRA_NEIGH_REMOTE_NH))
}
/* add remote vtep as a neigh entry */
-static int zl3vni_remote_nh_add(zebra_l3vni_t *zl3vni,
+static int zl3vni_remote_nh_add(struct zebra_l3vni *zl3vni,
const struct ipaddr *vtep_ip,
const struct ethaddr *rmac,
const struct prefix *host_prefix)
{
- zebra_neigh_t *nh = NULL;
+ struct zebra_neigh *nh = NULL;
/* Create the next hop entry, or update its mac, if necessary. */
nh = zl3vni_nh_lookup(zl3vni, vtep_ip);
}
/* handle nh neigh delete */
-static void zl3vni_remote_nh_del(zebra_l3vni_t *zl3vni, zebra_neigh_t *nh,
+static void zl3vni_remote_nh_del(struct zebra_l3vni *zl3vni,
+ struct zebra_neigh *nh,
struct prefix *host_prefix)
{
rb_delete_host(&nh->host_rb, host_prefix);
/* handle neigh update from kernel - the only thing of interest is to
* readd stale entries.
*/
-static int zl3vni_local_nh_add_update(zebra_l3vni_t *zl3vni, struct ipaddr *ip,
- uint16_t state)
+static int zl3vni_local_nh_add_update(struct zebra_l3vni *zl3vni,
+ struct ipaddr *ip, uint16_t state)
{
#ifdef GNU_LINUX
- zebra_neigh_t *n = NULL;
+ struct zebra_neigh *n = NULL;
n = zl3vni_nh_lookup(zl3vni, ip);
if (!n)
}
/* handle neigh delete from kernel */
-static int zl3vni_local_nh_del(zebra_l3vni_t *zl3vni, struct ipaddr *ip)
+static int zl3vni_local_nh_del(struct zebra_l3vni *zl3vni, struct ipaddr *ip)
{
- zebra_neigh_t *n = NULL;
+ struct zebra_neigh *n = NULL;
n = zl3vni_nh_lookup(zl3vni, ip);
if (!n)
*/
static unsigned int l3vni_hash_keymake(const void *p)
{
- const zebra_l3vni_t *zl3vni = p;
+ const struct zebra_l3vni *zl3vni = p;
return jhash_1word(zl3vni->vni, 0);
}
*/
static bool l3vni_hash_cmp(const void *p1, const void *p2)
{
- const zebra_l3vni_t *zl3vni1 = p1;
- const zebra_l3vni_t *zl3vni2 = p2;
+ const struct zebra_l3vni *zl3vni1 = p1;
+ const struct zebra_l3vni *zl3vni2 = p2;
return (zl3vni1->vni == zl3vni2->vni);
}
*/
static void *zl3vni_alloc(void *p)
{
- zebra_l3vni_t *zl3vni = NULL;
- const zebra_l3vni_t *tmp_l3vni = p;
+ struct zebra_l3vni *zl3vni = NULL;
+ const struct zebra_l3vni *tmp_l3vni = p;
- zl3vni = XCALLOC(MTYPE_ZL3VNI, sizeof(zebra_l3vni_t));
+ zl3vni = XCALLOC(MTYPE_ZL3VNI, sizeof(struct zebra_l3vni));
zl3vni->vni = tmp_l3vni->vni;
return ((void *)zl3vni);
}
/*
* Look up L3 VNI hash entry.
*/
-zebra_l3vni_t *zl3vni_lookup(vni_t vni)
+struct zebra_l3vni *zl3vni_lookup(vni_t vni)
{
- zebra_l3vni_t tmp_l3vni;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni tmp_l3vni;
+ struct zebra_l3vni *zl3vni = NULL;
- memset(&tmp_l3vni, 0, sizeof(zebra_l3vni_t));
+ memset(&tmp_l3vni, 0, sizeof(struct zebra_l3vni));
tmp_l3vni.vni = vni;
zl3vni = hash_lookup(zrouter.l3vni_table, &tmp_l3vni);
/*
* Add L3 VNI hash entry.
*/
-static zebra_l3vni_t *zl3vni_add(vni_t vni, vrf_id_t vrf_id)
+static struct zebra_l3vni *zl3vni_add(vni_t vni, vrf_id_t vrf_id)
{
- zebra_l3vni_t tmp_zl3vni;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni tmp_zl3vni;
+ struct zebra_l3vni *zl3vni = NULL;
- memset(&tmp_zl3vni, 0, sizeof(zebra_l3vni_t));
+ memset(&tmp_zl3vni, 0, sizeof(struct zebra_l3vni));
tmp_zl3vni.vni = vni;
zl3vni = hash_get(zrouter.l3vni_table, &tmp_zl3vni, zl3vni_alloc);
/*
* Delete L3 VNI hash entry.
*/
-static int zl3vni_del(zebra_l3vni_t *zl3vni)
+static int zl3vni_del(struct zebra_l3vni *zl3vni)
{
- zebra_l3vni_t *tmp_zl3vni;
+ struct zebra_l3vni *tmp_zl3vni;
/* free the list of l2vnis */
list_delete(&zl3vni->l2vnis);
void **_pifp)
{
struct zebra_ns *zns = ns->info;
- zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)_zl3vni;
+ struct zebra_l3vni *zl3vni = (struct zebra_l3vni *)_zl3vni;
struct route_node *rn = NULL;
struct interface *ifp = NULL;
struct zebra_vrf *zvrf;
return NS_WALK_CONTINUE;
}
-struct interface *zl3vni_map_to_vxlan_if(zebra_l3vni_t *zl3vni)
+struct interface *zl3vni_map_to_vxlan_if(struct zebra_l3vni *zl3vni)
{
struct interface **p_ifp;
struct interface *ifp = NULL;
return ifp;
}
-struct interface *zl3vni_map_to_svi_if(zebra_l3vni_t *zl3vni)
+struct interface *zl3vni_map_to_svi_if(struct zebra_l3vni *zl3vni)
{
struct zebra_if *zif = NULL; /* zebra_if for vxlan_if */
struct zebra_l2info_vxlan *vxl = NULL; /* l2 info for vxlan_if */
return zvni_map_to_svi(vxl->access_vlan, zif->brslave_info.br_if);
}
-struct interface *zl3vni_map_to_mac_vlan_if(zebra_l3vni_t *zl3vni)
+struct interface *zl3vni_map_to_mac_vlan_if(struct zebra_l3vni *zl3vni)
{
struct zebra_if *zif = NULL; /* zebra_if for vxlan_if */
}
-zebra_l3vni_t *zl3vni_from_vrf(vrf_id_t vrf_id)
+struct zebra_l3vni *zl3vni_from_vrf(vrf_id_t vrf_id)
{
struct zebra_vrf *zvrf = NULL;
* Map SVI and associated bridge to a VNI. This is invoked upon getting
* neighbor notifications, to see if they are of interest.
*/
-static zebra_l3vni_t *zl3vni_from_svi(struct interface *ifp,
- struct interface *br_if)
+static struct zebra_l3vni *zl3vni_from_svi(struct interface *ifp,
+ struct interface *br_if)
{
int found = 0;
vlanid_t vid = 0;
uint8_t bridge_vlan_aware = 0;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
struct zebra_ns *zns = NULL;
struct route_node *rn = NULL;
struct zebra_if *zif = NULL;
vni_t vni_id_from_svi(struct interface *ifp, struct interface *br_if)
{
vni_t vni = 0;
- zebra_evpn_t *zevpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
/* Check if an L3VNI belongs to this SVI interface.
* If not, check if an L2VNI belongs to this SVI interface.
return vni;
}
-static inline void zl3vni_get_vrr_rmac(zebra_l3vni_t *zl3vni,
+static inline void zl3vni_get_vrr_rmac(struct zebra_l3vni *zl3vni,
struct ethaddr *rmac)
{
if (!zl3vni)
/*
* Inform BGP about l3-vni.
*/
-static int zl3vni_send_add_to_client(zebra_l3vni_t *zl3vni)
+static int zl3vni_send_add_to_client(struct zebra_l3vni *zl3vni)
{
struct stream *s = NULL;
struct zserv *client = NULL;
/*
* Inform BGP about local l3-VNI deletion.
*/
-static int zl3vni_send_del_to_client(zebra_l3vni_t *zl3vni)
+static int zl3vni_send_del_to_client(struct zebra_l3vni *zl3vni)
{
struct stream *s = NULL;
struct zserv *client = NULL;
return zserv_send_message(client, s);
}
-static void zebra_vxlan_process_l3vni_oper_up(zebra_l3vni_t *zl3vni)
+static void zebra_vxlan_process_l3vni_oper_up(struct zebra_l3vni *zl3vni)
{
if (!zl3vni)
return;
zl3vni_send_add_to_client(zl3vni);
}
-static void zebra_vxlan_process_l3vni_oper_down(zebra_l3vni_t *zl3vni)
+static void zebra_vxlan_process_l3vni_oper_down(struct zebra_l3vni *zl3vni)
{
if (!zl3vni)
return;
static void zevpn_add_to_l3vni_list(struct hash_bucket *bucket, void *ctxt)
{
- zebra_evpn_t *zevpn = (zebra_evpn_t *)bucket->data;
- zebra_l3vni_t *zl3vni = (zebra_l3vni_t *)ctxt;
+ struct zebra_evpn *zevpn = (struct zebra_evpn *)bucket->data;
+ struct zebra_l3vni *zl3vni = (struct zebra_l3vni *)ctxt;
if (zevpn->vrf_id == zl3vni_vrf_id(zl3vni))
listnode_add_sort(zl3vni->l2vnis, zevpn);
static int zebra_vxlan_handle_vni_transition(struct zebra_vrf *zvrf, vni_t vni,
int add)
{
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
/* There is a possibility that VNI notification was already received
* from kernel and we programmed it as L2-VNI
/* delete and uninstall rmac hash entry */
static void zl3vni_del_rmac_hash_entry(struct hash_bucket *bucket, void *ctx)
{
- zebra_mac_t *zrmac = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_mac *zrmac = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
- zrmac = (zebra_mac_t *)bucket->data;
- zl3vni = (zebra_l3vni_t *)ctx;
+ zrmac = (struct zebra_mac *)bucket->data;
+ zl3vni = (struct zebra_l3vni *)ctx;
zl3vni_rmac_uninstall(zl3vni, zrmac);
/* Send RMAC for FPM processing */
/* delete and uninstall nh hash entry */
static void zl3vni_del_nh_hash_entry(struct hash_bucket *bucket, void *ctx)
{
- zebra_neigh_t *n = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_neigh *n = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
- n = (zebra_neigh_t *)bucket->data;
- zl3vni = (zebra_l3vni_t *)ctx;
+ n = (struct zebra_neigh *)bucket->data;
+ zl3vni = (struct zebra_l3vni *)ctx;
zl3vni_nh_uninstall(zl3vni, n);
zl3vni_nh_del(zl3vni, n);
}
/* re-add remote rmac if needed */
-static int zebra_vxlan_readd_remote_rmac(zebra_l3vni_t *zl3vni,
+static int zebra_vxlan_readd_remote_rmac(struct zebra_l3vni *zl3vni,
struct ethaddr *rmac)
{
- zebra_mac_t *zrmac = NULL;
+ struct zebra_mac *zrmac = NULL;
zrmac = zl3vni_rmac_lookup(zl3vni, rmac);
if (!zrmac)
int is_l3vni_for_prefix_routes_only(vni_t vni)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
zl3vni = zl3vni_lookup(vni);
if (!zl3vni)
const struct ipaddr *vtep_ip,
const struct prefix *host_prefix)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
struct ipaddr ipv4_vtep;
zl3vni = zl3vni_from_vrf(vrf_id);
struct ipaddr *vtep_ip,
struct prefix *host_prefix)
{
- zebra_l3vni_t *zl3vni = NULL;
- zebra_neigh_t *nh = NULL;
- zebra_mac_t *zrmac = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
+ struct zebra_neigh *nh = NULL;
+ struct zebra_mac *zrmac = NULL;
zl3vni = zl3vni_from_vrf(vrf_id);
if (!zl3vni)
void zebra_vxlan_print_specific_rmac_l3vni(struct vty *vty, vni_t l3vni,
struct ethaddr *rmac, bool use_json)
{
- zebra_l3vni_t *zl3vni = NULL;
- zebra_mac_t *zrmac = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
+ struct zebra_mac *zrmac = NULL;
json_object *json = NULL;
if (!is_evpn_enabled()) {
void zebra_vxlan_print_rmacs_l3vni(struct vty *vty, vni_t l3vni, bool use_json)
{
- zebra_l3vni_t *zl3vni;
+ struct zebra_l3vni *zl3vni;
uint32_t num_rmacs;
struct rmac_walk_ctx wctx;
json_object *json = NULL;
void zebra_vxlan_print_specific_nh_l3vni(struct vty *vty, vni_t l3vni,
struct ipaddr *ip, bool use_json)
{
- zebra_l3vni_t *zl3vni = NULL;
- zebra_neigh_t *n = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
+ struct zebra_neigh *n = NULL;
json_object *json = NULL;
if (!is_evpn_enabled()) {
uint32_t num_nh;
struct nh_walk_ctx wctx;
json_object *json = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
if (!is_evpn_enabled())
return;
{
void *args[2];
json_object *json = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
if (!is_evpn_enabled()) {
if (use_json)
json_object *json_vrfs)
{
char buf[ETHER_ADDR_STRLEN];
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
zl3vni = zl3vni_lookup(zvrf->l3vni);
if (!zl3vni)
void zebra_vxlan_print_neigh_vni(struct vty *vty, struct zebra_vrf *zvrf,
vni_t vni, bool use_json)
{
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
uint32_t num_neigh;
struct neigh_walk_ctx wctx;
json_object *json = NULL;
struct zebra_vrf *zvrf, vni_t vni,
struct ipaddr *ip, bool use_json)
{
- zebra_evpn_t *zevpn;
- zebra_neigh_t *n;
+ struct zebra_evpn *zevpn;
+ struct zebra_neigh *n;
json_object *json = NULL;
if (!is_evpn_enabled())
vni_t vni, struct in_addr vtep_ip,
bool use_json)
{
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
uint32_t num_neigh;
struct neigh_walk_ctx wctx;
json_object *json = NULL;
vni_t vni,
bool use_json)
{
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
uint32_t num_neigh;
struct neigh_walk_ctx wctx;
json_object *json = NULL;
void zebra_vxlan_print_macs_vni(struct vty *vty, struct zebra_vrf *zvrf,
vni_t vni, bool use_json)
{
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
uint32_t num_macs;
struct mac_walk_ctx wctx;
json_object *json = NULL;
vni_t vni, struct ethaddr *macaddr,
bool use_json)
{
- zebra_evpn_t *zevpn;
- zebra_mac_t *mac;
+ struct zebra_evpn *zevpn;
+ struct zebra_mac *mac;
json_object *json = NULL;
if (!is_evpn_enabled())
struct zebra_vrf *zvrf,
vni_t vni, bool use_json)
{
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
struct mac_walk_ctx wctx;
uint32_t num_macs;
json_object *json = NULL;
struct ethaddr *macaddr, char *errmsg,
size_t errmsg_len)
{
- zebra_evpn_t *zevpn;
- zebra_mac_t *mac;
+ struct zebra_evpn *zevpn;
+ struct zebra_mac *mac;
struct listnode *node = NULL;
- zebra_neigh_t *nbr = NULL;
+ struct zebra_neigh *nbr = NULL;
if (!is_evpn_enabled())
return 0;
struct ipaddr *ip, char *errmsg,
size_t errmsg_len)
{
- zebra_evpn_t *zevpn;
- zebra_neigh_t *nbr;
- zebra_mac_t *mac;
+ struct zebra_evpn *zevpn;
+ struct zebra_neigh *nbr;
+ struct zebra_mac *mac;
char buf[INET6_ADDRSTRLEN];
char buf2[ETHER_ADDR_STRLEN];
static void zevpn_clear_dup_mac_hash(struct hash_bucket *bucket, void *ctxt)
{
struct mac_walk_ctx *wctx = ctxt;
- zebra_mac_t *mac;
- zebra_evpn_t *zevpn;
+ struct zebra_mac *mac;
+ struct zebra_evpn *zevpn;
struct listnode *node = NULL;
- zebra_neigh_t *nbr = NULL;
+ struct zebra_neigh *nbr = NULL;
- mac = (zebra_mac_t *)bucket->data;
+ mac = (struct zebra_mac *)bucket->data;
if (!mac)
return;
static void zevpn_clear_dup_detect_hash_vni_all(struct hash_bucket *bucket,
void **args)
{
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
struct zebra_vrf *zvrf;
struct mac_walk_ctx m_wctx;
struct neigh_walk_ctx n_wctx;
- zevpn = (zebra_evpn_t *)bucket->data;
+ zevpn = (struct zebra_evpn *)bucket->data;
if (!zevpn)
return;
int zebra_vxlan_clear_dup_detect_vni(struct zebra_vrf *zvrf, vni_t vni)
{
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
struct mac_walk_ctx m_wctx;
struct neigh_walk_ctx n_wctx;
vni_t vni, struct in_addr vtep_ip,
bool use_json)
{
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
uint32_t num_macs;
struct mac_walk_ctx wctx;
json_object *json = NULL;
{
json_object *json = NULL;
void *args[2];
- zebra_l3vni_t *zl3vni = NULL;
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
+ struct zebra_evpn *zevpn = NULL;
if (!is_evpn_enabled())
return;
struct interface *link_if,
struct ipaddr *ip)
{
- zebra_evpn_t *zevpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
/* check if this is a remote neigh entry corresponding to remote
* next-hop
bool is_router,
bool local_inactive, bool dp_static)
{
- zebra_evpn_t *zevpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
/* check if this is a remote neigh entry corresponding to remote
* next-hop
struct zebra_vrf *zvrf = NULL;
struct zebra_l2info_vxlan *vxl;
vni_t vni;
- zebra_evpn_t *zevpn = NULL;
- zebra_vtep_t *zvtep = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_vtep *zvtep = NULL;
zif = ifp->info;
assert(zif);
struct zebra_if *zif;
struct zebra_l2info_vxlan *vxl;
vni_t vni;
- zebra_evpn_t *zevpn;
- zebra_mac_t *mac;
+ struct zebra_evpn *zevpn;
+ struct zebra_mac *mac;
zif = ifp->info;
assert(zif);
struct zebra_if *zif = NULL;
struct zebra_l2info_vxlan *vxl = NULL;
vni_t vni;
- zebra_evpn_t *zevpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
- zebra_mac_t *mac = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
+ struct zebra_mac *mac = NULL;
zif = ifp->info;
assert(zif);
int zebra_vxlan_local_mac_del(struct interface *ifp, struct interface *br_if,
struct ethaddr *macaddr, vlanid_t vid)
{
- zebra_evpn_t *zevpn;
- zebra_mac_t *mac;
+ struct zebra_evpn *zevpn;
+ struct zebra_mac *mac;
/* We are interested in MACs only on ports or (port, VLAN) that
* map to a VNI.
bool sticky, bool local_inactive,
bool dp_static)
{
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
struct zebra_vrf *zvrf;
assert(ifp);
void zebra_vxlan_remote_vtep_del(vrf_id_t vrf_id, vni_t vni,
struct in_addr vtep_ip)
{
- zebra_evpn_t *zevpn;
- zebra_vtep_t *zvtep;
+ struct zebra_evpn *zevpn;
+ struct zebra_vtep *zvtep;
struct interface *ifp;
struct zebra_if *zif;
struct zebra_vrf *zvrf;
void zebra_vxlan_remote_vtep_add(vrf_id_t vrf_id, vni_t vni,
struct in_addr vtep_ip, int flood_control)
{
- zebra_evpn_t *zevpn;
+ struct zebra_evpn *zevpn;
struct interface *ifp;
struct zebra_if *zif;
- zebra_vtep_t *zvtep;
+ struct zebra_vtep *zvtep;
struct zebra_vrf *zvrf;
if (!is_evpn_enabled()) {
{
struct ipaddr ip;
struct ethaddr macaddr;
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
memset(&ip, 0, sizeof(struct ipaddr));
memset(&macaddr, 0, sizeof(struct ethaddr));
*/
int zebra_vxlan_svi_down(struct interface *ifp, struct interface *link_if)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
zl3vni = zl3vni_from_svi(ifp, link_if);
if (zl3vni) {
/* remove association with svi-if */
zl3vni->svi_if = NULL;
} else {
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
/* Unlink the SVI from the access VLAN */
zebra_evpn_acc_bd_svi_set(ifp->info, link_if->info, false);
*/
int zebra_vxlan_svi_up(struct interface *ifp, struct interface *link_if)
{
- zebra_evpn_t *zevpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
zl3vni = zl3vni_from_svi(ifp, link_if);
if (zl3vni) {
*/
void zebra_vxlan_macvlan_down(struct interface *ifp)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
struct zebra_if *zif, *link_zif;
struct interface *link_ifp, *link_if;
*/
void zebra_vxlan_macvlan_up(struct interface *ifp)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
struct zebra_if *zif, *link_zif;
struct interface *link_ifp, *link_if;
vni_t vni;
struct zebra_if *zif = NULL;
struct zebra_l2info_vxlan *vxl = NULL;
- zebra_l3vni_t *zl3vni = NULL;
- zebra_evpn_t *zevpn;
+ struct zebra_l3vni *zl3vni = NULL;
+ struct zebra_evpn *zevpn;
/* Check if EVPN is enabled. */
if (!is_evpn_enabled())
vni_t vni;
struct zebra_if *zif = NULL;
struct zebra_l2info_vxlan *vxl = NULL;
- zebra_evpn_t *zevpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
/* Check if EVPN is enabled. */
if (!is_evpn_enabled())
vni_t vni;
struct zebra_if *zif = NULL;
struct zebra_l2info_vxlan *vxl = NULL;
- zebra_evpn_t *zevpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
/* Check if EVPN is enabled. */
if (!is_evpn_enabled())
vni_t vni;
struct zebra_if *zif = NULL;
struct zebra_l2info_vxlan *vxl = NULL;
- zebra_evpn_t *zevpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
struct interface *vlan_if = NULL;
/* Check if EVPN is enabled. */
vni_t vni;
struct zebra_if *zif = NULL;
struct zebra_l2info_vxlan *vxl = NULL;
- zebra_evpn_t *zevpn = NULL;
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_evpn *zevpn = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
/* Check if EVPN is enabled. */
if (!is_evpn_enabled())
listnode_add_sort_nodup(zl3vni->l2vnis, zevpn);
}
- if (IS_ZEBRA_DEBUG_VXLAN) {
- char addr_buf1[INET_ADDRSTRLEN];
- char addr_buf2[INET_ADDRSTRLEN];
-
- inet_ntop(AF_INET, &vxl->vtep_ip,
- addr_buf1, INET_ADDRSTRLEN);
- inet_ntop(AF_INET, &vxl->mcast_grp,
- addr_buf2, INET_ADDRSTRLEN);
-
+ if (IS_ZEBRA_DEBUG_VXLAN)
zlog_debug(
- "Add L2-VNI %u VRF %s intf %s(%u) VLAN %u local IP %s mcast_grp %s master %u",
+ "Add L2-VNI %u VRF %s intf %s(%u) VLAN %u local IP %pI4 mcast_grp %pI4 master %u",
vni,
vlan_if ? vrf_id_to_name(vlan_if->vrf_id)
: VRF_DEFAULT_NAME,
ifp->name, ifp->ifindex, vxl->access_vlan,
- addr_buf1, addr_buf2,
+ &vxl->vtep_ip, &vxl->mcast_grp,
zif->brslave_info.bridge_ifindex);
- }
/* If down or not mapped to a bridge, we're done. */
if (!if_is_operative(ifp) || !zif->brslave_info.br_if)
char *err, int err_str_sz, int filter,
int add)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
struct zebra_vrf *zvrf_evpn = NULL;
zvrf_evpn = zebra_vrf_get_evpn();
int zebra_vxlan_vrf_enable(struct zebra_vrf *zvrf)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
if (zvrf->l3vni)
zl3vni = zl3vni_lookup(zvrf->l3vni);
int zebra_vxlan_vrf_disable(struct zebra_vrf *zvrf)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
if (zvrf->l3vni)
zl3vni = zl3vni_lookup(zvrf->l3vni);
int zebra_vxlan_vrf_delete(struct zebra_vrf *zvrf)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
vni_t vni;
if (zvrf->l3vni)
struct stream *s;
int advertise;
vni_t vni = 0;
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
struct interface *ifp = NULL;
if (!EVPN_ENABLED(zvrf)) {
struct stream *s;
int advertise;
vni_t vni = 0;
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
struct interface *ifp = NULL;
struct zebra_if *zif = NULL;
struct zebra_l2info_vxlan zl2_info;
struct stream *s;
int advertise;
vni_t vni = 0;
- zebra_evpn_t *zevpn = NULL;
+ struct zebra_evpn *zevpn = NULL;
struct interface *ifp = NULL;
if (!EVPN_ENABLED(zvrf)) {
/* get the l3vni svi ifindex */
ifindex_t get_l3vni_svi_ifindex(vrf_id_t vrf_id)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
zl3vni = zl3vni_from_vrf(vrf_id);
if (!zl3vni || !is_l3vni_oper_up(zl3vni))
static unsigned int zebra_vxlan_sg_hash_key_make(const void *p)
{
- const zebra_vxlan_sg_t *vxlan_sg = p;
+ const struct zebra_vxlan_sg *vxlan_sg = p;
return (jhash_2words(vxlan_sg->sg.src.s_addr,
vxlan_sg->sg.grp.s_addr, 0));
static bool zebra_vxlan_sg_hash_eq(const void *p1, const void *p2)
{
- const zebra_vxlan_sg_t *sg1 = p1;
- const zebra_vxlan_sg_t *sg2 = p2;
+ const struct zebra_vxlan_sg *sg1 = p1;
+ const struct zebra_vxlan_sg *sg2 = p2;
return ((sg1->sg.src.s_addr == sg2->sg.src.s_addr)
&& (sg1->sg.grp.s_addr == sg2->sg.grp.s_addr));
}
-static zebra_vxlan_sg_t *zebra_vxlan_sg_new(struct zebra_vrf *zvrf,
- struct prefix_sg *sg)
+static struct zebra_vxlan_sg *zebra_vxlan_sg_new(struct zebra_vrf *zvrf,
+ struct prefix_sg *sg)
{
- zebra_vxlan_sg_t *vxlan_sg;
+ struct zebra_vxlan_sg *vxlan_sg;
vxlan_sg = XCALLOC(MTYPE_ZVXLAN_SG, sizeof(*vxlan_sg));
return vxlan_sg;
}
-static zebra_vxlan_sg_t *zebra_vxlan_sg_find(struct zebra_vrf *zvrf,
- struct prefix_sg *sg)
+static struct zebra_vxlan_sg *zebra_vxlan_sg_find(struct zebra_vrf *zvrf,
+ struct prefix_sg *sg)
{
- zebra_vxlan_sg_t lookup;
+ struct zebra_vxlan_sg lookup;
lookup.sg = *sg;
return hash_lookup(zvrf->vxlan_sg_table, &lookup);
}
-static zebra_vxlan_sg_t *zebra_vxlan_sg_add(struct zebra_vrf *zvrf,
- struct prefix_sg *sg)
+static struct zebra_vxlan_sg *zebra_vxlan_sg_add(struct zebra_vrf *zvrf,
+ struct prefix_sg *sg)
{
- zebra_vxlan_sg_t *vxlan_sg;
- zebra_vxlan_sg_t *parent = NULL;
+ struct zebra_vxlan_sg *vxlan_sg;
+ struct zebra_vxlan_sg *parent = NULL;
struct in_addr sip;
vxlan_sg = zebra_vxlan_sg_find(zvrf, sg);
return vxlan_sg;
}
-static void zebra_vxlan_sg_del(zebra_vxlan_sg_t *vxlan_sg)
+static void zebra_vxlan_sg_del(struct zebra_vxlan_sg *vxlan_sg)
{
struct in_addr sip;
struct zebra_vrf *zvrf;
static void zebra_vxlan_sg_do_deref(struct zebra_vrf *zvrf,
struct in_addr sip, struct in_addr mcast_grp)
{
- zebra_vxlan_sg_t *vxlan_sg;
+ struct zebra_vxlan_sg *vxlan_sg;
struct prefix_sg sg;
sg.family = AF_INET;
zebra_vxlan_sg_del(vxlan_sg);
}
-static zebra_vxlan_sg_t *zebra_vxlan_sg_do_ref(struct zebra_vrf *zvrf,
- struct in_addr sip, struct in_addr mcast_grp)
+static struct zebra_vxlan_sg *zebra_vxlan_sg_do_ref(struct zebra_vrf *zvrf,
+ struct in_addr sip,
+ struct in_addr mcast_grp)
{
- zebra_vxlan_sg_t *vxlan_sg;
+ struct zebra_vxlan_sg *vxlan_sg;
struct prefix_sg sg;
sg.family = AF_INET;
static void zebra_vxlan_xg_pre_cleanup(struct hash_bucket *bucket, void *arg)
{
- zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data;
+ struct zebra_vxlan_sg *vxlan_sg = (struct zebra_vxlan_sg *)bucket->data;
/* increment the ref count against (*,G) to prevent them from being
* deleted
static void zebra_vxlan_xg_post_cleanup(struct hash_bucket *bucket, void *arg)
{
- zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data;
+ struct zebra_vxlan_sg *vxlan_sg = (struct zebra_vxlan_sg *)bucket->data;
/* decrement the dummy ref count against (*,G) to delete them */
if (vxlan_sg->sg.src.s_addr == INADDR_ANY) {
static void zebra_vxlan_sg_cleanup(struct hash_bucket *bucket, void *arg)
{
- zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data;
+ struct zebra_vxlan_sg *vxlan_sg = (struct zebra_vxlan_sg *)bucket->data;
zebra_vxlan_sg_del(vxlan_sg);
}
static void zebra_vxlan_sg_replay_send(struct hash_bucket *bucket, void *arg)
{
- zebra_vxlan_sg_t *vxlan_sg = (zebra_vxlan_sg_t *)bucket->data;
+ struct zebra_vxlan_sg *vxlan_sg = (struct zebra_vxlan_sg *)bucket->data;
zebra_vxlan_sg_send(vxlan_sg->zvrf, &vxlan_sg->sg,
vxlan_sg->sg_str, ZEBRA_VXLAN_SG_ADD);
/* Cleanup EVPN configuration of a specific VRF */
static void zebra_evpn_vrf_cfg_cleanup(struct zebra_vrf *zvrf)
{
- zebra_l3vni_t *zl3vni = NULL;
+ struct zebra_l3vni *zl3vni = NULL;
zvrf->advertise_all_vni = 0;
zvrf->advertise_gw_macip = 0;
#define ERR_STR_SZ 256
-/* definitions */
-typedef struct zebra_l3vni_t_ zebra_l3vni_t;
-
-
/* L3 VNI hash table */
-struct zebra_l3vni_t_ {
+struct zebra_l3vni {
/* VNI key */
vni_t vni;
};
/* get the vx-intf name for l3vni */
-static inline const char *zl3vni_vxlan_if_name(zebra_l3vni_t *zl3vni)
+static inline const char *zl3vni_vxlan_if_name(struct zebra_l3vni *zl3vni)
{
return zl3vni->vxlan_if ? zl3vni->vxlan_if->name : "None";
}
/* get the svi intf name for l3vni */
-static inline const char *zl3vni_svi_if_name(zebra_l3vni_t *zl3vni)
+static inline const char *zl3vni_svi_if_name(struct zebra_l3vni *zl3vni)
{
return zl3vni->svi_if ? zl3vni->svi_if->name : "None";
}
/* get the vrf name for l3vni */
-static inline const char *zl3vni_vrf_name(zebra_l3vni_t *zl3vni)
+static inline const char *zl3vni_vrf_name(struct zebra_l3vni *zl3vni)
{
return vrf_id_to_name(zl3vni->vrf_id);
}
/* get the rmac string */
-static inline const char *zl3vni_rmac2str(zebra_l3vni_t *zl3vni, char *buf,
+static inline const char *zl3vni_rmac2str(struct zebra_l3vni *zl3vni, char *buf,
int size)
{
char *ptr;
}
/* get the sys mac string */
-static inline const char *zl3vni_sysmac2str(zebra_l3vni_t *zl3vni, char *buf,
- int size)
+static inline const char *zl3vni_sysmac2str(struct zebra_l3vni *zl3vni,
+ char *buf, int size)
{
char *ptr;
* 3. it is associated to an SVI
* 4. associated SVI is oper up
*/
-static inline int is_l3vni_oper_up(zebra_l3vni_t *zl3vni)
+static inline int is_l3vni_oper_up(struct zebra_l3vni *zl3vni)
{
return (is_evpn_enabled() && zl3vni && (zl3vni->vrf_id != VRF_UNKNOWN)
&& zl3vni->vxlan_if && if_is_operative(zl3vni->vxlan_if)
&& zl3vni->svi_if && if_is_operative(zl3vni->svi_if));
}
-static inline const char *zl3vni_state2str(zebra_l3vni_t *zl3vni)
+static inline const char *zl3vni_state2str(struct zebra_l3vni *zl3vni)
{
if (!zl3vni)
return NULL;
return NULL;
}
-static inline vrf_id_t zl3vni_vrf_id(zebra_l3vni_t *zl3vni)
+static inline vrf_id_t zl3vni_vrf_id(struct zebra_l3vni *zl3vni)
{
return zl3vni->vrf_id;
}
-static inline void zl3vni_get_svi_rmac(zebra_l3vni_t *zl3vni,
+static inline void zl3vni_get_svi_rmac(struct zebra_l3vni *zl3vni,
struct ethaddr *rmac)
{
if (!zl3vni)
/* context for neigh hash walk - update l3vni and rmac */
struct neigh_l3info_walk_ctx {
- zebra_evpn_t *zevpn;
- zebra_l3vni_t *zl3vni;
+ struct zebra_evpn *zevpn;
+ struct zebra_l3vni *zl3vni;
int add;
};
struct json_object *json;
};
-extern zebra_l3vni_t *zl3vni_from_vrf(vrf_id_t vrf_id);
-extern struct interface *zl3vni_map_to_vxlan_if(zebra_l3vni_t *zl3vni);
-extern struct interface *zl3vni_map_to_svi_if(zebra_l3vni_t *zl3vni);
-extern struct interface *zl3vni_map_to_mac_vlan_if(zebra_l3vni_t *zl3vni);
-extern zebra_l3vni_t *zl3vni_lookup(vni_t vni);
+extern struct zebra_l3vni *zl3vni_from_vrf(vrf_id_t vrf_id);
+extern struct interface *zl3vni_map_to_vxlan_if(struct zebra_l3vni *zl3vni);
+extern struct interface *zl3vni_map_to_svi_if(struct zebra_l3vni *zl3vni);
+extern struct interface *zl3vni_map_to_mac_vlan_if(struct zebra_l3vni *zl3vni);
+extern struct zebra_l3vni *zl3vni_lookup(vni_t vni);
extern vni_t vni_id_from_svi(struct interface *ifp, struct interface *br_if);
-DECLARE_HOOK(zebra_rmac_update, (zebra_mac_t *rmac, zebra_l3vni_t *zl3vni,
- bool delete, const char *reason), (rmac, zl3vni, delete, reason));
+DECLARE_HOOK(zebra_rmac_update,
+ (struct zebra_mac * rmac, struct zebra_l3vni *zl3vni, bool delete,
+ const char *reason),
+ (rmac, zl3vni, delete, reason));
#ifdef __cplusplus
* an aggregated table that pimd can consume without much
* re-interpretation.
*/
-typedef struct zebra_vxlan_sg_ {
+struct zebra_vxlan_sg {
struct zebra_vrf *zvrf;
struct prefix_sg sg;
/* For SG - num of L2 VNIs using this entry for sending BUM traffic */
/* For XG - num of SG using this as parent */
uint32_t ref_cnt;
-} zebra_vxlan_sg_t;
+};
-extern zebra_evpn_t *zevpn_lookup(vni_t vni);
-extern void zebra_vxlan_sync_mac_dp_install(zebra_mac_t *mac, bool set_inactive,
- bool force_clear_static, const char *caller);
+extern struct zebra_evpn *zevpn_lookup(vni_t vni);
+extern void zebra_vxlan_sync_mac_dp_install(struct zebra_mac *mac,
+ bool set_inactive,
+ bool force_clear_static,
+ const char *caller);
extern bool zebra_evpn_do_dup_addr_detect(struct zebra_vrf *zvrf);
#endif /* _ZEBRA_VXLAN_PRIVATE_H */