Zebra: Default route distribute handling.
/* Cluster list related functions. */
static struct cluster_list *cluster_parse(struct in_addr *pnt, int length)
{
- struct cluster_list tmp;
+ struct cluster_list tmp = {};
struct cluster_list *cluster;
tmp.length = length;
- tmp.list = pnt;
+ tmp.list = length == 0 ? NULL : pnt;
cluster = hash_get(cluster_hash, &tmp, cluster_hash_alloc);
cluster->refcnt++;
const struct cluster_list *cluster1 = p1;
const struct cluster_list *cluster2 = p2;
- return (cluster1->length == cluster2->length
- && (cluster1->list == cluster2->list
- || memcmp(cluster1->list, cluster2->list, cluster1->length)
- == 0));
+ if (cluster1->list == cluster2->list)
+ return true;
+
+ if (!cluster1->list || !cluster2->list)
+ return false;
+
+ if (cluster1->length != cluster2->length)
+ return false;
+
+ return (memcmp(cluster1->list, cluster2->list, cluster1->length) == 0);
}
static void cluster_free(struct cluster_list *cluster)
return find;
}
-void cluster_unintern(struct cluster_list *cluster)
+static void cluster_unintern(struct cluster_list **cluster)
{
- if (cluster->refcnt)
- cluster->refcnt--;
+ if ((*cluster)->refcnt)
+ (*cluster)->refcnt--;
- if (cluster->refcnt == 0) {
- hash_release(cluster_hash, cluster);
- cluster_free(cluster);
+ if ((*cluster)->refcnt == 0) {
+ void *p = hash_release(cluster_hash, *cluster);
+ assert(p == *cluster);
+ cluster_free(*cluster);
+ *cluster = NULL;
}
}
UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_LARGE_COMMUNITIES));
if (attr->cluster)
- cluster_unintern(attr->cluster);
+ cluster_unintern(&attr->cluster);
UNSET_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_CLUSTER_LIST));
if (attr->transit)
if (length == 0) {
attr->community = NULL;
- return BGP_ATTR_PARSE_PROCEED;
+ return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
+ args->total);
}
attr->community =
* malformed, the UPDATE message SHALL be handled using the approach
* of "treat-as-withdraw".
*/
- if (length % 4) {
+ if (length == 0 || length % 4) {
flog_err(EC_BGP_ATTR_LEN, "Bad cluster list length %d", length);
return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_ATTR_LENG_ERR,
if (length == 0) {
attr->lcommunity = NULL;
/* Empty extcomm doesn't seem to be invalid per se */
- return BGP_ATTR_PARSE_PROCEED;
+ return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
+ args->total);
}
attr->lcommunity =
if (length == 0) {
attr->ecommunity = NULL;
/* Empty extcomm doesn't seem to be invalid per se */
- return BGP_ATTR_PARSE_PROCEED;
+ return bgp_attr_malformed(args, BGP_NOTIFY_UPDATE_OPT_ATTR_ERR,
+ args->total);
}
attr->ecommunity =
bgp_attr_extcom_tunnel_type(attr,
(bgp_encap_types *)&attr->encap_tunneltype);
+ /* Extract link bandwidth, if any. */
+ (void)ecommunity_linkbw_present(attr->ecommunity, &attr->link_bw);
+
return BGP_ATTR_PARSE_PROCEED;
}
/* rmap set table */
uint32_t rmap_table_id;
+
+ /* Link bandwidth value, if any. */
+ uint32_t link_bw;
};
/* rmap_change_flags definition */
#define BATTR_RMAP_IPV6_GLOBAL_NHOP_CHANGED (1 << 4)
#define BATTR_RMAP_IPV6_LL_NHOP_CHANGED (1 << 5)
#define BATTR_RMAP_IPV6_PREFER_GLOBAL_CHANGED (1 << 6)
+#define BATTR_RMAP_LINK_BW_SET (1 << 7)
/* Router Reflector related structure. */
struct cluster_list {
/* Cluster list prototypes. */
extern bool cluster_loop_check(struct cluster_list *, struct in_addr);
-extern void cluster_unintern(struct cluster_list *);
/* Below exported for unit-test purposes only */
struct bgp_attr_parser_args {
{
return (attr) ? attr->mm_seqnum : 0;
}
-
#endif /* _QUAGGA_BGP_ATTR_H */
memcpy(&routermac_ecom.val[2], routermac->octet, ETH_ALEN);
if (!attr->ecommunity)
attr->ecommunity = ecommunity_new();
- ecommunity_add_val(attr->ecommunity, &routermac_ecom);
+ ecommunity_add_val(attr->ecommunity, &routermac_ecom, false, false);
ecommunity_str(attr->ecommunity);
}
Attribute structure. When the value is already exists in the
structure, we don't add the value. Newly added value is sorted by
numerical order. When the value is added to the structure return 1
- else return 0. */
-bool ecommunity_add_val(struct ecommunity *ecom, struct ecommunity_val *eval)
+ else return 0.
+ The additional parameters 'unique' and 'overwrite' ensure a particular
+ extended community (based on type and sub-type) is present only
+ once and whether the new value should replace what is existing or
+ not.
+*/
+bool ecommunity_add_val(struct ecommunity *ecom, struct ecommunity_val *eval,
+ bool unique, bool overwrite)
{
- int c;
+ int c, ins_idx;
+	/* When this is the first value, just add it. */
if (ecom->val == NULL) {
}
/* If the value already exists in the structure return 0. */
+ /* check also if the extended community itself exists. */
c = 0;
+ ins_idx = -1;
for (uint8_t *p = ecom->val; c < ecom->size;
p += ECOMMUNITY_SIZE, c++) {
+ if (unique) {
+ if (p[0] == eval->val[0] &&
+ p[1] == eval->val[1]) {
+ if (overwrite) {
+ memcpy(p, eval->val, ECOMMUNITY_SIZE);
+ return 1;
+ }
+ return 0;
+ }
+ }
int ret = memcmp(p, eval->val, ECOMMUNITY_SIZE);
if (ret == 0)
- return false;
- else if (ret > 0)
- break;
+ return 0;
+ if (ret > 0) {
+ if (!unique)
+ break;
+ if (ins_idx == -1)
+ ins_idx = c;
+ }
}
+ if (ins_idx == -1)
+ ins_idx = c;
+
/* Add the value to the structure with numerical sorting. */
ecom->size++;
ecom->val = XREALLOC(MTYPE_ECOMMUNITY_VAL, ecom->val,
ecom->size * ECOMMUNITY_SIZE);
- memmove(ecom->val + ((c + 1) * ECOMMUNITY_SIZE),
- ecom->val + (c * ECOMMUNITY_SIZE),
- (ecom->size - 1 - c) * ECOMMUNITY_SIZE);
- memcpy(ecom->val + (c * ECOMMUNITY_SIZE), eval->val, ECOMMUNITY_SIZE);
+ memmove(ecom->val + ((ins_idx + 1) * ECOMMUNITY_SIZE),
+ ecom->val + (ins_idx * ECOMMUNITY_SIZE),
+ (ecom->size - 1 - ins_idx) * ECOMMUNITY_SIZE);
+ memcpy(ecom->val + (ins_idx * ECOMMUNITY_SIZE),
+ eval->val, ECOMMUNITY_SIZE);
return true;
}
for (i = 0; i < ecom->size; i++) {
eval = (struct ecommunity_val *)(ecom->val
+ (i * ECOMMUNITY_SIZE));
- ecommunity_add_val(new, eval);
+ ecommunity_add_val(new, eval, false, false);
}
return new;
}
if (ecom == NULL)
ecom = ecommunity_new();
eval.val[1] = type;
- ecommunity_add_val(ecom, &eval);
+ ecommunity_add_val(ecom, &eval, false, false);
break;
case ecommunity_token_unknown:
default:
return len;
}
+/*
+ * Format a link-bandwidth extended community for display.
+ * 'pnt' points at the value portion: 2-byte AS followed by a 4-byte
+ * bandwidth in bytes-per-second.  Writes "LB:<as>:<bytes> (<rate>)"
+ * into 'buf' and returns the snprintf() result.
+ */
+static int ecommunity_lb_str(char *buf, size_t bufsz, const uint8_t *pnt)
+{
+	int len = 0;
+	as_t as;
+	uint32_t bw;
+	char bps_buf[20] = {0};
+
+#define ONE_GBPS_BYTES (1000 * 1000 * 1000 / 8)
+#define ONE_MBPS_BYTES (1000 * 1000 / 8)
+#define ONE_KBPS_BYTES (1000 / 8)
+
+	as = (*pnt++ << 8);
+	as |= (*pnt++);
+	(void)ptr_get_be32(pnt, &bw);
+	/* Divide in floating point - the previous integer division
+	 * truncated before the cast, so "%.3f" always printed .000.
+	 * Use snprintf to bound all writes into bps_buf.
+	 */
+	if (bw >= ONE_GBPS_BYTES)
+		snprintf(bps_buf, sizeof(bps_buf), "%.3f Gbps",
+			 (float)bw / ONE_GBPS_BYTES);
+	else if (bw >= ONE_MBPS_BYTES)
+		snprintf(bps_buf, sizeof(bps_buf), "%.3f Mbps",
+			 (float)bw / ONE_MBPS_BYTES);
+	else if (bw >= ONE_KBPS_BYTES)
+		snprintf(bps_buf, sizeof(bps_buf), "%.3f Kbps",
+			 (float)bw / ONE_KBPS_BYTES);
+	else
+		snprintf(bps_buf, sizeof(bps_buf), "%u bps", bw * 8);
+
+	len = snprintf(buf, bufsz, "LB:%u:%u (%s)", as, bw, bps_buf);
+	return len;
+}
+
/* Convert extended community attribute to string.
Due to historical reason of industry standard implementation, there
INET_ADDRSTRLEN);
snprintf(encbuf, sizeof(encbuf),
"NH:%s:%d", ipv4str, pnt[5]);
+ } else if (sub_type ==
+ ECOMMUNITY_LINK_BANDWIDTH &&
+ type == ECOMMUNITY_ENCODE_AS) {
+ ecommunity_lb_str(encbuf,
+ sizeof(encbuf), pnt);
} else
unk_ecom = 1;
} else {
(uint8_t)mac.octet[5]);
} else
unk_ecom = 1;
+ } else if (type == ECOMMUNITY_ENCODE_AS_NON_TRANS) {
+ sub_type = *pnt++;
+ if (sub_type == ECOMMUNITY_LINK_BANDWIDTH)
+ ecommunity_lb_str(encbuf, sizeof(encbuf), pnt);
+ else
+ unk_ecom = 1;
} else {
sub_type = *pnt++;
unk_ecom = 1;
/* remove ext. community matching type and subtype
* return 1 on success ( removed ), 0 otherwise (not present)
*/
-extern bool ecommunity_strip(struct ecommunity *ecom, uint8_t type,
- uint8_t subtype)
+bool ecommunity_strip(struct ecommunity *ecom, uint8_t type,
+ uint8_t subtype)
{
uint8_t *p, *q, *new;
int c, found = 0;
}
}
}
+
+/*
+ * Return the BGP link-bandwidth extended community, if present;
+ * the actual bandwidth (bytes-per-second, taken from the last four
+ * octets of the value) is returned via 'bw', which is zeroed when no
+ * such community exists.  Both the transitive (ECOMMUNITY_ENCODE_AS)
+ * and non-transitive (ECOMMUNITY_ENCODE_AS_NON_TRANS) encodings are
+ * matched.  Returns a pointer to the first matching 8-octet value
+ * inside ecom->val, or NULL if absent.
+ */
+const uint8_t *ecommunity_linkbw_present(struct ecommunity *ecom, uint32_t *bw)
+{
+	const uint8_t *eval;
+	int i;
+
+	if (bw)
+		*bw = 0;
+
+	if (!ecom || !ecom->size)
+		return NULL;
+
+	for (i = 0; i < ecom->size; i++) {
+		const uint8_t *pnt;
+		uint8_t type, sub_type;
+		uint32_t bwval;
+
+		eval = pnt = (ecom->val + (i * ECOMMUNITY_SIZE));
+		type = *pnt++;
+		sub_type = *pnt++;
+
+		if ((type == ECOMMUNITY_ENCODE_AS ||
+		     type == ECOMMUNITY_ENCODE_AS_NON_TRANS) &&
+		    sub_type == ECOMMUNITY_LINK_BANDWIDTH) {
+			pnt += 2; /* bandwidth is encoded as AS:val */
+			pnt = ptr_get_be32(pnt, &bwval);
+			(void)pnt; /* consume value */
+			if (bw)
+				*bw = bwval;
+			return eval;
+		}
+	}
+
+	return NULL;
+}
+
+
+/*
+ * Replace a transitive link-bandwidth extended community in 'ecom'
+ * with the passed cumulative bandwidth 'cum_bw' (clamped to 32 bits).
+ * Returns a newly duplicated extcommunity when a replacement was made;
+ * otherwise the original 'ecom' is returned as-is.  Callers must
+ * compare the return value against the input to know whether a new
+ * structure was allocated.
+ */
+struct ecommunity *ecommunity_replace_linkbw(as_t as,
+					     struct ecommunity *ecom,
+					     uint64_t cum_bw)
+{
+	struct ecommunity *new;
+	struct ecommunity_val lb_eval;
+	const uint8_t *eval;
+	uint8_t type;
+	uint32_t cur_bw;
+
+	/* Nothing to replace if link-bandwidth doesn't exist or
+	 * is non-transitive - just return existing extcommunity.
+	 */
+	new = ecom;
+	if (!ecom || !ecom->size)
+		return new;
+
+	eval = ecommunity_linkbw_present(ecom, &cur_bw);
+	if (!eval)
+		return new;
+
+	type = *eval;
+	if (type & ECOMMUNITY_FLAG_NON_TRANSITIVE)
+		return new;
+
+	/* Transitive link-bandwidth exists, replace with the passed
+	 * (cumulative) bandwidth value. We need to create a new
+	 * extcommunity for this - refer to AS-Path replace function
+	 * for reference.
+	 */
+	if (cum_bw > 0xFFFFFFFF)
+		cum_bw = 0xFFFFFFFF;
+	encode_lb_extcomm(as > BGP_AS_MAX ? BGP_AS_TRANS : as, cum_bw,
+			  false, &lb_eval);
+	new = ecommunity_dup(ecom);
+	ecommunity_add_val(new, &lb_eval, true, true);
+
+	return new;
+}
#include "bgpd/bgp_route.h"
#include "bgpd/bgpd.h"
+/* Refer to rfc7153 for the IANA registry definitions. These are
+ * updated by other standards like rfc7674.
+ */
/* High-order octet of the Extended Communities type field. */
#define ECOMMUNITY_ENCODE_AS 0x00
#define ECOMMUNITY_ENCODE_IP 0x01
#define ECOMMUNITY_ENCODE_AS4 0x02
#define ECOMMUNITY_ENCODE_OPAQUE 0x03
#define ECOMMUNITY_ENCODE_EVPN 0x06
-#define ECOMMUNITY_ENCODE_TRANS_EXP 0x80 /* Flow Spec */
#define ECOMMUNITY_ENCODE_REDIRECT_IP_NH 0x08 /* Flow Spec */
+/* Generic Transitive Experimental */
+#define ECOMMUNITY_ENCODE_TRANS_EXP 0x80
+
/* RFC7674 */
#define ECOMMUNITY_EXTENDED_COMMUNITY_PART_2 0x81
#define ECOMMUNITY_EXTENDED_COMMUNITY_PART_3 0x82
+/* Non-transitive extended community types. */
+#define ECOMMUNITY_ENCODE_AS_NON_TRANS 0x40
+#define ECOMMUNITY_ENCODE_IP_NON_TRANS 0x41
+#define ECOMMUNITY_ENCODE_AS4_NON_TRANS 0x42
+#define ECOMMUNITY_ENCODE_OPAQUE_NON_TRANS 0x43
+
/* Low-order octet of the Extended Communities type field. */
+/* Note: This really depends on the high-order octet. This means that
+ * multiple definitions for the same value are possible.
+ */
#define ECOMMUNITY_ROUTE_TARGET 0x02
#define ECOMMUNITY_SITE_ORIGIN 0x03
+#define ECOMMUNITY_LINK_BANDWIDTH 0x04
#define ECOMMUNITY_TRAFFIC_RATE 0x06 /* Flow Spec */
#define ECOMMUNITY_TRAFFIC_ACTION 0x07
#define ECOMMUNITY_REDIRECT_VRF 0x08
eval->val[7] = val & 0xff;
}
+/*
+ * Encode BGP Link Bandwidth extended community
+ * bandwidth (bw) is in bytes-per-sec
+ *
+ * Note: only the low 16 bits of 'as' fit in the encoding; callers
+ * pass BGP_AS_TRANS in place of a 4-byte AS (see call sites).
+ */
+static inline void encode_lb_extcomm(as_t as, uint32_t bw, bool non_trans,
+				     struct ecommunity_val *eval)
+{
+	memset(eval, 0, sizeof(*eval));
+	eval->val[0] = ECOMMUNITY_ENCODE_AS;
+	if (non_trans)
+		eval->val[0] |= ECOMMUNITY_FLAG_NON_TRANSITIVE;
+	eval->val[1] = ECOMMUNITY_LINK_BANDWIDTH;
+	/* AS in the next 2 octets, bandwidth big-endian in the last 4. */
+	eval->val[2] = (as >> 8) & 0xff;
+	eval->val[3] = as & 0xff;
+	eval->val[4] = (bw >> 24) & 0xff;
+	eval->val[5] = (bw >> 16) & 0xff;
+	eval->val[6] = (bw >> 8) & 0xff;
+	eval->val[7] = bw & 0xff;
+}
+
extern void ecommunity_init(void);
extern void ecommunity_finish(void);
extern void ecommunity_free(struct ecommunity **);
extern struct ecommunity_val *ecommunity_lookup(const struct ecommunity *,
uint8_t, uint8_t);
extern bool ecommunity_add_val(struct ecommunity *ecom,
- struct ecommunity_val *eval);
+ struct ecommunity_val *eval,
+ bool unique, bool overwrite);
/* for vpn */
extern struct ecommunity *ecommunity_new(void);
-extern bool ecommunity_add_val(struct ecommunity *, struct ecommunity_val *);
extern bool ecommunity_strip(struct ecommunity *ecom, uint8_t type,
uint8_t subtype);
extern struct ecommunity *ecommunity_new(void);
struct bgp_aggregate *aggregate,
struct ecommunity *ecommunity);
extern void bgp_aggr_ecommunity_remove(void *arg);
-
+extern const uint8_t *ecommunity_linkbw_present(struct ecommunity *ecom,
+ uint32_t *bw);
+extern struct ecommunity *ecommunity_replace_linkbw(as_t as,
+ struct ecommunity *ecom, uint64_t cum_bw);
static inline void ecommunity_strip_rts(struct ecommunity *ecom)
{
encode_route_target_as((bgp->as & 0xFFFF), vni, &eval);
ecomadd = ecommunity_new();
- ecommunity_add_val(ecomadd, &eval);
+ ecommunity_add_val(ecomadd, &eval, false, false);
for (ALL_LIST_ELEMENTS_RO(rtl, node, ecom))
if (ecommunity_cmp(ecomadd, ecom))
ecom_found = true;
struct attr *attr)
{
struct ecommunity ecom_encap;
- struct ecommunity ecom_rmac;
struct ecommunity_val eval;
struct ecommunity_val eval_rmac;
bgp_encap_types tnl_type;
struct listnode *node, *nnode;
struct ecommunity *ecom;
+ struct ecommunity *old_ecom;
struct list *vrf_export_rtl = NULL;
/* Encap */
ecom_encap.val = (uint8_t *)eval.val;
/* Add Encap */
- attr->ecommunity = ecommunity_dup(&ecom_encap);
+ if (attr->ecommunity) {
+ old_ecom = attr->ecommunity;
+ ecom = ecommunity_merge(ecommunity_dup(old_ecom), &ecom_encap);
+ if (!old_ecom->refcnt)
+ ecommunity_free(&old_ecom);
+ } else
+ ecom = ecommunity_dup(&ecom_encap);
+ attr->ecommunity = ecom;
/* Add the export RTs for L3VNI/VRF */
vrf_export_rtl = bgp_vrf->vrf_export_rtl;
/* add the router mac extended community */
if (!is_zero_mac(&attr->rmac)) {
- memset(&ecom_rmac, 0, sizeof(ecom_rmac));
encode_rmac_extcomm(&eval_rmac, &attr->rmac);
- ecom_rmac.size = 1;
- ecom_rmac.val = (uint8_t *)eval_rmac.val;
- attr->ecommunity =
- ecommunity_merge(attr->ecommunity, &ecom_rmac);
+ ecommunity_add_val(attr->ecommunity, &eval_rmac, true, true);
}
attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);
struct ecommunity ecom_encap;
struct ecommunity ecom_sticky;
struct ecommunity ecom_default_gw;
- struct ecommunity ecom_rmac;
struct ecommunity ecom_na;
struct ecommunity_val eval;
struct ecommunity_val eval_sticky;
/* Add RMAC, if told to. */
if (add_l3_ecomm) {
- memset(&ecom_rmac, 0, sizeof(ecom_rmac));
encode_rmac_extcomm(&eval_rmac, &attr->rmac);
- ecom_rmac.size = 1;
- ecom_rmac.val = (uint8_t *)eval_rmac.val;
- attr->ecommunity =
- ecommunity_merge(attr->ecommunity, &ecom_rmac);
+ ecommunity_add_val(attr->ecommunity, &eval_rmac, true, true);
}
/* Add default gateway, if needed. */
old_select->attr->nexthop);
}
UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG);
+ UNSET_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG);
bgp_zebra_clear_route_change_flags(rn);
return ret;
}
bgp_path_info_set_flag(rn, new_select, BGP_PATH_SELECTED);
bgp_path_info_unset_flag(rn, new_select, BGP_PATH_ATTR_CHANGED);
UNSET_FLAG(new_select->flags, BGP_PATH_MULTIPATH_CHG);
+ UNSET_FLAG(new_select->flags, BGP_PATH_LINK_BW_CHG);
}
if (new_select && new_select->type == ZEBRA_ROUTE_BGP
bgp, vpn, (const struct prefix_evpn *)bgp_node_get_prefix(rn),
old_select);
UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG);
+ UNSET_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG);
bgp_zebra_clear_route_change_flags(rn);
return ret;
}
bgp_path_info_set_flag(rn, new_select, BGP_PATH_SELECTED);
bgp_path_info_unset_flag(rn, new_select, BGP_PATH_ATTR_CHANGED);
UNSET_FLAG(new_select->flags, BGP_PATH_MULTIPATH_CHG);
+ UNSET_FLAG(new_select->flags, BGP_PATH_LINK_BW_CHG);
}
if (new_select && new_select->type == ZEBRA_ROUTE_BGP
encode_route_target_as((bgp->as & 0xFFFF), vni, &eval);
ecom_auto = ecommunity_new();
- ecommunity_add_val(ecom_auto, &eval);
+ ecommunity_add_val(ecom_auto, &eval, false, false);
node_to_del = NULL;
for (ALL_LIST_ELEMENTS(rtl, node, nnode, ecom)) {
else
bgp_port = tmp_port;
break;
- case 'e':
- multipath_num = atoi(optarg);
- if (multipath_num > MULTIPATH_NUM
- || multipath_num <= 0) {
+ case 'e': {
+ unsigned long int parsed_multipath =
+ strtoul(optarg, NULL, 10);
+ if (parsed_multipath == 0
+ || parsed_multipath > MULTIPATH_NUM
+ || parsed_multipath > UINT_MAX) {
flog_err(
EC_BGP_MULTIPATH,
- "Multipath Number specified must be less than %d and greater than 0",
+ "Multipath Number specified must be less than %u and greater than 0",
MULTIPATH_NUM);
return 1;
}
+ multipath_num = parsed_multipath;
break;
+ }
case 'l':
bgp_address = optarg;
/* listenon implies -n */
* Sets the count of multipaths into bestpath's mpath element
*/
static void bgp_path_info_mpath_count_set(struct bgp_path_info *path,
- uint32_t count)
+ uint16_t count)
{
struct bgp_path_info_mpath *mpath;
if (!count && !path->mpath)
mpath->mp_count = count;
}
+/*
+ * bgp_path_info_mpath_lb_update
+ *
+ * Update cumulative info related to link-bandwidth
+ *
+ * When 'set' is false, all link-bandwidth state on the path's mpath
+ * element is cleared.  When true, BGP_MP_LB_PRESENT reflects whether
+ * any bandwidth was seen (cum_bw != 0), BGP_MP_LB_ALL whether every
+ * multipath carried bandwidth, and cum_bw is recorded.
+ */
+static void bgp_path_info_mpath_lb_update(struct bgp_path_info *path, bool set,
+					  bool all_paths_lb, uint64_t cum_bw)
+{
+	struct bgp_path_info_mpath *mpath;
+
+	/* Allocate the mpath element on demand, but only when setting. */
+	if ((mpath = path->mpath) == NULL) {
+		if (!set)
+			return;
+		mpath = bgp_path_info_mpath_get(path);
+		if (!mpath)
+			return;
+	}
+	if (set) {
+		if (cum_bw)
+			SET_FLAG(mpath->mp_flags, BGP_MP_LB_PRESENT);
+		else
+			UNSET_FLAG(mpath->mp_flags, BGP_MP_LB_PRESENT);
+		if (all_paths_lb)
+			SET_FLAG(mpath->mp_flags, BGP_MP_LB_ALL);
+		else
+			UNSET_FLAG(mpath->mp_flags, BGP_MP_LB_ALL);
+		mpath->cum_bw = cum_bw;
+	} else {
+		mpath->mp_flags = 0;
+		mpath->cum_bw = 0;
+	}
+}
+
/*
* bgp_path_info_mpath_attr
*
return path->mpath->mp_attr;
}
+/*
+ * bgp_path_info_mpath_chkwtd
+ *
+ * Return if we should attempt to do weighted ECMP or not
+ * The path passed in is the bestpath.
+ */
+bool bgp_path_info_mpath_chkwtd(struct bgp *bgp, struct bgp_path_info *path)
+{
+	/* Check if told to ignore weights or not multipath */
+	if (bgp->lb_handling == BGP_LINK_BW_IGNORE_BW || !path->mpath)
+		return false;
+
+	/* All paths in multipath should have associated weight (bandwidth)
+	 * unless told explicitly otherwise.
+	 */
+	if (bgp->lb_handling != BGP_LINK_BW_SKIP_MISSING &&
+	    bgp->lb_handling != BGP_LINK_BW_DEFWT_4_MISSING)
+		return (path->mpath->mp_flags & BGP_MP_LB_ALL);
+
+	/* At least one path should have bandwidth. */
+	return (path->mpath->mp_flags & BGP_MP_LB_PRESENT);
+}
+
+/*
+ * bgp_path_info_mpath_cumbw
+ *
+ * Given bestpath bgp_path_info, return cumulative bandwidth
+ * computed for all multipaths with bandwidth info
+ */
+uint64_t bgp_path_info_mpath_cumbw(struct bgp_path_info *path)
+{
+	if (!path->mpath)
+		return 0;
+	return path->mpath->cum_bw;
+}
+
/*
* bgp_path_info_mpath_attr_set
*
struct bgp_maxpaths_cfg *mpath_cfg)
{
uint16_t maxpaths, mpath_count, old_mpath_count;
+ uint32_t bwval;
+ uint64_t cum_bw, old_cum_bw;
struct listnode *mp_node, *mp_next_node;
struct bgp_path_info *cur_mpath, *new_mpath, *next_mpath, *prev_mpath;
int mpath_changed, debug;
char nh_buf[2][INET6_ADDRSTRLEN];
+ bool all_paths_lb;
char path_buf[PATH_ADDPATH_STR_BUFFER];
mpath_changed = 0;
mpath_count = 0;
cur_mpath = NULL;
old_mpath_count = 0;
+ old_cum_bw = cum_bw = 0;
prev_mpath = new_best;
mp_node = listhead(mp_list);
debug = bgp_debug_bestpath(rn);
if (old_best) {
cur_mpath = bgp_path_info_mpath_first(old_best);
old_mpath_count = bgp_path_info_mpath_count(old_best);
+ old_cum_bw = bgp_path_info_mpath_cumbw(old_best);
bgp_path_info_mpath_count_set(old_best, 0);
+ bgp_path_info_mpath_lb_update(old_best, false, false, 0);
bgp_path_info_mpath_dequeue(old_best);
}
if (debug)
zlog_debug(
- "%pRN: starting mpath update, newbest %s num candidates %d old-mpath-count %d",
+ "%pRN: starting mpath update, newbest %s num candidates %d old-mpath-count %d old-cum-bw u%" PRIu64,
rn, new_best ? new_best->peer->host : "NONE",
- mp_list ? listcount(mp_list) : 0, old_mpath_count);
+ mp_list ? listcount(mp_list) : 0,
+ old_mpath_count, old_cum_bw);
/*
* We perform an ordered walk through both lists in parallel.
* Note that new_best might be somewhere in the mp_list, so we need
* to skip over it
*/
+ all_paths_lb = true; /* We'll reset if any path doesn't have LB. */
while (mp_node || cur_mpath) {
struct bgp_path_info *tmp_info;
cur_mpath);
prev_mpath = cur_mpath;
mpath_count++;
+ if (ecommunity_linkbw_present(
+ cur_mpath->attr->ecommunity, &bwval))
+ cum_bw += bwval;
+ else
+ all_paths_lb = false;
if (debug) {
bgp_path_info_path_with_addpath_rx_str(
cur_mpath, path_buf);
prev_mpath = new_mpath;
mpath_changed = 1;
mpath_count++;
+ if (ecommunity_linkbw_present(
+ new_mpath->attr->ecommunity, &bwval))
+ cum_bw += bwval;
+ else
+ all_paths_lb = false;
if (debug) {
bgp_path_info_path_with_addpath_rx_str(
new_mpath, path_buf);
}
if (new_best) {
+ bgp_path_info_mpath_count_set(new_best, mpath_count - 1);
+ if (mpath_count <= 1 ||
+ !ecommunity_linkbw_present(
+ new_best->attr->ecommunity, &bwval))
+ all_paths_lb = false;
+ else
+ cum_bw += bwval;
+ bgp_path_info_mpath_lb_update(new_best, true,
+ all_paths_lb, cum_bw);
+
if (debug)
zlog_debug(
- "%pRN: New mpath count (incl newbest) %d mpath-change %s",
+ "%pRN: New mpath count (incl newbest) %d mpath-change %s"
+ " all_paths_lb %d cum_bw u%" PRIu64,
rn, mpath_count,
- mpath_changed ? "YES" : "NO");
+ mpath_changed ? "YES" : "NO",
+ all_paths_lb, cum_bw);
- bgp_path_info_mpath_count_set(new_best, mpath_count - 1);
if (mpath_changed
|| (bgp_path_info_mpath_count(new_best) != old_mpath_count))
SET_FLAG(new_best->flags, BGP_PATH_MULTIPATH_CHG);
+ if ((mpath_count - 1) != old_mpath_count ||
+ old_cum_bw != cum_bw)
+ SET_FLAG(new_best->flags, BGP_PATH_LINK_BW_CHG);
}
}
bgp_path_info_mpath_count_set(dmed_best, 0);
UNSET_FLAG(dmed_best->flags, BGP_PATH_MULTIPATH_CHG);
+ UNSET_FLAG(dmed_best->flags, BGP_PATH_LINK_BW_CHG);
assert(bgp_path_info_mpath_first(dmed_best) == NULL);
}
struct bgp_path_info *mp_info;
/* When attached to best path, the number of selected multipaths */
- uint32_t mp_count;
+ uint16_t mp_count;
+
+ /* Flags - relevant as noted. */
+ uint16_t mp_flags;
+#define BGP_MP_LB_PRESENT 0x1 /* Link-bandwidth present for >= 1 path */
+#define BGP_MP_LB_ALL 0x2 /* Link-bandwidth present for all multipaths */
/* Aggregated attribute for advertising multipath route */
struct attr *mp_attr;
+
+	/* Cumulative bandwidth of all multipaths - attached to best path. */
+ uint64_t cum_bw;
};
/* Functions to support maximum-paths configuration */
/* Accessors for multipath information */
extern uint32_t bgp_path_info_mpath_count(struct bgp_path_info *path);
extern struct attr *bgp_path_info_mpath_attr(struct bgp_path_info *path);
+extern bool bgp_path_info_mpath_chkwtd(struct bgp *bgp,
+ struct bgp_path_info *path);
+extern uint64_t bgp_path_info_mpath_cumbw(struct bgp_path_info *path);
#endif /* _QUAGGA_BGP_MPATH_H */
ecom_copy.val[0] &=
~ECOMMUNITY_ENCODE_TRANS_EXP;
ecom_copy.val[1] = ECOMMUNITY_ROUTE_TARGET;
- ecommunity_add_val(eckey, &ecom_copy);
+ ecommunity_add_val(eckey, &ecom_copy,
+ false, false);
api_action->action = ACTION_REDIRECT;
api_action->u.redirect_vrf =
afi_t afi;
safi_t safi;
int samepeer_safe = 0; /* for synthetic mplsvpns routes */
+ bool nh_reset = false;
+ uint64_t cum_bw;
if (DISABLE_BGP_ANNOUNCE)
return false;
PEER_FLAG_FORCE_NEXTHOP_SELF)) {
if (!reflect
|| CHECK_FLAG(peer->af_flags[afi][safi],
- PEER_FLAG_FORCE_NEXTHOP_SELF))
+ PEER_FLAG_FORCE_NEXTHOP_SELF)) {
subgroup_announce_reset_nhop(
(peer_cap_enhe(peer, afi, safi)
? AF_INET6
: p->family),
attr);
+ nh_reset = true;
+ }
} else if (peer->sort == BGP_PEER_EBGP) {
/* Can also reset the nexthop if announcing to EBGP, but
* only if
if ((p->family == AF_INET) &&
(!bgp_subgrp_multiaccess_check_v4(
piattr->nexthop,
- subgrp, from)))
+ subgrp, from))) {
subgroup_announce_reset_nhop(
(peer_cap_enhe(peer, afi, safi)
? AF_INET6
: p->family),
attr);
+ nh_reset = true;
+ }
if ((p->family == AF_INET6) &&
(!bgp_subgrp_multiaccess_check_v6(
piattr->mp_nexthop_global,
- subgrp, from)))
+ subgrp, from))) {
subgroup_announce_reset_nhop(
(peer_cap_enhe(peer, afi, safi)
? AF_INET6
: p->family),
attr);
+ nh_reset = true;
+ }
"%s: BGP_PATH_ANNC_NH_SELF, family=%s",
__func__, family2str(family));
subgroup_announce_reset_nhop(family, attr);
+ nh_reset = true;
}
}
* the same interface.
*/
if (p->family == AF_INET6 || peer_cap_enhe(peer, afi, safi)) {
- if (IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global))
+ if (IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_global)) {
subgroup_announce_reset_nhop(AF_INET6, attr);
+ nh_reset = true;
+ }
}
+ /*
+ * When the next hop is set to ourselves, if all multipaths have
+ * link-bandwidth announce the cumulative bandwidth as that makes
+ * the most sense. However, don't modify if the link-bandwidth has
+ * been explicitly set by user policy.
+ */
+ if (nh_reset &&
+ bgp_path_info_mpath_chkwtd(bgp, pi) &&
+ (cum_bw = bgp_path_info_mpath_cumbw(pi)) != 0 &&
+ !CHECK_FLAG(attr->rmap_change_flags, BATTR_RMAP_LINK_BW_SET))
+ attr->ecommunity = ecommunity_replace_linkbw(
+ bgp->as, attr->ecommunity, cum_bw);
+
return true;
}
* when the best path has an attribute change anyway.
*/
if (CHECK_FLAG(selected->flags, BGP_PATH_IGP_CHANGED)
- || CHECK_FLAG(selected->flags, BGP_PATH_MULTIPATH_CHG))
+ || CHECK_FLAG(selected->flags, BGP_PATH_MULTIPATH_CHG)
+ || CHECK_FLAG(selected->flags, BGP_PATH_LINK_BW_CHG))
return true;
/*
bgp, afi, safi);
}
}
- UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG);
- bgp_zebra_clear_route_change_flags(rn);
/* If there is a change of interest to peers, reannounce the
* route. */
if (CHECK_FLAG(old_select->flags, BGP_PATH_ATTR_CHANGED)
+ || CHECK_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG)
|| CHECK_FLAG(rn->flags, BGP_NODE_LABEL_CHANGED)) {
group_announce_route(bgp, afi, safi, rn, new_select);
UNSET_FLAG(rn->flags, BGP_NODE_LABEL_CHANGED);
}
+ UNSET_FLAG(old_select->flags, BGP_PATH_MULTIPATH_CHG);
+ UNSET_FLAG(old_select->flags, BGP_PATH_LINK_BW_CHG);
+ bgp_zebra_clear_route_change_flags(rn);
UNSET_FLAG(rn->flags, BGP_NODE_PROCESS_SCHEDULED);
return;
}
bgp_path_info_set_flag(rn, new_select, BGP_PATH_SELECTED);
bgp_path_info_unset_flag(rn, new_select, BGP_PATH_ATTR_CHANGED);
UNSET_FLAG(new_select->flags, BGP_PATH_MULTIPATH_CHG);
+ UNSET_FLAG(new_select->flags, BGP_PATH_LINK_BW_CHG);
}
#ifdef ENABLE_BGP_VNC
#define BGP_PATH_MULTIPATH_CHG (1 << 12)
#define BGP_PATH_RIB_ATTR_CHG (1 << 13)
#define BGP_PATH_ANNC_NH_SELF (1 << 14)
+#define BGP_PATH_LINK_BW_CHG (1 << 15)
/* BGP route type. This can be static, RIP, OSPF, BGP etc. */
uint8_t type;
dst_pi->peer = peer;
dst_pi->attr = attr;
dst_pi->net = rn;
+ dst_pi->flags = src_pi->flags;
+ dst_pi->type = src_pi->type;
+ dst_pi->sub_type = src_pi->sub_type;
+ dst_pi->mpath = src_pi->mpath;
if (src_pi->extra) {
memcpy(dst_pie, src_pi->extra,
sizeof(struct bgp_path_info_extra));
#include "bgpd/bgp_pbr.h"
#include "bgpd/bgp_flowspec_util.h"
#include "bgpd/bgp_encap_types.h"
+#include "bgpd/bgp_mpath.h"
#ifdef ENABLE_BGP_VNC
#include "bgpd/rfapi/bgp_rfapi_cfg.h"
route_set_ecommunity_free,
};
+/* `set extcommunity bandwidth' */
+
+/* Compiled form of a "set extcommunity bandwidth ..." route-map rule. */
+struct rmap_ecomm_lb_set {
+	/* How the bandwidth value is derived - one of the values below. */
+	uint8_t lb_type;
+#define RMAP_ECOMM_LB_SET_VALUE 1
+#define RMAP_ECOMM_LB_SET_CUMUL 2
+#define RMAP_ECOMM_LB_SET_NUM_MPATH 3
+	/* Encode the community as non-transitive. */
+	bool non_trans;
+	/* Explicit bandwidth in Mbps (RMAP_ECOMM_LB_SET_VALUE only). */
+	uint32_t bw;
+};
+
+/*
+ * Route-map "set extcommunity bandwidth" apply function.  Builds a
+ * link-bandwidth extended community (explicit value, cumulative
+ * multipath bandwidth, or reference-bandwidth * multipath count) and
+ * merges it into the path's extended communities, replacing any
+ * existing link-bandwidth community.
+ */
+static enum route_map_cmd_result_t
+route_set_ecommunity_lb(void *rule, const struct prefix *prefix,
+			route_map_object_t type, void *object)
+{
+	struct rmap_ecomm_lb_set *rels = rule;
+	struct bgp_path_info *path;
+	struct peer *peer;
+	struct ecommunity ecom_lb = {0};
+	struct ecommunity_val lb_eval;
+	uint32_t bw_bytes = 0;
+	uint16_t mpath_count = 0;
+	struct ecommunity *new_ecom;
+	struct ecommunity *old_ecom;
+	as_t as;
+
+	if (type != RMAP_BGP)
+		return RMAP_OKAY;
+
+	path = object;
+	peer = path->peer;
+	if (!peer || !peer->bgp)
+		return RMAP_ERROR;
+
+	/* Build link bandwidth extended community */
+	as = (peer->bgp->as > BGP_AS_MAX) ? BGP_AS_TRANS : peer->bgp->as;
+	if (rels->lb_type == RMAP_ECOMM_LB_SET_VALUE) {
+		/* Cast BEFORE multiplying: bw is in Mbps and values above
+		 * ~4294 would overflow a 32-bit intermediate product.
+		 */
+		bw_bytes = ((uint64_t)rels->bw * 1000 * 1000) / 8;
+	} else if (rels->lb_type == RMAP_ECOMM_LB_SET_CUMUL) {
+		/* process this only for the best path. */
+		if (!CHECK_FLAG(path->flags, BGP_PATH_SELECTED))
+			return RMAP_OKAY;
+
+		bw_bytes = (uint32_t)bgp_path_info_mpath_cumbw(path);
+		if (!bw_bytes)
+			return RMAP_OKAY;
+
+	} else if (rels->lb_type == RMAP_ECOMM_LB_SET_NUM_MPATH) {
+
+		/* process this only for the best path. */
+		if (!CHECK_FLAG(path->flags, BGP_PATH_SELECTED))
+			return RMAP_OKAY;
+
+		/* Same overflow hazard as above - widen before multiply. */
+		bw_bytes = ((uint64_t)peer->bgp->lb_ref_bw * 1000 * 1000) / 8;
+		mpath_count = bgp_path_info_mpath_count(path) + 1;
+		/* NOTE(review): this product is still 32-bit and can wrap
+		 * for large ref-bw * path-count; confirm acceptable range.
+		 */
+		bw_bytes *= mpath_count;
+	}
+
+	encode_lb_extcomm(as, bw_bytes, rels->non_trans, &lb_eval);
+
+	/* add to route or merge with existing */
+	old_ecom = path->attr->ecommunity;
+	if (old_ecom) {
+		/* 'unique'/'overwrite' replace any existing LB community. */
+		new_ecom = ecommunity_dup(old_ecom);
+		ecommunity_add_val(new_ecom, &lb_eval, true, true);
+		if (!old_ecom->refcnt)
+			ecommunity_free(&old_ecom);
+	} else {
+		ecom_lb.size = 1;
+		ecom_lb.val = (uint8_t *)lb_eval.val;
+		new_ecom = ecommunity_dup(&ecom_lb);
+	}
+
+	/* new_ecom will be intern()'d or attr_flush()'d in call stack */
+	path->attr->ecommunity = new_ecom;
+	path->attr->flag |= ATTR_FLAG_BIT(BGP_ATTR_EXT_COMMUNITIES);
+
+	/* Mark that route-map has set link bandwidth; used in attribute
+	 * setting decisions.
+	 */
+	SET_FLAG(path->attr->rmap_change_flags, BATTR_RMAP_LINK_BW_SET);
+
+	return RMAP_OKAY;
+}
+
+/*
+ * Compile a "set extcommunity bandwidth" argument string.  The first
+ * token is "cumulative", "num-multipaths" or a numeric Mbps value; a
+ * trailing token (separated by a space) marks the community as
+ * non-transitive.  Returns NULL on malformed input.
+ */
+static void *route_set_ecommunity_lb_compile(const char *arg)
+{
+	struct rmap_ecomm_lb_set *rels;
+	uint8_t lb_type;
+	uint32_t bw = 0;
+	char bw_str[40] = {0};
+	char *p, *str;
+	bool non_trans = false;
+
+	str = (char *)arg;
+	p = strchr(arg, ' ');
+	if (p) {
+		int len;
+
+		len = p - arg;
+		/* Bound the copy - reject tokens that would overflow the
+		 * local buffer instead of smashing the stack.
+		 */
+		if (len >= (int)sizeof(bw_str))
+			return NULL;
+		memcpy(bw_str, arg, len);
+		non_trans = true;
+		str = bw_str;
+	}
+
+	if (strcmp(str, "cumulative") == 0)
+		lb_type = RMAP_ECOMM_LB_SET_CUMUL;
+	else if (strcmp(str, "num-multipaths") == 0)
+		lb_type = RMAP_ECOMM_LB_SET_NUM_MPATH;
+	else {
+		char *end = NULL;
+
+		/* CLI already restricts the range (1-25600). */
+		bw = strtoul(str, &end, 10);
+		if (*end != '\0')
+			return NULL;
+		lb_type = RMAP_ECOMM_LB_SET_VALUE;
+	}
+
+	rels = XCALLOC(MTYPE_ROUTE_MAP_COMPILED,
+		       sizeof(struct rmap_ecomm_lb_set));
+	rels->lb_type = lb_type;
+	rels->bw = bw;
+	rels->non_trans = non_trans;
+
+	return rels;
+}
+
+/* Free a compiled "set extcommunity bandwidth" rule. */
+static void route_set_ecommunity_lb_free(void *rule)
+{
+	XFREE(MTYPE_ROUTE_MAP_COMPILED, rule);
+}
+
+/* Route-map rule structure for `set extcommunity bandwidth'. */
+struct route_map_rule_cmd route_set_ecommunity_lb_cmd = {
+	"extcommunity bandwidth",
+	route_set_ecommunity_lb,
+	route_set_ecommunity_lb_compile,
+	route_set_ecommunity_lb_free,
+};
+
/* `set origin ORIGIN' */
/* For origin set. */
"GP extended community attribute\n"
"Site-of-Origin extended community\n")
+/* CLI handler for "set extcommunity bandwidth ..." - passes the
+ * concatenated arguments to the generic route-map set machinery.
+ */
+DEFUN (set_ecommunity_lb,
+       set_ecommunity_lb_cmd,
+       "set extcommunity bandwidth <(1-25600)|cumulative|num-multipaths> [non-transitive]",
+       SET_STR
+       "BGP extended community attribute\n"
+       "Link bandwidth extended community\n"
+       "Bandwidth value in Mbps\n"
+       "Cumulative bandwidth of all multipaths (outbound-only)\n"
+       "Internally computed bandwidth based on number of multipaths (outbound-only)\n"
+       "Attribute is set as non-transitive\n")
+{
+	int idx_lb = 3;
+	int ret;
+	char *str;
+
+	/* Join everything from the bandwidth argument onward so the
+	 * compile routine sees e.g. "1000 non-transitive".
+	 */
+	str = argv_concat(argv, argc, idx_lb);
+	ret = generic_set_add(vty, VTY_GET_CONTEXT(route_map_index),
+			      "extcommunity bandwidth", str);
+	XFREE(MTYPE_TMP, str);
+	return ret;
+}
+
+
+/* Remove a configured "set extcommunity bandwidth" rule; the short
+ * ALIAS form allows "no set extcommunity bandwidth" with no value.
+ */
+DEFUN (no_set_ecommunity_lb,
+       no_set_ecommunity_lb_cmd,
+       "no set extcommunity bandwidth <(1-25600)|cumulative|num-multipaths> [non-transitive]",
+       NO_STR
+       SET_STR
+       "BGP extended community attribute\n"
+       "Link bandwidth extended community\n"
+       "Bandwidth value in Mbps\n"
+       "Cumulative bandwidth of all multipaths (outbound-only)\n"
+       "Internally computed bandwidth based on number of multipaths (outbound-only)\n"
+       "Attribute is set as non-transitive\n")
+{
+	return generic_set_delete(vty, VTY_GET_CONTEXT(route_map_index),
+				  "extcommunity bandwidth", NULL);
+}
+
+ALIAS (no_set_ecommunity_lb,
+       no_set_ecommunity_lb_short_cmd,
+       "no set extcommunity bandwidth",
+       NO_STR
+       SET_STR
+       "BGP extended community attribute\n"
+       "Link bandwidth extended community\n")
+
DEFUN (set_origin,
set_origin_cmd,
"set origin <egp|igp|incomplete>",
route_map_install_set(&route_set_originator_id_cmd);
route_map_install_set(&route_set_ecommunity_rt_cmd);
route_map_install_set(&route_set_ecommunity_soo_cmd);
+ route_map_install_set(&route_set_ecommunity_lb_cmd);
route_map_install_set(&route_set_tag_cmd);
route_map_install_set(&route_set_label_index_cmd);
install_element(RMAP_NODE, &set_ecommunity_soo_cmd);
install_element(RMAP_NODE, &no_set_ecommunity_soo_cmd);
install_element(RMAP_NODE, &no_set_ecommunity_soo_short_cmd);
+ install_element(RMAP_NODE, &set_ecommunity_lb_cmd);
+ install_element(RMAP_NODE, &no_set_ecommunity_lb_cmd);
+ install_element(RMAP_NODE, &no_set_ecommunity_lb_short_cmd);
#ifdef KEEP_OLD_VPN_COMMANDS
install_element(RMAP_NODE, &set_vpn_nexthop_cmd);
install_element(RMAP_NODE, &no_set_vpn_nexthop_cmd);
return CMD_SUCCESS;
}
+/* "bgp bestpath bandwidth" configuration. */
+DEFPY (bgp_bestpath_bw,
+ bgp_bestpath_bw_cmd,
+ "[no$no] bgp bestpath bandwidth [<ignore|skip-missing|default-weight-for-missing>$bw_cfg]",
+ NO_STR
+ "BGP specific commands\n"
+ "Change the default bestpath selection\n"
+ "Link Bandwidth attribute\n"
+ "Ignore link bandwidth (i.e., do regular ECMP, not weighted)\n"
+ "Ignore paths without link bandwidth for ECMP (if other paths have it)\n"
+ "Assign a low default weight (value 1) to paths not having link bandwidth\n")
+{
+ VTY_DECLVAR_CONTEXT(bgp, bgp);
+ afi_t afi;
+ safi_t safi;
+
+ /* The 'no' form resets link-bandwidth handling to the default
+ * (regular, unweighted ECMP); otherwise a keyword is required.
+ */
+ if (no) {
+ bgp->lb_handling = BGP_LINK_BW_ECMP;
+ } else {
+ if (!bw_cfg) {
+ vty_out(vty, "%% Bandwidth configuration must be specified\n");
+ return CMD_ERR_INCOMPLETE;
+ }
+ /* Map the CLI keyword to the corresponding handling mode. */
+ if (!strcmp(bw_cfg, "ignore"))
+ bgp->lb_handling = BGP_LINK_BW_IGNORE_BW;
+ else if (!strcmp(bw_cfg, "skip-missing"))
+ bgp->lb_handling = BGP_LINK_BW_SKIP_MISSING;
+ else if (!strcmp(bw_cfg, "default-weight-for-missing"))
+ bgp->lb_handling = BGP_LINK_BW_DEFWT_4_MISSING;
+ else
+ return CMD_ERR_NO_MATCH;
+ }
+
+ /* This config is used in route install, so redo that. */
+ FOREACH_AFI_SAFI (afi, safi) {
+ if (!bgp_fibupd_safi(safi))
+ continue;
+ bgp_zebra_announce_table(bgp, afi, safi);
+ }
+
+ return CMD_SUCCESS;
+}
+
/* "no bgp default ipv4-unicast". */
DEFUN (no_bgp_default_ipv4_unicast,
no_bgp_default_ipv4_unicast_cmd,
peer_uptime(peer->uptime, timebuf, BGP_UPTIME_LEN,
use_json, json_peer);
- /*
- * Adding "pfxRcd" field to match with the corresponding
- * CLI. "prefixReceivedCount" will be deprecated in
- * future.
- */
- json_object_int_add(json_peer, "prefixReceivedCount",
- peer->pcount[afi][pfx_rcd_safi]);
json_object_int_add(json_peer, "pfxRcd",
peer->pcount[afi][pfx_rcd_safi]);
vty_out(vty, "\n");
}
+ /* Link bandwidth handling. */
+ if (bgp->lb_handling == BGP_LINK_BW_IGNORE_BW)
+ vty_out(vty, " bgp bestpath bandwidth ignore\n");
+ else if (bgp->lb_handling == BGP_LINK_BW_SKIP_MISSING)
+ vty_out(vty, " bgp bestpath bandwidth skip-missing\n");
+ else if (bgp->lb_handling == BGP_LINK_BW_DEFWT_4_MISSING)
+ vty_out(vty, " bgp bestpath bandwidth default-weight-for-missing\n");
+
/* BGP network import check. */
if (!!CHECK_FLAG(bgp->flags, BGP_FLAG_IMPORT_CHECK)
!= SAVE_BGP_IMPORT_CHECK)
install_element(BGP_NODE, &bgp_bestpath_med_cmd);
install_element(BGP_NODE, &no_bgp_bestpath_med_cmd);
+ /* "bgp bestpath bandwidth" commands */
+ install_element(BGP_NODE, &bgp_bestpath_bw_cmd);
+
/* "no bgp default ipv4-unicast" commands. */
install_element(BGP_NODE, &no_bgp_default_ipv4_unicast_cmd);
install_element(BGP_NODE, &bgp_default_ipv4_unicast_cmd);
return true;
}
+/* Compute the weight to advertise for a next hop when doing weighted
+ * ECMP. 'tot_bw' is the cumulative link bandwidth across all multipaths;
+ * the weight is the path's share of it, scaled by 100. Returns false if
+ * the next hop should be skipped entirely (user configured
+ * 'skip-missing' and this path has no bandwidth), true otherwise with
+ * *nh_weight filled in.
+ * NOTE(review): assumes tot_bw is nonzero whenever any path carries a
+ * nonzero link_bw — confirm the caller guarantees this.
+ */
+static bool bgp_zebra_use_nhop_weighted(struct bgp *bgp, struct attr *attr,
+ uint64_t tot_bw, uint32_t *nh_weight)
+{
+ uint32_t bw;
+ uint64_t tmp;
+
+ bw = attr->link_bw;
+ /* zero link-bandwidth and link-bandwidth not present are treated
+ * as the same situation.
+ */
+ if (!bw) {
+ /* the only situations should be if we're either told
+ * to skip or use default weight.
+ */
+ if (bgp->lb_handling == BGP_LINK_BW_SKIP_MISSING)
+ return false;
+ *nh_weight = BGP_ZEBRA_DEFAULT_NHOP_WEIGHT;
+ } else {
+ tmp = (uint64_t)bw * 100;
+ *nh_weight = ((uint32_t)(tmp / tot_bw));
+ }
+
+ return true;
+}
+
void bgp_zebra_announce(struct bgp_node *rn, const struct prefix *p,
struct bgp_path_info *info, struct bgp *bgp, afi_t afi,
safi_t safi)
char buf_prefix[PREFIX_STRLEN]; /* filled in if we are debugging */
bool is_evpn;
int nh_updated;
+ bool do_wt_ecmp;
+ uint64_t cum_bw = 0;
/* Don't try to install if we're not connected to Zebra or Zebra doesn't
* know of this instance.
/* Metric is currently based on the best-path only */
metric = info->attr->med;
+
+ /* Determine if we're doing weighted ECMP or not */
+ do_wt_ecmp = bgp_path_info_mpath_chkwtd(bgp, info);
+ if (do_wt_ecmp)
+ cum_bw = bgp_path_info_mpath_cumbw(info);
+
for (mpinfo = info; mpinfo; mpinfo = bgp_path_info_mpath_next(mpinfo)) {
+ uint32_t nh_weight;
+
if (valid_nh_count >= multipath_num)
break;
*mpinfo_cp = *mpinfo;
+ nh_weight = 0;
/* Get nexthop address-family */
if (p->family == AF_INET
else
continue;
+ /* If processing for weighted ECMP, determine the next hop's
+ * weight. Based on user setting, we may skip the next hop
+ * in some situations.
+ */
+ if (do_wt_ecmp) {
+ if (!bgp_zebra_use_nhop_weighted(bgp, mpinfo->attr,
+ cum_bw, &nh_weight))
+ continue;
+ }
api_nh = &api.nexthops[valid_nh_count];
if (nh_family == AF_INET) {
if (bgp_debug_zebra(&api.prefix)) {
}
memcpy(&api_nh->rmac, &(mpinfo->attr->rmac),
sizeof(struct ethaddr));
+ api_nh->weight = nh_weight;
+
valid_nh_count++;
}
snprintf(eth_buf, sizeof(eth_buf), " RMAC %s",
prefix_mac2str(&api_nh->rmac,
buf1, sizeof(buf1)));
- zlog_debug(" nhop [%d]: %s if %u VRF %u %s %s",
+ zlog_debug(" nhop [%d]: %s if %u VRF %u wt %u %s %s",
i + 1, nh_buf, api_nh->ifindex,
- api_nh->vrf_id, label_buf, eth_buf);
+ api_nh->vrf_id, api_nh->weight,
+ label_buf, eth_buf);
}
}
#include "vxlan.h"
+/* Default weight for next hop, if doing weighted ECMP. */
+#define BGP_ZEBRA_DEFAULT_NHOP_WEIGHT 1
+
extern void bgp_zebra_init(struct thread_master *master,
unsigned short instance);
extern void bgp_zebra_init_tm_connect(struct bgp *bgp);
bgp->rib_stale_time = BGP_DEFAULT_RIB_STALE_TIME;
bgp->dynamic_neighbors_limit = BGP_DYNAMIC_NEIGHBORS_LIMIT_DEFAULT;
bgp->dynamic_neighbors_count = 0;
+ bgp->lb_ref_bw = BGP_LINK_BW_REF_BW;
+ bgp->lb_handling = BGP_LINK_BW_ECMP;
bgp->ebgp_requires_policy = DEFAULT_EBGP_POLICY_DISABLED;
bgp->reject_as_sets = BGP_REJECT_AS_SETS_DISABLED;
bgp_addpath_init_bgp_data(&bgp->tx_addpath);
#define BGP_GR_SUCCESS 0
#define BGP_GR_FAILURE 1
+/* Handling of BGP link bandwidth (LB) on receiver - whether and how to
+ * do weighted ECMP. Note: This applies after multipath computation.
+ */
+enum bgp_link_bw_handling {
+ /* Do ECMP if some paths don't have LB - default */
+ BGP_LINK_BW_ECMP,
+ /* Completely ignore LB, just do regular ECMP */
+ BGP_LINK_BW_IGNORE_BW,
+ /* Skip paths without LB, do wECMP on others */
+ BGP_LINK_BW_SKIP_MISSING,
+ /* Do wECMP with default weight for paths not having LB */
+ BGP_LINK_BW_DEFWT_4_MISSING
+};
+
/* BGP instance structure. */
struct bgp {
/* AS number of this BGP instance. */
#define BGP_UPDATE_DELAY_MIN 0
#define BGP_UPDATE_DELAY_MAX 3600
+ /* Reference bandwidth for BGP link-bandwidth. Used when
+ * the LB value has to be computed based on some other
+ * factor (e.g., number of multipaths for the prefix)
+ * Value is in Mbps
+ */
+ uint32_t lb_ref_bw;
+#define BGP_LINK_BW_REF_BW 1
+
/* BGP flags. */
uint32_t flags;
#define BGP_FLAG_ALWAYS_COMPARE_MED (1 << 0)
/* Count of peers in established state */
uint32_t established_peers;
+ /* Weighted ECMP related config. */
+ enum bgp_link_bw_handling lb_handling;
+
QOBJ_FIELDS
};
DECLARE_QOBJ_TYPE(bgp)
beec.val[1] = ECOMMUNITY_OPAQUE_SUBTYPE_ENCAP;
beec.val[6] = ((TunnelType) >> 8) & 0xff;
beec.val[7] = (TunnelType)&0xff;
- ecommunity_add_val(attr.ecommunity, &beec);
+ ecommunity_add_val(attr.ecommunity, &beec, false, false);
}
/*
ecom_value.val[7] =
(l2o->logical_net_id >> 0) & 0xff;
rtlist = ecommunity_new();
- ecommunity_add_val(rtlist, &ecom_value);
+ ecommunity_add_val(rtlist, &ecom_value,
+ false, false);
}
if (l2o->tag_id) {
as_t as = bgp->as;
ecom_value.val[7] = val & 0xff;
if (rtlist == NULL)
rtlist = ecommunity_new();
- ecommunity_add_val(rtlist, &ecom_value);
+ ecommunity_add_val(rtlist, &ecom_value,
+ false, false);
}
}
eval.val[7] = (lni >> 0) & 0xff;
enew = ecommunity_new();
- ecommunity_add_val(enew, &eval);
+ ecommunity_add_val(enew, &eval, false, false);
it->rt_import_list = enew;
for (afi = AFI_IP; afi < AFI_MAX; ++afi) {
&bpi->attr->mp_nexthop_global_in.s_addr, 4);
roec.val[6] = 0;
roec.val[7] = 0;
- ecommunity_add_val(new, &roec);
+ ecommunity_add_val(new, &roec, false, false);
break;
case AF_INET6:
/* No support for IPv6 addresses in extended communities
new = ecommunity_new();
assert(new);
- ecommunity_add_val(new, &roec);
+ ecommunity_add_val(new, &roec, false, false);
if (!new->size) {
ecommunity_free(&new);
localadmin = htons(hc->resolve_nve_roo_local_admin);
memcpy(vnc_gateway_magic.val + 6, (char *)&localadmin, 2);
- ecommunity_add_val(*ecom, &vnc_gateway_magic);
+ ecommunity_add_val(*ecom, &vnc_gateway_magic, false, false);
}
return 0;
e.g. Ubuntu 14.04.
-* Why has SNMP support been disabled?
-=====================================
-FRR used to link against the NetSNMP libraries to provide SNMP
-support. Those libraries sadly link against the OpenSSL libraries
-to provide crypto support for SNMPv3 among others.
-OpenSSL now is not compatible with the GNU GENERAL PUBLIC LICENSE (GPL)
-licence that FRR is distributed under. For more explanation read:
- http://www.gnome.org/~markmc/openssl-and-the-gpl.html
- http://www.gnu.org/licenses/gpl-faq.html#GPLIncompatibleLibs
-Updating the licence to explicitly allow linking against OpenSSL
-would requite the affirmation of all people that ever contributed
-a significant part to Zebra / Quagga or FRR and thus are the collective
-"copyright holder". That's too much work. Using a shrinked down
-version of NetSNMP without OpenSSL or convincing the NetSNMP people
-to change to GnuTLS are maybe good solutions but not reachable
-during the last days before the Sarge release :-(
-
- *BUT*
-
-It is allowed by the used licence mix that you fetch the sources and
-build FRR yourself with SNMP with
- # apt-get -b source -Ppkg.frr.snmp frr
-Just distributing it in binary form, linked against OpenSSL, is forbidden.
-
-
* Debian Policy compliance notes
================================
python3-sphinx,
python3-pytest <!nocheck>,
texinfo (>= 4.7)
-Standards-Version: 4.2.1
+Standards-Version: 4.4.1
Homepage: https://www.frrouting.org/
-Vcs-Browser: https://github.com/FRRouting/frr/
-Vcs-Git: https://github.com/FRRouting/frr.git
+Vcs-Browser: https://github.com/FRRouting/frr/tree/debian/master
+Vcs-Git: https://github.com/FRRouting/frr.git -b debian/master
Package: frr
Architecture: linux-any
Package: frr-doc
Section: doc
Architecture: all
+Multi-Arch: foreign
Depends:
${misc:Depends},
libjs-jquery,
frr binary: spelling-error-in-binary usr/lib/frr/pimd writen written
frr binary: spelling-error-in-binary usr/lib/frr/pimd iif if
+# prefixed man pages for off-PATH daemons
+manpage-without-executable
+
# personal name
spelling-error-in-copyright Ang And
- Write a topology (Graphviz recommended)
- Obtain configuration files
- Write the test itself
+- Format the new code using `black <https://github.com/psf/black>`_
- Create a Pull Request
Topotest File Hierarchy
inside folders named after the equipment.
- Tests must be able to run without any interaction. To make sure your test
conforms with this, run it without the :option:`-s` parameter.
+- Use `black <https://github.com/psf/black>`_ code formatter before creating
+ a pull request. This ensures we have a unified code style.
Tips:
Enable or disable :rfc:`6232` purge originator identification.
+.. index:: [no] lsp-mtu (128-4352)
+.. clicmd:: [no] lsp-mtu (128-4352)
+
+ Configure the maximum size of generated LSPs, in bytes.
+
+
.. _isis-timer:
ISIS Timer
}
/*
- * XPath: /frr-isisd:isis/instance/lsp/generation-interval
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/generation-interval
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/generation-interval
*/
DEFPY(lsp_gen_interval, lsp_gen_interval_cmd,
"lsp-gen-interval [level-1|level-2]$level (1-120)$val",
"Minimum interval in seconds\n")
{
if (!level || strmatch(level, "level-1"))
- nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-1",
- NB_OP_MODIFY, val_str);
+ nb_cli_enqueue_change(
+ vty, "./lsp/timers/level-1/generation-interval",
+ NB_OP_MODIFY, val_str);
if (!level || strmatch(level, "level-2"))
- nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-2",
- NB_OP_MODIFY, val_str);
+ nb_cli_enqueue_change(
+ vty, "./lsp/timers/level-2/generation-interval",
+ NB_OP_MODIFY, val_str);
return nb_cli_apply_changes(vty, NULL);
}
"Minimum interval in seconds\n")
{
if (!level || strmatch(level, "level-1"))
- nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-1",
- NB_OP_MODIFY, NULL);
+ nb_cli_enqueue_change(
+ vty, "./lsp/timers/level-1/generation-interval",
+ NB_OP_MODIFY, NULL);
if (!level || strmatch(level, "level-2"))
- nb_cli_enqueue_change(vty, "./lsp/generation-interval/level-2",
- NB_OP_MODIFY, NULL);
+ nb_cli_enqueue_change(
+ vty, "./lsp/timers/level-2/generation-interval",
+ NB_OP_MODIFY, NULL);
return nb_cli_apply_changes(vty, NULL);
}
-void cli_show_isis_lsp_gen_interval(struct vty *vty, struct lyd_node *dnode,
- bool show_defaults)
-{
- const char *l1 = yang_dnode_get_string(dnode, "./level-1");
- const char *l2 = yang_dnode_get_string(dnode, "./level-2");
-
- if (strmatch(l1, l2))
- vty_out(vty, " lsp-gen-interval %s\n", l1);
- else {
- vty_out(vty, " lsp-gen-interval level-1 %s\n", l1);
- vty_out(vty, " lsp-gen-interval level-2 %s\n", l2);
- }
-}
-
/*
- * XPath: /frr-isisd:isis/instance/lsp/refresh-interval
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval
*/
DEFPY(lsp_refresh_interval, lsp_refresh_interval_cmd,
"lsp-refresh-interval [level-1|level-2]$level (1-65235)$val",
"LSP refresh interval in seconds\n")
{
if (!level || strmatch(level, "level-1"))
- nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-1",
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-1/refresh-interval",
NB_OP_MODIFY, val_str);
if (!level || strmatch(level, "level-2"))
- nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-2",
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-2/refresh-interval",
NB_OP_MODIFY, val_str);
return nb_cli_apply_changes(vty, NULL);
"LSP refresh interval in seconds\n")
{
if (!level || strmatch(level, "level-1"))
- nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-1",
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-1/refresh-interval",
NB_OP_MODIFY, NULL);
if (!level || strmatch(level, "level-2"))
- nb_cli_enqueue_change(vty, "./lsp/refresh-interval/level-2",
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-2/refresh-interval",
NB_OP_MODIFY, NULL);
return nb_cli_apply_changes(vty, NULL);
}
-void cli_show_isis_lsp_ref_interval(struct vty *vty, struct lyd_node *dnode,
- bool show_defaults)
-{
- const char *l1 = yang_dnode_get_string(dnode, "./level-1");
- const char *l2 = yang_dnode_get_string(dnode, "./level-2");
-
- if (strmatch(l1, l2))
- vty_out(vty, " lsp-refresh-interval %s\n", l1);
- else {
- vty_out(vty, " lsp-refresh-interval level-1 %s\n", l1);
- vty_out(vty, " lsp-refresh-interval level-2 %s\n", l2);
- }
-}
-
/*
- * XPath: /frr-isisd:isis/instance/lsp/maximum-lifetime
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/maximum-lifetime
*/
+
DEFPY(max_lsp_lifetime, max_lsp_lifetime_cmd,
"max-lsp-lifetime [level-1|level-2]$level (350-65535)$val",
"Maximum LSP lifetime\n"
"LSP lifetime in seconds\n")
{
if (!level || strmatch(level, "level-1"))
- nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-1",
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-1/maximum-lifetime",
NB_OP_MODIFY, val_str);
if (!level || strmatch(level, "level-2"))
- nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-2",
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-2/maximum-lifetime",
NB_OP_MODIFY, val_str);
return nb_cli_apply_changes(vty, NULL);
"LSP lifetime in seconds\n")
{
if (!level || strmatch(level, "level-1"))
- nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-1",
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-1/maximum-lifetime",
NB_OP_MODIFY, NULL);
if (!level || strmatch(level, "level-2"))
- nb_cli_enqueue_change(vty, "./lsp/maximum-lifetime/level-2",
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-2/maximum-lifetime",
NB_OP_MODIFY, NULL);
return nb_cli_apply_changes(vty, NULL);
}
-void cli_show_isis_lsp_max_lifetime(struct vty *vty, struct lyd_node *dnode,
- bool show_defaults)
+/* unified LSP timers command
+ * XPath: /frr-isisd:isis/instance/lsp/timers
+ */
+
+DEFPY(lsp_timers, lsp_timers_cmd,
+ "lsp-timers [level-1|level-2]$level gen-interval (1-120)$gen refresh-interval (1-65235)$refresh max-lifetime (350-65535)$lifetime",
+ "LSP-related timers\n"
+ "LSP-related timers for Level 1 only\n"
+ "LSP-related timers for Level 2 only\n"
+ "Minimum interval between regenerating same LSP\n"
+ "Generation interval in seconds\n"
+ "LSP refresh interval\n"
+ "LSP refresh interval in seconds\n"
+ "Maximum LSP lifetime\n"
+ "Maximum LSP lifetime in seconds\n")
{
- const char *l1 = yang_dnode_get_string(dnode, "./level-1");
- const char *l2 = yang_dnode_get_string(dnode, "./level-2");
+ if (!level || strmatch(level, "level-1")) {
+ nb_cli_enqueue_change(
+ vty, "./lsp/timers/level-1/generation-interval",
+ NB_OP_MODIFY, gen_str);
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-1/refresh-interval",
+ NB_OP_MODIFY, refresh_str);
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-1/maximum-lifetime",
+ NB_OP_MODIFY, lifetime_str);
+ }
+ if (!level || strmatch(level, "level-2")) {
+ nb_cli_enqueue_change(
+ vty, "./lsp/timers/level-2/generation-interval",
+ NB_OP_MODIFY, gen_str);
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-2/refresh-interval",
+ NB_OP_MODIFY, refresh_str);
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-2/maximum-lifetime",
+ NB_OP_MODIFY, lifetime_str);
+ }
- if (strmatch(l1, l2))
- vty_out(vty, " max-lsp-lifetime %s\n", l1);
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+/* Reset all three LSP timers (generation interval, refresh interval,
+ * maximum lifetime) back to their YANG defaults, for the given level or
+ * for both levels when no level is specified.
+ */
+DEFPY(no_lsp_timers, no_lsp_timers_cmd,
+ "no lsp-timers [level-1|level-2]$level [gen-interval (1-120) refresh-interval (1-65235) max-lifetime (350-65535)]",
+ NO_STR
+ "LSP-related timers\n"
+ "LSP-related timers for Level 1 only\n"
+ "LSP-related timers for Level 2 only\n"
+ "Minimum interval between regenerating same LSP\n"
+ "Generation interval in seconds\n"
+ "LSP refresh interval\n"
+ "LSP refresh interval in seconds\n"
+ "Maximum LSP lifetime\n"
+ "Maximum LSP lifetime in seconds\n")
+{
+ /* NULL value => restore the leaf's default in the candidate config. */
+ if (!level || strmatch(level, "level-1")) {
+ nb_cli_enqueue_change(
+ vty, "./lsp/timers/level-1/generation-interval",
+ NB_OP_MODIFY, NULL);
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-1/refresh-interval",
+ NB_OP_MODIFY, NULL);
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-1/maximum-lifetime",
+ NB_OP_MODIFY, NULL);
+ }
+ if (!level || strmatch(level, "level-2")) {
+ nb_cli_enqueue_change(
+ vty, "./lsp/timers/level-2/generation-interval",
+ NB_OP_MODIFY, NULL);
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-2/refresh-interval",
+ NB_OP_MODIFY, NULL);
+ nb_cli_enqueue_change(vty,
+ "./lsp/timers/level-2/maximum-lifetime",
+ NB_OP_MODIFY, NULL);
+ }
+
+ return nb_cli_apply_changes(vty, NULL);
+}
+
+void cli_show_isis_lsp_timers(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults)
+{
+ const char *l1_refresh =
+ yang_dnode_get_string(dnode, "./level-1/refresh-interval");
+ const char *l2_refresh =
+ yang_dnode_get_string(dnode, "./level-2/refresh-interval");
+ const char *l1_lifetime =
+ yang_dnode_get_string(dnode, "./level-1/maximum-lifetime");
+ const char *l2_lifetime =
+ yang_dnode_get_string(dnode, "./level-2/maximum-lifetime");
+ const char *l1_gen =
+ yang_dnode_get_string(dnode, "./level-1/generation-interval");
+ const char *l2_gen =
+ yang_dnode_get_string(dnode, "./level-2/generation-interval");
+ if (strmatch(l1_refresh, l2_refresh)
+ && strmatch(l1_lifetime, l2_lifetime) && strmatch(l1_gen, l2_gen))
+ vty_out(vty,
+ " lsp-timers gen-interval %s refresh-interval %s max-lifetime %s\n",
+ l1_gen, l1_refresh, l1_lifetime);
else {
- vty_out(vty, " max-lsp-lifetime level-1 %s\n", l1);
- vty_out(vty, " max-lsp-lifetime level-2 %s\n", l2);
+ vty_out(vty,
+ " lsp-timers level-1 gen-interval %s refresh-interval %s max-lifetime %s\n",
+ l1_gen, l1_refresh, l1_lifetime);
+ vty_out(vty,
+ " lsp-timers level-2 gen-interval %s refresh-interval %s max-lifetime %s\n",
+ l2_gen, l2_refresh, l2_lifetime);
}
}
install_element(ISIS_NODE, &no_lsp_refresh_interval_cmd);
install_element(ISIS_NODE, &max_lsp_lifetime_cmd);
install_element(ISIS_NODE, &no_max_lsp_lifetime_cmd);
+ install_element(ISIS_NODE, &lsp_timers_cmd);
+ install_element(ISIS_NODE, &no_lsp_timers_cmd);
install_element(ISIS_NODE, &area_lsp_mtu_cmd);
install_element(ISIS_NODE, &no_area_lsp_mtu_cmd);
},
},
{
- .xpath = "/frr-isisd:isis/instance/lsp/refresh-interval",
+ .xpath = "/frr-isisd:isis/instance/lsp/timers",
.cbs = {
- .cli_show = cli_show_isis_lsp_ref_interval,
+ .cli_show = cli_show_isis_lsp_timers,
},
},
{
- .xpath = "/frr-isisd:isis/instance/lsp/refresh-interval/level-1",
+ .xpath = "/frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval",
.cbs = {
.modify = isis_instance_lsp_refresh_interval_level_1_modify,
},
},
{
- .xpath = "/frr-isisd:isis/instance/lsp/refresh-interval/level-2",
- .cbs = {
- .modify = isis_instance_lsp_refresh_interval_level_2_modify,
- },
- },
- {
- .xpath = "/frr-isisd:isis/instance/lsp/maximum-lifetime",
- .cbs = {
- .cli_show = cli_show_isis_lsp_max_lifetime,
- },
- },
- {
- .xpath = "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-1",
+ .xpath = "/frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime",
.cbs = {
.modify = isis_instance_lsp_maximum_lifetime_level_1_modify,
},
},
{
- .xpath = "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-2",
+ .xpath = "/frr-isisd:isis/instance/lsp/timers/level-1/generation-interval",
.cbs = {
- .modify = isis_instance_lsp_maximum_lifetime_level_2_modify,
+ .modify = isis_instance_lsp_generation_interval_level_1_modify,
},
},
{
- .xpath = "/frr-isisd:isis/instance/lsp/generation-interval",
+ .xpath = "/frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval",
.cbs = {
- .cli_show = cli_show_isis_lsp_gen_interval,
+ .modify = isis_instance_lsp_refresh_interval_level_2_modify,
},
},
{
- .xpath = "/frr-isisd:isis/instance/lsp/generation-interval/level-1",
+ .xpath = "/frr-isisd:isis/instance/lsp/timers/level-2/maximum-lifetime",
.cbs = {
- .modify = isis_instance_lsp_generation_interval_level_1_modify,
+ .modify = isis_instance_lsp_maximum_lifetime_level_2_modify,
},
},
{
- .xpath = "/frr-isisd:isis/instance/lsp/generation-interval/level-2",
+ .xpath = "/frr-isisd:isis/instance/lsp/timers/level-2/generation-interval",
.cbs = {
.modify = isis_instance_lsp_generation_interval_level_2_modify,
},
bool show_defaults);
void cli_show_isis_domain_pwd(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
-void cli_show_isis_lsp_gen_interval(struct vty *vty, struct lyd_node *dnode,
- bool show_defaults);
-void cli_show_isis_lsp_ref_interval(struct vty *vty, struct lyd_node *dnode,
- bool show_defaults);
-void cli_show_isis_lsp_max_lifetime(struct vty *vty, struct lyd_node *dnode,
- bool show_defaults);
+void cli_show_isis_lsp_timers(struct vty *vty, struct lyd_node *dnode,
+ bool show_defaults);
void cli_show_isis_lsp_mtu(struct vty *vty, struct lyd_node *dnode,
bool show_defaults);
void cli_show_isis_spf_min_interval(struct vty *vty, struct lyd_node *dnode,
}
/*
- * XPath: /frr-isisd:isis/instance/lsp/refresh-interval/level-1
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval
*/
int isis_instance_lsp_refresh_interval_level_1_modify(
enum nb_event event, const struct lyd_node *dnode,
}
/*
- * XPath: /frr-isisd:isis/instance/lsp/refresh-interval/level-2
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval
*/
int isis_instance_lsp_refresh_interval_level_2_modify(
enum nb_event event, const struct lyd_node *dnode,
}
/*
- * XPath: /frr-isisd:isis/instance/lsp/maximum-lifetime/level-1
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime
*/
int isis_instance_lsp_maximum_lifetime_level_1_modify(
enum nb_event event, const struct lyd_node *dnode,
}
/*
- * XPath: /frr-isisd:isis/instance/lsp/maximum-lifetime/level-2
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/maximum-lifetime
*/
int isis_instance_lsp_maximum_lifetime_level_2_modify(
enum nb_event event, const struct lyd_node *dnode,
}
/*
- * XPath: /frr-isisd:isis/instance/lsp/generation-interval/level-1
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-1/generation-interval
*/
int isis_instance_lsp_generation_interval_level_1_modify(
enum nb_event event, const struct lyd_node *dnode,
}
/*
- * XPath: /frr-isisd:isis/instance/lsp/generation-interval/level-2
+ * XPath: /frr-isisd:isis/instance/lsp/timers/level-2/generation-interval
*/
int isis_instance_lsp_generation_interval_level_2_modify(
enum nb_event event, const struct lyd_node *dnode,
enum isis_metric_style default_style;
area->max_lsp_lifetime[0] = yang_get_default_uint16(
- "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-1");
+ "/frr-isisd:isis/instance/lsp/timers/level-1/maximum-lifetime");
area->max_lsp_lifetime[1] = yang_get_default_uint16(
- "/frr-isisd:isis/instance/lsp/maximum-lifetime/level-2");
+ "/frr-isisd:isis/instance/lsp/timers/level-2/maximum-lifetime");
area->lsp_refresh[0] = yang_get_default_uint16(
- "/frr-isisd:isis/instance/lsp/refresh-interval/level-1");
+ "/frr-isisd:isis/instance/lsp/timers/level-1/refresh-interval");
area->lsp_refresh[1] = yang_get_default_uint16(
- "/frr-isisd:isis/instance/lsp/refresh-interval/level-2");
+ "/frr-isisd:isis/instance/lsp/timers/level-2/refresh-interval");
area->lsp_gen_interval[0] = yang_get_default_uint16(
- "/frr-isisd:isis/instance/lsp/generation-interval/level-1");
+ "/frr-isisd:isis/instance/lsp/timers/level-1/generation-interval");
area->lsp_gen_interval[1] = yang_get_default_uint16(
- "/frr-isisd:isis/instance/lsp/generation-interval/level-2");
+ "/frr-isisd:isis/instance/lsp/timers/level-2/generation-interval");
area->min_spf_interval[0] = yang_get_default_uint16(
"/frr-isisd:isis/instance/spf/minimum-interval/level-1");
area->min_spf_interval[1] = yang_get_default_uint16(
vty_init(master, true);
lib_cmd_init();
- yang_init();
+ yang_init(true);
nb_init(master, NULL, 0);
vty_stdio(vty_do_exit);
/* clang-format off */
-#if defined(__GNUC__) && ((__GNUC__ - 0) < 5) && !defined(__clang__)
-/* gcc versions before 5.x miscalculate the size for structs with variable
- * length arrays (they just count it as size 0)
- */
-struct frr_yang_module_info_size3 {
- /* YANG module name. */
- const char *name;
-
- /* Northbound callbacks. */
- const struct {
- /* Data path of this YANG node. */
- const char *xpath;
-
- /* Callbacks implemented for this node. */
- struct nb_callbacks cbs;
-
- /* Priority - lower priorities are processed first. */
- uint32_t priority;
- } nodes[3];
-};
-
-const struct frr_yang_module_info_size3 frr_interface_info_size3 asm("frr_interface_info") = {
-#else
const struct frr_yang_module_info frr_interface_info = {
-#endif
.name = "frr-interface",
.nodes = {
{
log_ref_vty_init();
lib_error_init();
- yang_init();
+ yang_init(true);
debug_init_cli();
return NULL;
}
+/*
+ * Helper that locates a nexthop in an nhg config list. Note that
+ * this uses a specific matching / equality rule that's different from
+ * the complete match performed by 'nexthop_same()'.
+ * Returns the matching nexthop in 'nhg', or NULL if no entry matches.
+ */
+static struct nexthop *nhg_nh_find(const struct nexthop_group *nhg,
+ const struct nexthop *nh)
+{
+ struct nexthop *nexthop;
+ int ret;
+
+ /* We compare: vrf, gateway, and interface */
+
+ for (nexthop = nhg->nexthop; nexthop; nexthop = nexthop->next) {
+
+ /* Compare vrf and type */
+ if (nexthop->vrf_id != nh->vrf_id)
+ continue;
+ if (nexthop->type != nh->type)
+ continue;
+
+ /* Compare gateway */
+ switch (nexthop->type) {
+ case NEXTHOP_TYPE_IPV4:
+ case NEXTHOP_TYPE_IPV6:
+ ret = nexthop_g_addr_cmp(nexthop->type,
+ &nexthop->gate, &nh->gate);
+ if (ret != 0)
+ continue;
+ break;
+ case NEXTHOP_TYPE_IPV4_IFINDEX:
+ case NEXTHOP_TYPE_IPV6_IFINDEX:
+ ret = nexthop_g_addr_cmp(nexthop->type,
+ &nexthop->gate, &nh->gate);
+ if (ret != 0)
+ continue;
+ /* Intentional Fall-Through */
+ case NEXTHOP_TYPE_IFINDEX:
+ if (nexthop->ifindex != nh->ifindex)
+ continue;
+ break;
+ case NEXTHOP_TYPE_BLACKHOLE:
+ if (nexthop->bh_type != nh->bh_type)
+ continue;
+ break;
+ }
+
+ return nexthop;
+ }
+
+ return NULL;
+}
+
static bool
nexthop_group_equal_common(const struct nexthop_group *nhg1,
const struct nexthop_group *nhg2,
nh->next = NULL;
}
+/* Unlink a nexthop from the list it's on, unconditionally.
+ * The nexthop must actually be a member of 'nhg' (asserted when it is
+ * the list head). The caller retains ownership of the unlinked
+ * nexthop and is responsible for freeing it; its prev/next links are
+ * cleared here.
+ */
+static void nexthop_unlink(struct nexthop_group *nhg, struct nexthop *nexthop)
+{
+
+ if (nexthop->prev)
+ nexthop->prev->next = nexthop->next;
+ else {
+ assert(nhg->nexthop == nexthop);
+ assert(nexthop->prev == NULL);
+ nhg->nexthop = nexthop->next;
+ }
+
+ if (nexthop->next)
+ nexthop->next->prev = nexthop->prev;
+
+ nexthop->prev = NULL;
+ nexthop->next = NULL;
+}
+
/*
* Copy a list of nexthops in 'nh' to an nhg, enforcing canonical sort order
*/
listnode_add_sort(nhgc->nhg_list, nh);
}
+/*
+ * Remove config info about a nexthop from group 'nhgc'. Note that we
+ * use only a subset of the available attributes here to determine
+ * a 'match'.
+ * Note that this doesn't change the list of nexthops, only the config
+ * information.
+ */
static void nexthop_group_unsave_nhop(struct nexthop_group_cmd *nhgc,
const char *nhvrf_name,
const union sockunion *addr,
- const char *intf, const char *labels,
- const uint32_t weight)
+ const char *intf)
{
struct nexthop_hold *nh;
struct listnode *node;
for (ALL_LIST_ELEMENTS_RO(nhgc->nhg_list, node, nh)) {
if (nhgc_cmp_helper(nhvrf_name, nh->nhvrf_name) == 0
&& nhgc_addr_cmp_helper(addr, nh->addr) == 0
- && nhgc_cmp_helper(intf, nh->intf) == 0
- && nhgc_cmp_helper(labels, nh->labels) == 0
- && weight == nh->weight)
+ && nhgc_cmp_helper(intf, nh->intf) == 0)
break;
}
int lbl_ret = 0;
bool legal;
int backup_idx = idx;
- bool add_update = false;
+ bool yes = !no;
if (bi_str == NULL)
backup_idx = NHH_BACKUP_IDX_INVALID;
return CMD_WARNING_CONFIG_FAILED;
}
- nh = nexthop_exists(&nhgc->nhg, &nhop);
+ /* Look for an existing nexthop in the config. Note that the test
+ * here tests only some attributes - it's not a complete comparison.
+ * Note that we've got two kinds of objects to manage: 'nexthop_hold'
+ * that represent config that may or may not be valid (yet), and
+ * actual nexthops that have been validated and parsed.
+ */
+ nh = nhg_nh_find(&nhgc->nhg, &nhop);
- if (no || nh) {
- /* Remove or replace cases */
+ /* Always attempt to remove old config info. */
+ nexthop_group_unsave_nhop(nhgc, vrf_name, addr, intf);
- /* Remove existing config */
- nexthop_group_unsave_nhop(nhgc, vrf_name, addr, intf, label,
- weight);
- if (nh) {
- /* Remove nexthop object */
- _nexthop_del(&nhgc->nhg, nh);
+ /* Remove any existing nexthop, for delete and replace cases. */
+ if (nh) {
+ nexthop_unlink(&nhgc->nhg, nh);
- if (nhg_hooks.del_nexthop)
- nhg_hooks.del_nexthop(nhgc, nh);
+ if (nhg_hooks.del_nexthop)
+ nhg_hooks.del_nexthop(nhgc, nh);
- nexthop_free(nh);
- nh = NULL;
- }
+ nexthop_free(nh);
}
-
- add_update = !no;
-
- if (add_update) {
- /* Add or replace cases */
-
- /* If valid config, add nexthop object */
+ if (yes) {
+ /* Add/replace case: capture nexthop if valid, and capture
+ * config info always.
+ */
if (legal) {
nh = nexthop_new();
struct nb_node *nb_node;
uint32_t priority;
+ if (i > YANG_MODULE_MAX_NODES) {
+ zlog_err(
+ "%s: %s.yang has more than %u nodes. Please increase YANG_MODULE_MAX_NODES to fix this problem.",
+ __func__, module->name, YANG_MODULE_MAX_NODES);
+ exit(1);
+ }
+
nb_node = nb_node_find(module->nodes[i].xpath);
if (!nb_node) {
flog_warn(EC_LIB_YANG_UNKNOWN_DATA_PATH,
/* The YANG list doesn't contain key leafs. */
#define F_NB_NODE_KEYLESS_LIST 0x02
+/*
+ * HACK: old gcc versions (< 5.x) have a bug that prevents C99 flexible arrays
+ * from working properly on shared libraries. For those compilers, use a fixed
+ * size array to work around the problem.
+ */
+#define YANG_MODULE_MAX_NODES 1024
+
struct frr_yang_module_info {
/* YANG module name. */
const char *name;
/* Priority - lower priorities are processed first. */
uint32_t priority;
+#if defined(__GNUC__) && ((__GNUC__ - 0) < 5) && !defined(__clang__)
+ } nodes[YANG_MODULE_MAX_NODES + 1];
+#else
} nodes[];
+#endif
};
/* Northbound error codes. */
}
/* clang-format off */
-#if defined(__GNUC__) && ((__GNUC__ - 0) < 5) && !defined(__clang__)
-/*
- * gcc versions before 5.x miscalculate the size for structs with variable
- * length arrays (they just count it as size 0)
- */
-struct frr_yang_module_info_sizen {
- /* YANG module name. */
- const char *name;
-
- /* Northbound callbacks. */
- const struct {
- /* Data path of this YANG node. */
- const char *xpath;
-
- /* Callbacks implemented for this node. */
- struct nb_callbacks cbs;
-
- /* Priority - lower priorities are processed first. */
- uint32_t priority;
- } nodes[28];
-};
-
-const struct frr_yang_module_info_sizen frr_route_map_info_sizen asm("frr_route_map_info") = {
-#else
const struct frr_yang_module_info frr_route_map_info = {
-#endif
.name = "frr-route-map",
.nodes = {
{
}
}
-struct ly_ctx *yang_ctx_new_setup(void)
+struct ly_ctx *yang_ctx_new_setup(bool embedded_modules)
{
struct ly_ctx *ctx;
const char *yang_models_path = YANG_MODELS_PATH;
ctx = ly_ctx_new(yang_models_path, LY_CTX_DISABLE_SEARCHDIR_CWD);
if (!ctx)
return NULL;
- ly_ctx_set_module_imp_clb(ctx, yang_module_imp_clb, NULL);
+
+ if (embedded_modules)
+ ly_ctx_set_module_imp_clb(ctx, yang_module_imp_clb, NULL);
+
return ctx;
}
-void yang_init(void)
+void yang_init(bool embedded_modules)
{
/* Initialize libyang global parameters that affect all containers. */
ly_set_log_clb(ly_log_cb, 1);
ly_log_options(LY_LOLOG | LY_LOSTORE);
/* Initialize libyang container for native models. */
- ly_native_ctx = yang_ctx_new_setup();
+ ly_native_ctx = yang_ctx_new_setup(embedded_modules);
if (!ly_native_ctx) {
flog_err(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__);
exit(1);
/*
* Create and set up a libyang context (for use by the translator)
+ *
+ * embedded_modules
+ * Specify whether libyang should attempt to look for embedded YANG modules.
*/
-extern struct ly_ctx *yang_ctx_new_setup(void);
+extern struct ly_ctx *yang_ctx_new_setup(bool embedded_modules);
/*
* Enable or disable libyang verbose debugging.
/*
* Initialize the YANG subsystem. Should be called only once during the
* daemon initialization process.
+ *
+ * embedded_modules
+ * Specify whether libyang should attempt to look for embedded YANG modules.
*/
-extern void yang_init(void);
+extern void yang_init(bool embedded_modules);
/*
* Finish the YANG subsystem gracefully. Should be called only when the daemon
RB_INSERT(yang_translators, &yang_translators, translator);
/* Initialize the translator libyang context. */
- translator->ly_ctx = yang_ctx_new_setup();
+ translator->ly_ctx = yang_ctx_new_setup(false);
if (!translator->ly_ctx) {
flog_warn(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__);
goto error;
void yang_translator_init(void)
{
- ly_translator_ctx = yang_ctx_new_setup();
+ ly_translator_ctx = yang_ctx_new_setup(true);
if (!ly_translator_ctx) {
flog_err(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__);
exit(1);
bshdr = (struct bsm_hdr *)(buf + PIM_MSG_HEADER_LEN);
pim_inet4_dump("<bsr?>", bshdr->bsr_addr.addr, bsr_str,
sizeof(bsr_str));
+ if (bshdr->hm_len > 32) {
+ zlog_warn("Bad hashmask length for IPv4; got %" PRIu8
+ ", expected value in range 0-32",
+ bshdr->hm_len);
+ pim->bsm_dropped++;
+ return -1;
+ }
pim->global_scope.hashMasklen = bshdr->hm_len;
frag_tag = ntohs(bshdr->frag_tag);
if (uj) {
json = json_object_new_object();
- json_object_int_add(json, "Number of Received BSMs",
- pim->bsm_rcvd);
- json_object_int_add(json, "Number of Forwared BSMs",
- pim->bsm_sent);
- json_object_int_add(json, "Number of Dropped BSMs",
- pim->bsm_dropped);
+ json_object_int_add(json, "bsmRx", pim->bsm_rcvd);
+ json_object_int_add(json, "bsmTx", pim->bsm_sent);
+ json_object_int_add(json, "bsmDropped", pim->bsm_dropped);
} else {
vty_out(vty, "BSM Statistics :\n");
vty_out(vty, "----------------\n");
json_row = json_object_new_object();
json_object_string_add(json_row, "If Name", ifp->name);
+ json_object_int_add(json_row, "bsmDroppedConfig",
+ pim_ifp->pim_ifstat_bsm_cfg_miss);
json_object_int_add(
- json_row,
- "Number of BSMs dropped due to config miss",
- pim_ifp->pim_ifstat_bsm_cfg_miss);
- json_object_int_add(
- json_row, "Number of unicast BSMs dropped",
+ json_row, "bsmDroppedUnicast",
pim_ifp->pim_ifstat_ucast_bsm_cfg_miss);
json_object_int_add(json_row,
- "Number of BSMs dropped due to invalid scope zone",
+ "bsmDroppedInvalidScopeZone",
pim_ifp->pim_ifstat_bsm_invalid_sz);
json_object_object_add(json, ifp->name, json_row);
}
json_object_string_add(json, "bsr", bsr_str);
json_object_int_add(json, "priority",
pim->global_scope.current_bsr_prio);
- json_object_int_add(json, "fragment_tag",
+ json_object_int_add(json, "fragmentTag",
pim->global_scope.bsm_frag_tag);
json_object_string_add(json, "state", bsr_state);
json_object_string_add(json, "upTime", uptime);
- json_object_string_add(json, "last_bsm_seen", last_bsm_seen);
+ json_object_string_add(json, "lastBsmSeen", last_bsm_seen);
}
else {
#define PIM_OPTION_UNSET(options, option_mask) ((options) &= ~(option_mask))
#define PIM_OPTION_IS_SET(options, option_mask) ((options) & (option_mask))
-#define PIM_TLV_GET_UINT16(buf) ntohs(*(const uint16_t *)(buf))
-#define PIM_TLV_GET_UINT32(buf) ntohl(*(const uint32_t *)(buf))
+#define PIM_TLV_GET_UINT16(buf) \
+ ({ \
+ uint16_t _tmp; \
+ memcpy(&_tmp, (buf), sizeof(uint16_t)); \
+ ntohs(_tmp); \
+ })
+#define PIM_TLV_GET_UINT32(buf) \
+ ({ \
+ uint32_t _tmp; \
+ memcpy(&_tmp, (buf), sizeof(uint32_t)); \
+ ntohl(_tmp); \
+ })
#define PIM_TLV_GET_TYPE(buf) PIM_TLV_GET_UINT16(buf)
#define PIM_TLV_GET_LENGTH(buf) PIM_TLV_GET_UINT16(buf)
#define PIM_TLV_GET_HOLDTIME(buf) PIM_TLV_GET_UINT16(buf)
zprivs_init(&bgpd_privs);
master = thread_master_create(NULL);
- yang_init();
+ yang_init(true);
nb_init(master, NULL, 0);
bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE);
bgp_option_set(BGP_OPT_NO_LISTEN);
cmd_init(1);
vty_init(master, false);
lib_cmd_init();
- yang_init();
+ yang_init(true);
nb_init(master, NULL, 0);
/* OSPF vty inits. */
vty_init(master, false);
lib_cmd_init();
- yang_init();
+ yang_init(true);
nb_init(master, NULL, 0);
test_init(argc, argv);
struct cmd_element *cmd;
cmd_init(1);
- yang_init();
+ yang_init(true);
nb_init(master, NULL, 0);
install_node(&bgp_node, NULL);
cmd_hostname_set("test");
vty_init(master, false);
lib_cmd_init();
- yang_init();
+ yang_init(true);
nb_init(master, modules, array_size(modules));
/* Create artificial data. */
C>* 192.168.7.0/26 is directly connected, r1-eth7, XX:XX:XX
C>* 192.168.8.0/26 is directly connected, r1-eth8, XX:XX:XX
C>* 192.168.9.0/26 is directly connected, r1-eth9, XX:XX:XX
-O 192.168.0.0/24 [110/10] is directly connected, r1-eth0, XX:XX:XX
-O 192.168.3.0/26 [110/10] is directly connected, r1-eth3, XX:XX:XX
-S>* 1.1.1.1/32 [1/0] is directly connected, r1-eth0, XX:XX:XX
-S>* 1.1.1.2/32 [1/0] is directly connected, r1-eth1, XX:XX:XX
-S>* 4.5.6.10/32 [1/0] via 192.168.0.2, r1-eth0, XX:XX:XX
-S>* 4.5.6.11/32 [1/0] via 192.168.0.2, r1-eth0, XX:XX:XX
-S>* 4.5.6.12/32 [1/0] is directly connected, r1-eth0, XX:XX:XX
-S>* 4.5.6.13/32 [1/0] unreachable (blackhole), XX:XX:XX
-S>* 4.5.6.14/32 [1/0] unreachable (blackhole), XX:XX:XX
-S 4.5.6.15/32 [255/0] via 192.168.0.2, r1-eth0, XX:XX:XX
-S>* 4.5.6.7/32 [1/0] unreachable (blackhole), XX:XX:XX
-S>* 4.5.6.8/32 [1/0] unreachable (blackhole), XX:XX:XX
-S>* 4.5.6.9/32 [1/0] unreachable (ICMP unreachable), XX:XX:XX
+O 192.168.0.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX
+O 192.168.3.0/26 [110/10] is directly connected, r1-eth3, weight 1, XX:XX:XX
+S>* 1.1.1.1/32 [1/0] is directly connected, r1-eth0, weight 1, XX:XX:XX
+S>* 1.1.1.2/32 [1/0] is directly connected, r1-eth1, weight 1, XX:XX:XX
+S>* 4.5.6.10/32 [1/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX
+S>* 4.5.6.11/32 [1/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX
+S>* 4.5.6.12/32 [1/0] is directly connected, r1-eth0, weight 1, XX:XX:XX
+S>* 4.5.6.13/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX
+S>* 4.5.6.14/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX
+S 4.5.6.15/32 [255/0] via 192.168.0.2, r1-eth0, weight 1, XX:XX:XX
+S>* 4.5.6.7/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX
+S>* 4.5.6.8/32 [1/0] unreachable (blackhole), weight 1, XX:XX:XX
+S>* 4.5.6.9/32 [1/0] unreachable (ICMP unreachable), weight 1, XX:XX:XX
C * fe80::/64 is directly connected, r1-eth7, XX:XX:XX
C * fe80::/64 is directly connected, r1-eth8, XX:XX:XX
C * fe80::/64 is directly connected, r1-eth9, XX:XX:XX
-O fc00:0:0:4::/64 [110/10] is directly connected, r1-eth4, XX:XX:XX
-S>* 4:5::6:10/128 [1/0] via fc00::2, r1-eth0, XX:XX:XX
-S>* 4:5::6:11/128 [1/0] via fc00::2, r1-eth0, XX:XX:XX
-S>* 4:5::6:12/128 [1/0] is directly connected, r1-eth0, XX:XX:XX
-S 4:5::6:15/128 [255/0] via fc00::2, r1-eth0, XX:XX:XX
-S>* 4:5::6:7/128 [1/0] unreachable (blackhole), XX:XX:XX
-S>* 4:5::6:8/128 [1/0] unreachable (blackhole), XX:XX:XX
-S>* 4:5::6:9/128 [1/0] unreachable (ICMP unreachable), XX:XX:XX
+O fc00:0:0:4::/64 [110/10] is directly connected, r1-eth4, weight 1, XX:XX:XX
+S>* 4:5::6:10/128 [1/0] via fc00::2, r1-eth0, weight 1, XX:XX:XX
+S>* 4:5::6:11/128 [1/0] via fc00::2, r1-eth0, weight 1, XX:XX:XX
+S>* 4:5::6:12/128 [1/0] is directly connected, r1-eth0, weight 1, XX:XX:XX
+S 4:5::6:15/128 [255/0] via fc00::2, r1-eth0, weight 1, XX:XX:XX
+S>* 4:5::6:7/128 [1/0] unreachable (blackhole), weight 1, XX:XX:XX
+S>* 4:5::6:8/128 [1/0] unreachable (blackhole), weight 1, XX:XX:XX
+S>* 4:5::6:9/128 [1/0] unreachable (ICMP unreachable), weight 1, XX:XX:XX
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+
class BFDTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# Create 4 routers.
for routern in range(1, 4):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
def setup_module(mod):
"Sets up the pytest environment"
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname)),
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)),
)
router.load_config(
- TopoRouter.RD_BFD,
- os.path.join(CWD, '{}/bfdd.conf'.format(rname))
+ TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
# Initialize all routers.
# daemon exists.
for router in router_list.values():
# Check for Version
- if router.has_version('<', '5.1'):
- tgen.set_error('Unsupported FRR version')
+ if router.has_version("<", "5.1"):
+ tgen.set_error("Unsupported FRR version")
break
+
def teardown_module(_mod):
"Teardown the pytest environment"
tgen = get_topogen()
# Check IPv6 routing tables.
logger.info("Checking IPv6 routes for convergence")
for router in tgen.routers().values():
- if router.name == 'r2':
+ if router.name == "r2":
continue
- json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name)
+ json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name)
if not os.path.isfile(json_file):
- logger.info('skipping file {}'.format(json_file))
+ logger.info("skipping file {}".format(json_file))
continue
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ipv6 route json', expected)
- _, result = topotest.run_and_expect(test_func, None, count=40,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ipv6 route json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=40, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('waiting for bfd peers to go up')
+ logger.info("waiting for bfd peers to go up")
for router in tgen.routers().values():
- if router.name == 'r2':
+ if router.name == "r2":
continue
- json_file = '{}/{}/peers.json'.format(CWD, router.name)
+ json_file = "{}/{}/peers.json".format(CWD, router.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show bfd peers json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show bfd peers json", expected
+ )
_, result = topotest.run_and_expect(test_func, None, count=32, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
+
def test_bfd_loss_intermediate():
"""
Assert that BFD notices the bfd link down failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('removing IPv6 address from r2 to simulate loss of connectivity')
+ logger.info("removing IPv6 address from r2 to simulate loss of connectivity")
# Disable r2-eth0 ipv6 address
- cmd = 'vtysh -c \"configure terminal\" -c \"interface r2-eth1\" -c "no ipv6 address 2001:db8:4::2/64\"'
- tgen.net['r2'].cmd(cmd)
-
+ cmd = 'vtysh -c "configure terminal" -c "interface r2-eth1" -c "no ipv6 address 2001:db8:4::2/64"'
+ tgen.net["r2"].cmd(cmd)
+
# Wait the minimum time we can before checking that BGP/BFD
# converged.
- logger.info('waiting for BFD converge down')
+ logger.info("waiting for BFD converge down")
# Check that BGP converged quickly.
for router in tgen.routers().values():
- if router.name == 'r2':
+ if router.name == "r2":
continue
- json_file = '{}/{}/peers_down.json'.format(CWD, router.name)
+ json_file = "{}/{}/peers_down.json".format(CWD, router.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show bfd peers json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show bfd peers json", expected
+ )
_, result = topotest.run_and_expect(test_func, None, count=32, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
- logger.info('waiting for BGP entries to become stale')
+ logger.info("waiting for BGP entries to become stale")
for router in tgen.routers().values():
- if router.name == 'r2':
+ if router.name == "r2":
continue
- json_file = '{}/{}/bgp_ipv6_routes_down.json'.format(CWD, router.name)
+ json_file = "{}/{}/bgp_ipv6_routes_down.json".format(CWD, router.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show bgp ipv6 json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show bgp ipv6 json", expected
+ )
_, result = topotest.run_and_expect(test_func, None, count=50, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
logger.info("Checking IPv6 routes on r1 should still be present")
for router in tgen.routers().values():
- if router.name == 'r2':
+ if router.name == "r2":
continue
- if router.name == 'r3':
+ if router.name == "r3":
continue
- json_file = '{}/r1/ipv6_routes.json'.format(CWD)
+ json_file = "{}/r1/ipv6_routes.json".format(CWD)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ipv6 route json', expected)
- _, result = topotest.run_and_expect(test_func, None, count=30,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ipv6 route json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
+
def test_bfd_comes_back_again():
"""
Assert that BFD notices the bfd link up
and that ipv6 entries appear back
"""
tgen = get_topogen()
- logger.info('re-adding IPv6 address from r2 to simulate connectivity is back')
+ logger.info("re-adding IPv6 address from r2 to simulate connectivity is back")
# adds back r2-eth0 ipv6 address
- cmd = 'vtysh -c \"configure terminal\" -c \"interface r2-eth1\" -c "ipv6 address 2001:db8:4::2/64\"'
- tgen.net['r2'].cmd(cmd)
+ cmd = 'vtysh -c "configure terminal" -c "interface r2-eth1" -c "ipv6 address 2001:db8:4::2/64"'
+ tgen.net["r2"].cmd(cmd)
# Wait the minimum time we can before checking that BGP/BFD
# converged.
- logger.info('waiting for BFD to converge up')
+ logger.info("waiting for BFD to converge up")
# Check that BGP converged quickly.
for router in tgen.routers().values():
- if router.name == 'r2':
+ if router.name == "r2":
continue
- json_file = '{}/{}/peers.json'.format(CWD, router.name)
+ json_file = "{}/{}/peers.json".format(CWD, router.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show bfd peers json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show bfd peers json", expected
+ )
_, result = topotest.run_and_expect(test_func, None, count=16, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
-
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+
class BFDTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# Create 4 routers
for routern in range(1, 5):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r4'])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
def setup_module(mod):
router_list = tgen.routers()
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BFD,
- os.path.join(CWD, '{}/bfdd.conf'.format(rname))
+ TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
# Initialize all routers.
# daemon exists.
for router in router_list.values():
# Check for Version
- if router.has_version('<', '5.1'):
- tgen.set_error('Unsupported FRR version')
+ if router.has_version("<", "5.1"):
+ tgen.set_error("Unsupported FRR version")
break
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('waiting for bfd peers to go up')
+ logger.info("waiting for bfd peers to go up")
for router in tgen.routers().values():
- json_file = '{}/{}/peers.json'.format(CWD, router.name)
+ json_file = "{}/{}/peers.json".format(CWD, router.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show bfd peers json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show bfd peers json", expected
+ )
_, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('waiting for bgp peers to go up')
+ logger.info("waiting for bgp peers to go up")
for router in tgen.routers().values():
- ref_file = '{}/{}/bgp_summary.json'.format(CWD, router.name)
+ ref_file = "{}/{}/bgp_summary.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip bgp summary json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip bgp summary json", expected
+ )
_, res = topotest.run_and_expect(test_func, None, count=125, wait=1.0)
- assertmsg = '{}: bgp did not converge'.format(router.name)
+ assertmsg = "{}: bgp did not converge".format(router.name)
assert res is None, assertmsg
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('waiting for bgp peers converge')
+ logger.info("waiting for bgp peers converge")
for router in tgen.routers().values():
- ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name)
+ ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip bgp json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip bgp json", expected
+ )
_, res = topotest.run_and_expect(test_func, None, count=40, wait=0.5)
- assertmsg = '{}: bgp did not converge'.format(router.name)
+ assertmsg = "{}: bgp did not converge".format(router.name)
assert res is None, assertmsg
pytest.skip(tgen.errors)
# Disable r1-eth0 link.
- tgen.gears['r1'].link_enable('r1-eth0', enabled=False)
+ tgen.gears["r1"].link_enable("r1-eth0", enabled=False)
# Wait the minimum time we can before checking that BGP/BFD
# converged.
- logger.info('waiting for BFD converge')
+ logger.info("waiting for BFD converge")
# Check that BGP converged quickly.
for router in tgen.routers().values():
- json_file = '{}/{}/peers.json'.format(CWD, router.name)
+ json_file = "{}/{}/peers.json".format(CWD, router.name)
expected = json.loads(open(json_file).read())
# Load the same file as previous test, but expect R1 to be down.
- if router.name == 'r1':
+ if router.name == "r1":
for peer in expected:
- if peer['peer'] == '192.168.0.2':
- peer['status'] = 'down'
+ if peer["peer"] == "192.168.0.2":
+ peer["status"] = "down"
else:
for peer in expected:
- if peer['peer'] == '192.168.0.1':
- peer['status'] = 'down'
+ if peer["peer"] == "192.168.0.1":
+ peer["status"] = "down"
- test_func = partial(topotest.router_json_cmp,
- router, 'show bfd peers json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show bfd peers json", expected
+ )
_, res = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert res is None, assertmsg
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('waiting for BGP re convergence')
+ logger.info("waiting for BGP re convergence")
# Check that BGP converged quickly.
for router in tgen.routers().values():
- ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name)
+ ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
# Load the same file as previous test, but set networks to None
# to test absence.
- if router.name == 'r1':
- expected['routes']['10.254.254.2/32'] = None
- expected['routes']['10.254.254.3/32'] = None
- expected['routes']['10.254.254.4/32'] = None
+ if router.name == "r1":
+ expected["routes"]["10.254.254.2/32"] = None
+ expected["routes"]["10.254.254.3/32"] = None
+ expected["routes"]["10.254.254.4/32"] = None
else:
- expected['routes']['10.254.254.1/32'] = None
-
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip bgp json', expected)
- _, res = topotest.run_and_expect(
- test_func,
- None,
- count=3,
- wait=1
+ expected["routes"]["10.254.254.1/32"] = None
+
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip bgp json", expected
)
- assertmsg = '{}: bgp did not converge'.format(router.name)
+ _, res = topotest.run_and_expect(test_func, None, count=3, wait=1)
+ assertmsg = "{}: bgp did not converge".format(router.name)
assert res is None, assertmsg
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
class BFDTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# Create 4 routers.
for routern in range(1, 5):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r4'])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
def setup_module(mod):
router_list = tgen.routers()
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BFD,
- os.path.join(CWD, '{}/bfdd.conf'.format(rname))
+ TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_OSPF,
- os.path.join(CWD, '{}/ospfd.conf'.format(rname))
+ TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_OSPF6,
- os.path.join(CWD, '{}/ospf6d.conf'.format(rname))
+ TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/ospf6d.conf".format(rname))
)
# Initialize all routers.
# daemon exists.
for router in router_list.values():
# Check for Version
- if router.has_version('<', '5.1'):
- tgen.set_error('Unsupported FRR version')
+ if router.has_version("<", "5.1"):
+ tgen.set_error("Unsupported FRR version")
break
# Check IPv4 routing tables.
logger.info("Checking IPv4 routes for convergence")
for router in tgen.routers().values():
- json_file = '{}/{}/ipv4_routes.json'.format(CWD, router.name)
+ json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name)
if not os.path.isfile(json_file):
- logger.info('skipping file {}'.format(json_file))
+ logger.info("skipping file {}".format(json_file))
continue
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip route json', expected)
- _, result = topotest.run_and_expect(test_func, None, count=160,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ip route json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
# Check IPv6 routing tables.
logger.info("Checking IPv6 routes for convergence")
for router in tgen.routers().values():
- json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name)
+ json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name)
if not os.path.isfile(json_file):
- logger.info('skipping file {}'.format(json_file))
+ logger.info("skipping file {}".format(json_file))
continue
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ipv6 route json', expected)
- _, result = topotest.run_and_expect(test_func, None, count=160,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show ipv6 route json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('waiting for bfd peers to go up')
+ logger.info("waiting for bfd peers to go up")
for router in tgen.routers().values():
- json_file = '{}/{}/peers.json'.format(CWD, router.name)
+ json_file = "{}/{}/peers.json".format(CWD, router.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show bfd peers json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show bfd peers json", expected
+ )
_, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+
class BFDTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# Create 4 routers
for routern in range(1, 5):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r4'])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r4"])
def setup_module(mod):
# check for zebra capability
for rname, router in router_list.iteritems():
- if router.check_capability(
- TopoRouter.RD_ZEBRA,
- '--vrfwnetns'
- ) == False:
- return pytest.skip('Skipping BFD Topo1 VRF NETNS feature. VRF NETNS backend not available on FRR')
-
- if os.system('ip netns list') != 0:
- return pytest.skip('Skipping BFD Topo1 VRF NETNS Test. NETNS not available on System')
-
- logger.info('Testing with VRF Namespace support')
-
- cmds = ['if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi',
- 'ip netns add {0}-cust1',
- 'ip link set dev {0}-eth0 netns {0}-cust1',
- 'ip netns exec {0}-cust1 ifconfig {0}-eth0 up']
- cmds2 = ['ip link set dev {0}-eth1 netns {0}-cust1',
- 'ip netns exec {0}-cust1 ifconfig {0}-eth1 up',
- 'ip link set dev {0}-eth2 netns {0}-cust1',
- 'ip netns exec {0}-cust1 ifconfig {0}-eth2 up']
+ if router.check_capability(TopoRouter.RD_ZEBRA, "--vrfwnetns") == False:
+ return pytest.skip(
+ "Skipping BFD Topo1 VRF NETNS feature. VRF NETNS backend not available on FRR"
+ )
+
+ if os.system("ip netns list") != 0:
+ return pytest.skip(
+ "Skipping BFD Topo1 VRF NETNS Test. NETNS not available on System"
+ )
+
+ logger.info("Testing with VRF Namespace support")
+
+ cmds = [
+ "if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi",
+ "ip netns add {0}-cust1",
+ "ip link set dev {0}-eth0 netns {0}-cust1",
+ "ip netns exec {0}-cust1 ifconfig {0}-eth0 up",
+ ]
+ cmds2 = [
+ "ip link set dev {0}-eth1 netns {0}-cust1",
+ "ip netns exec {0}-cust1 ifconfig {0}-eth1 up",
+ "ip link set dev {0}-eth2 netns {0}-cust1",
+ "ip netns exec {0}-cust1 ifconfig {0}-eth2 up",
+ ]
for rname, router in router_list.iteritems():
# create VRF rx-cust1 and link rx-eth0 to rx-cust1
for cmd in cmds:
output = tgen.net[rname].cmd(cmd.format(rname))
- if rname == 'r2':
+ if rname == "r2":
for cmd in cmds2:
output = tgen.net[rname].cmd(cmd.format(rname))
for rname, router in router_list.iteritems():
router.load_config(
TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname)),
- '--vrfwnetns'
+ os.path.join(CWD, "{}/zebra.conf".format(rname)),
+ "--vrfwnetns",
)
router.load_config(
- TopoRouter.RD_BFD,
- os.path.join(CWD, '{}/bfdd.conf'.format(rname))
+ TopoRouter.RD_BFD, os.path.join(CWD, "{}/bfdd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
# Initialize all routers.
# daemon exists.
for router in router_list.values():
# Check for Version
- if router.has_version('<', '5.1'):
- tgen.set_error('Unsupported FRR version')
+ if router.has_version("<", "5.1"):
+ tgen.set_error("Unsupported FRR version")
break
+
def teardown_module(_mod):
"Teardown the pytest environment"
tgen = get_topogen()
# move back rx-eth0 to default VRF
# delete rx-vrf
- cmds = ['ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1',
- 'ip netns delete {0}-cust1']
- cmds2 = ['ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1',
- 'ip netns exec {0}-cust2 ip link set {0}-eth1 netns 1']
+ cmds = [
+ "ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1",
+ "ip netns delete {0}-cust1",
+ ]
+ cmds2 = [
+ "ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1",
+ "ip netns exec {0}-cust2 ip link set {0}-eth1 netns 1",
+ ]
router_list = tgen.routers()
for rname, router in router_list.iteritems():
- if rname == 'r2':
+ if rname == "r2":
for cmd in cmds2:
tgen.net[rname].cmd(cmd.format(rname))
for cmd in cmds:
tgen.net[rname].cmd(cmd.format(rname))
tgen.stop_topology()
+
def test_bfd_connection():
"Assert that the BFD peers can find themselves."
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('waiting for bfd peers to go up')
+ logger.info("waiting for bfd peers to go up")
for router in tgen.routers().values():
- json_file = '{}/{}/peers.json'.format(CWD, router.name)
+ json_file = "{}/{}/peers.json".format(CWD, router.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show bfd peers json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show bfd peers json", expected
+ )
_, result = topotest.run_and_expect(test_func, None, count=8, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('waiting for bgp peers to go up')
+ logger.info("waiting for bgp peers to go up")
for router in tgen.routers().values():
- ref_file = '{}/{}/bgp_summary.json'.format(CWD, router.name)
+ ref_file = "{}/{}/bgp_summary.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip bgp vrf {}-cust1 summary json'.format(router.name), expected)
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show ip bgp vrf {}-cust1 summary json".format(router.name),
+ expected,
+ )
_, res = topotest.run_and_expect(test_func, None, count=125, wait=1.0)
- assertmsg = '{}: bgp did not converge'.format(router.name)
+ assertmsg = "{}: bgp did not converge".format(router.name)
assert res is None, assertmsg
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('waiting for bgp peers converge')
+ logger.info("waiting for bgp peers converge")
for router in tgen.routers().values():
- ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name)
+ ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip bgp vrf {}-cust1 json'.format(router.name), expected)
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show ip bgp vrf {}-cust1 json".format(router.name),
+ expected,
+ )
_, res = topotest.run_and_expect(test_func, None, count=40, wait=0.5)
- assertmsg = '{}: bgp did not converge'.format(router.name)
+ assertmsg = "{}: bgp did not converge".format(router.name)
assert res is None, assertmsg
pytest.skip(tgen.errors)
# Disable r2-eth0 link
- router2 = tgen.gears['r2']
- topotest.interface_set_status(router2, 'r2-eth0', ifaceaction=False, vrf_name='r2-cust1')
+ router2 = tgen.gears["r2"]
+ topotest.interface_set_status(
+ router2, "r2-eth0", ifaceaction=False, vrf_name="r2-cust1"
+ )
# Wait the minimum time we can before checking that BGP/BFD
# converged.
- logger.info('waiting for BFD converge')
+ logger.info("waiting for BFD converge")
# Check that BGP converged quickly.
for router in tgen.routers().values():
- json_file = '{}/{}/peers.json'.format(CWD, router.name)
+ json_file = "{}/{}/peers.json".format(CWD, router.name)
expected = json.loads(open(json_file).read())
# Load the same file as previous test, but expect R1 to be down.
- if router.name == 'r1':
+ if router.name == "r1":
for peer in expected:
- if peer['peer'] == '192.168.0.2':
- peer['status'] = 'down'
+ if peer["peer"] == "192.168.0.2":
+ peer["status"] = "down"
else:
for peer in expected:
- if peer['peer'] == '192.168.0.1':
- peer['status'] = 'down'
+ if peer["peer"] == "192.168.0.1":
+ peer["status"] = "down"
- test_func = partial(topotest.router_json_cmp,
- router, 'show bfd peers json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, router, "show bfd peers json", expected
+ )
_, res = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert res is None, assertmsg
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('waiting for BGP re convergence')
+ logger.info("waiting for BGP re convergence")
# Check that BGP converged quickly.
for router in tgen.routers().values():
- ref_file = '{}/{}/bgp_prefixes.json'.format(CWD, router.name)
+ ref_file = "{}/{}/bgp_prefixes.json".format(CWD, router.name)
expected = json.loads(open(ref_file).read())
# Load the same file as previous test, but set networks to None
# to test absence.
- if router.name == 'r1':
- expected['routes']['10.254.254.2/32'] = None
- expected['routes']['10.254.254.3/32'] = None
- expected['routes']['10.254.254.4/32'] = None
+ if router.name == "r1":
+ expected["routes"]["10.254.254.2/32"] = None
+ expected["routes"]["10.254.254.3/32"] = None
+ expected["routes"]["10.254.254.4/32"] = None
else:
- expected['routes']['10.254.254.1/32'] = None
-
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip bgp vrf {}-cust1 json'.format(router.name), expected)
- _, res = topotest.run_and_expect(
- test_func,
- None,
- count=3,
- wait=1
+ expected["routes"]["10.254.254.1/32"] = None
+
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show ip bgp vrf {}-cust1 json".format(router.name),
+ expected,
)
- assertmsg = '{}: bgp did not converge'.format(router.name)
+ _, res = topotest.run_and_expect(test_func, None, count=3, wait=1)
+ assertmsg = "{}: bgp did not converge".format(router.name)
assert res is None, assertmsg
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
-sys.path.append(os.path.join(CWD, '../lib/'))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../lib/"))
# Required to instantiate the topology builder class.
from mininet.topo import Topo
from lib.common_config import (
- start_topology, write_test_header,
- write_test_footer, reset_config_on_routers, create_static_routes,
- verify_rib, verify_admin_distance_for_static_routes
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ create_static_routes,
+ verify_rib,
+ verify_admin_distance_for_static_routes,
)
from lib.topolog import logger
from lib.bgp import (
- verify_bgp_convergence, create_router_bgp, verify_router_id,
- modify_as_number, verify_as_numbers, clear_bgp_and_verify,
- verify_bgp_timers_and_functionality
+ verify_bgp_convergence,
+ create_router_bgp,
+ verify_router_id,
+ modify_as_number,
+ verify_as_numbers,
+ clear_bgp_and_verify,
+ verify_bgp_timers_and_functionality,
)
from lib.topojson import build_topo_from_json, build_config_from_json
# Reading the data from JSON File for topology creation
jsonFile = "{}/bgp_basic_functionality.json".format(CWD)
try:
- with open(jsonFile, 'r') as topoJson:
+ with open(jsonFile, "r") as topoJson:
topo = json.load(topoJson)
except IOError:
assert False, "Could not read file {}".format(jsonFile)
-#Global Variable
+# Global Variable
KEEPALIVETIMER = 2
HOLDDOWNTIMER = 6
+
class CreateTopo(Topo):
"""
Test BasicTopo - topology 1
global BGP_CONVERGENCE
BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
- assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}". \
- format(BGP_CONVERGENCE)
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error: {}".format(
+ BGP_CONVERGENCE
+ )
logger.info("Running setup_module() done")
# Stop toplogy and Remove tmp files
tgen.stop_topology()
- logger.info("Testsuite end time: {}".
- format(time.asctime(time.localtime(time.time()))))
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
logger.info("=" * 40)
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
- pytest.skip('skipped because of BGP Convergence failure')
+ pytest.skip("skipped because of BGP Convergence failure")
# test case name
tc_name = request.node.name
# Modify router id
input_dict = {
- 'r1': {
- "bgp": {
- 'router_id': '12.12.12.12'
- }
- },
- 'r2': {
- "bgp": {
- 'router_id': '22.22.22.22'
- }
- },
- 'r3': {
- "bgp": {
- 'router_id': '33.33.33.33'
- }
- },
+ "r1": {"bgp": {"router_id": "12.12.12.12"}},
+ "r2": {"bgp": {"router_id": "22.22.22.22"}},
+ "r3": {"bgp": {"router_id": "33.33.33.33"}},
}
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}".\
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Verifying router id once modified
result = verify_router_id(tgen, topo, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}".\
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Delete router id
input_dict = {
- 'r1': {
- "bgp": {
- 'del_router_id': True
- }
- },
- 'r2': {
- "bgp": {
- 'del_router_id': True
- }
- },
- 'r3': {
- "bgp": {
- 'del_router_id': True
- }
- },
+ "r1": {"bgp": {"del_router_id": True}},
+ "r2": {"bgp": {"del_router_id": True}},
+ "r3": {"bgp": {"del_router_id": True}},
}
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Verifying router id once deleted
# Once router-id is deleted, highest interface ip should become
# router-id
result = verify_router_id(tgen, topo, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
- pytest.skip('skipped because of BGP Convergence failure')
+ pytest.skip("skipped because of BGP Convergence failure")
# test case name
tc_name = request.node.name
write_test_header(tc_name)
input_dict = {
- "r1": {
- "bgp": {
- "local_as": 131079
- }
- },
- "r2": {
- "bgp": {
- "local_as": 131079
- }
- },
- "r3": {
- "bgp": {
- "local_as": 131079
- }
- },
- "r4": {
- "bgp": {
- "local_as": 131080
- }
- }
+ "r1": {"bgp": {"local_as": 131079}},
+ "r2": {"bgp": {"local_as": 131079}},
+ "r3": {"bgp": {"local_as": 131079}},
+ "r4": {"bgp": {"local_as": 131080}},
}
result = modify_as_number(tgen, topo, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
result = verify_as_numbers(tgen, topo, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
- pytest.skip('skipped because of BGP Convergence failure')
+ pytest.skip("skipped because of BGP Convergence failure")
# test case name
tc_name = request.node.name
"unicast": {
"neighbor": {
"r2": {
- "dest_link":{
+ "dest_link": {
"r1": {
"keepalivetimer": KEEPALIVETIMER,
- "holddowntimer": HOLDDOWNTIMER
+ "holddowntimer": HOLDDOWNTIMER,
}
}
}
}
}
result = create_router_bgp(tgen, topo, deepcopy(input_dict))
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Api call to clear bgp, so timer modification would take place
- clear_bgp_and_verify(tgen, topo, 'r1')
+ clear_bgp_and_verify(tgen, topo, "r1")
# Verifying bgp timers functionality
result = verify_bgp_timers_and_functionality(tgen, topo, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
- pytest.skip('skipped because of BGP Convergence failure')
+ pytest.skip("skipped because of BGP Convergence failure")
# test case name
tc_name = request.node.name
# Api call to create static routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": "10.0.20.1/32",
- "no_of_ip": 9,
- "admin_distance": 100,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {
+ "network": "10.0.20.1/32",
+ "no_of_ip": 9,
+ "admin_distance": 100,
+ "next_hop": "10.0.0.2",
+ }
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Api call to redistribute static routes
input_dict_1 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
}
}
result = create_router_bgp(tgen, topo, input_dict_1)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
- dut = 'r3'
- protocol = 'bgp'
- next_hop = ['10.0.0.2', '10.0.0.5']
- result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop,
- protocol=protocol)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ dut = "r3"
+ protocol = "bgp"
+ next_hop = ["10.0.0.2", "10.0.0.5"]
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, next_hop=next_hop, protocol=protocol
+ )
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
- pytest.skip('skipped because of BGP Convergence failure')
+ pytest.skip("skipped because of BGP Convergence failure")
# test case name
tc_name = request.node.name
input_dict = {
"r1": {
- "static_routes": [{
- "network": "10.0.20.1/32",
- "admin_distance": 10,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {
+ "network": "10.0.20.1/32",
+ "admin_distance": 10,
+ "next_hop": "10.0.0.2",
+ }
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Verifying admin distance once modified
result = verify_admin_distance_for_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
- pytest.skip('skipped because of BGP Convergence failure')
+ pytest.skip("skipped because of BGP Convergence failure")
# test case name
tc_name = request.node.name
"ipv4": {
"unicast": {
"advertise_networks": [
- {
- "network": "20.0.0.0/32",
- "no_of_network": 10
- },
- {
- "network": "30.0.0.0/32",
- "no_of_network": 10
- }
+ {"network": "20.0.0.0/32", "no_of_network": 10},
+ {"network": "30.0.0.0/32", "no_of_network": 10},
]
}
}
}
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
- dut = 'r2'
+ dut = "r2"
protocol = "bgp"
- result = verify_rib(tgen, 'ipv4', dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
- pytest.skip('skipped because of BGP Convergence failure')
+ pytest.skip("skipped because of BGP Convergence failure")
# test case name
tc_name = request.node.name
reset_config_on_routers(tgen)
# clear ip bgp
- result = clear_bgp_and_verify(tgen, topo, 'r1')
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ result = clear_bgp_and_verify(tgen, topo, "r1")
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
tgen = get_topogen()
if BGP_CONVERGENCE is not True:
- pytest.skip('skipped because of BGP Convergence failure')
+ pytest.skip("skipped because of BGP Convergence failure")
# test case name
tc_name = request.node.name
# Creating configuration from JSON
reset_config_on_routers(tgen)
- for routerN in sorted(topo['routers'].keys()):
- for bgp_neighbor in \
- topo['routers'][routerN]['bgp']['address_family']['ipv4'][
- 'unicast']['neighbor'].keys():
+ for routerN in sorted(topo["routers"].keys()):
+ for bgp_neighbor in topo["routers"][routerN]["bgp"]["address_family"]["ipv4"][
+ "unicast"
+ ]["neighbor"].keys():
# Adding ['source_link'] = 'lo' key:value pair
- topo['routers'][routerN]['bgp']['address_family']['ipv4'][
- 'unicast']['neighbor'][bgp_neighbor]["dest_link"] = {
- 'lo': {
- "source_link": "lo",
- }
- }
+ topo["routers"][routerN]["bgp"]["address_family"]["ipv4"]["unicast"][
+ "neighbor"
+ ][bgp_neighbor]["dest_link"] = {"lo": {"source_link": "lo",}}
# Creating configuration from JSON
build_config_from_json(tgen, topo)
input_dict = {
"r1": {
- "static_routes": [{
- "network": "1.0.2.17/32",
- "next_hop": "10.0.0.2"
- },
- {
- "network": "1.0.3.17/32",
- "next_hop": "10.0.0.6"
- }
+ "static_routes": [
+ {"network": "1.0.2.17/32", "next_hop": "10.0.0.2"},
+ {"network": "1.0.3.17/32", "next_hop": "10.0.0.6"},
]
},
"r2": {
- "static_routes": [{
- "network": "1.0.1.17/32",
- "next_hop": "10.0.0.1"
- },
- {
- "network": "1.0.3.17/32",
- "next_hop": "10.0.0.10"
- }
+ "static_routes": [
+ {"network": "1.0.1.17/32", "next_hop": "10.0.0.1"},
+ {"network": "1.0.3.17/32", "next_hop": "10.0.0.10"},
]
},
"r3": {
- "static_routes": [{
- "network": "1.0.1.17/32",
- "next_hop": "10.0.0.5"
- },
- {
- "network": "1.0.2.17/32",
- "next_hop": "10.0.0.9"
- },
- {
- "network": "1.0.4.17/32",
- "next_hop": "10.0.0.14"
- }
+ "static_routes": [
+ {"network": "1.0.1.17/32", "next_hop": "10.0.0.5"},
+ {"network": "1.0.2.17/32", "next_hop": "10.0.0.9"},
+ {"network": "1.0.4.17/32", "next_hop": "10.0.0.14"},
]
},
- "r4": {
- "static_routes": [{
- "network": "1.0.3.17/32",
- "next_hop": "10.0.0.13"
- }]
- }
+ "r4": {"static_routes": [{"network": "1.0.3.17/32", "next_hop": "10.0.0.13"}]},
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
# Api call verify whether BGP is converged
result = verify_bgp_convergence(tgen, topo)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# 2nd arg is number of routes to send
peer = int(argv[1])
numRoutes = int(argv[2])
-if (peer <= 10):
+if peer <= 10:
asnum = 99
else:
- asnum = peer+100
+ asnum = peer + 100
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n' % (i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes per PE - different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.202.%s.0/24 med 100 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n' % (i, peer, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.203.%s.0/24 med %i next-hop 10.0.%i.%i origin igp\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
# Announce numRoutes equal routes with different med per PE and different neighbor AS, but same source AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (i, peer, (((peer-1) / 5) + 1), peer+100, asnum))
+ stdout.write(
+ "announce route 10.204.%s.0/24 med %i next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (i, peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+ )
stdout.flush()
# Announce 2 different route per peer
-stdout.write('announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n' % (peer, (((peer-1) / 5) + 1), peer+100))
-stdout.write('announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n' % (peer, (((peer-1) / 5) + 1), peer+100, asnum))
+stdout.write(
+ "announce route 10.205.%i.0/24 next-hop 10.0.%i.%i origin igp\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100)
+)
+stdout.write(
+ "announce route 10.206.%i.0/24 next-hop 10.0.%i.%i origin igp as-path [ %i 200 ]\n"
+ % (peer, (((peer - 1) / 5) + 1), peer + 100, asnum)
+)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
"10.0.1.101":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.1.102":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.1.103":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.1.104":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.1.105":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.2.106":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.2.107":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.2.108":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.2.109":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.2.110":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.3.111":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.3.112":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.3.113":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.3.114":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.3.115":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.4.116":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.4.117":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.4.118":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.4.119":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.4.120":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
}
},
"10.0.1.101":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.1.102":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.1.103":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.1.104":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.1.105":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.2.106":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.2.107":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.2.108":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.2.109":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.2.110":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.3.111":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.3.112":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.3.113":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.3.114":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.3.115":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.4.116":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.4.117":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.4.118":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.4.119":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
},
"10.0.4.120":{
"outq":0,
"inq":0,
- "prefixReceivedCount":42,
+ "pfxRcd":42,
"state":"Established"
}
},
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
tgen = get_topogen(self)
# Create the BGP router
- router = tgen.add_router('r1')
+ router = tgen.add_router("r1")
# Setup Switches - 1 switch per 5 peering routers
for swNum in range(1, (total_ebgp_peers + 4) / 5 + 1):
- switch = tgen.add_switch('s{}'.format(swNum))
+ switch = tgen.add_switch("s{}".format(swNum))
switch.add_link(router)
# Add 'total_ebgp_peers' number of eBGP ExaBGP neighbors
- for peerNum in range(1, total_ebgp_peers+1):
- swNum = ((peerNum - 1) / 5 + 1)
+ for peerNum in range(1, total_ebgp_peers + 1):
+ swNum = (peerNum - 1) / 5 + 1
- peer_ip = '10.0.{}.{}'.format(swNum, peerNum + 100)
- peer_route = 'via 10.0.{}.1'.format(swNum)
- peer = tgen.add_exabgp_peer('peer{}'.format(peerNum),
- ip=peer_ip, defaultRoute=peer_route)
+ peer_ip = "10.0.{}.{}".format(swNum, peerNum + 100)
+ peer_route = "via 10.0.{}.1".format(swNum)
+ peer = tgen.add_exabgp_peer(
+ "peer{}".format(peerNum), ip=peer_ip, defaultRoute=peer_route
+ )
- switch = tgen.gears['s{}'.format(swNum)]
+ switch = tgen.gears["s{}".format(swNum)]
switch.add_link(peer)
#
#####################################################
+
def setup_module(module):
tgen = Topogen(BGPECMPTopo1, module.__name__)
tgen.start_topology()
router_list = tgen.routers()
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
router.start()
# Starting Hosts and init ExaBGP on each of them
- topotest.sleep(10, 'starting BGP on all {} peers'.format(total_ebgp_peers))
+ topotest.sleep(10, "starting BGP on all {} peers".format(total_ebgp_peers))
peer_list = tgen.exabgp_peers()
for pname, peer in peer_list.iteritems():
peer_dir = os.path.join(CWD, pname)
- env_file = os.path.join(CWD, 'exabgp.env')
+ env_file = os.path.join(CWD, "exabgp.env")
peer.start(peer_dir, env_file)
logger.info(pname)
pytest.skip(tgen.errors)
# Expected result
- router = tgen.gears['r1']
- if router.has_version('<', '3.0'):
- reffile = os.path.join(CWD, 'r1/summary20.txt')
+ router = tgen.gears["r1"]
+ if router.has_version("<", "3.0"):
+ reffile = os.path.join(CWD, "r1/summary20.txt")
else:
- reffile = os.path.join(CWD, 'r1/summary.txt')
+ reffile = os.path.join(CWD, "r1/summary.txt")
expected = json.loads(open(reffile).read())
with 'json') and compare with `data` contents.
"""
output = router.vtysh_cmd(cmd, isjson=True)
- if 'ipv4Unicast' in output:
- output['ipv4Unicast']['vrfName'] = \
- output['ipv4Unicast']['vrfName'].replace(
- 'default', 'Default')
- elif 'vrfName' in output:
- output['vrfName'] = output['vrfName'].replace('default', 'Default')
+ if "ipv4Unicast" in output:
+ output["ipv4Unicast"]["vrfName"] = output["ipv4Unicast"]["vrfName"].replace(
+ "default", "Default"
+ )
+ elif "vrfName" in output:
+ output["vrfName"] = output["vrfName"].replace("default", "Default")
return topotest.json_cmp(output, data)
test_func = functools.partial(
- _output_summary_cmp, router, 'show ip bgp summary json', expected)
+ _output_summary_cmp, router, "show ip bgp summary json", expected
+ )
_, res = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
- assertmsg = 'BGP router network did not converge'
+ assertmsg = "BGP router network did not converge"
assert res is None, assertmsg
pytest.skip(tgen.errors)
expect = {
- 'routerId': '10.0.255.1',
- 'routes': {
- },
+ "routerId": "10.0.255.1",
+ "routes": {},
}
for net in range(1, 5):
for subnet in range(0, 10):
- netkey = '10.20{}.{}.0/24'.format(net, subnet)
- expect['routes'][netkey] = []
+ netkey = "10.20{}.{}.0/24".format(net, subnet)
+ expect["routes"][netkey] = []
for _ in range(0, 10):
- peer = {'multipath': True, 'valid': True}
- expect['routes'][netkey].append(peer)
+ peer = {"multipath": True, "valid": True}
+ expect["routes"][netkey].append(peer)
- test_func = functools.partial(topotest.router_json_cmp,
- tgen.gears['r1'], 'show ip bgp json', expect)
+ test_func = functools.partial(
+ topotest.router_json_cmp, tgen.gears["r1"], "show ip bgp json", expect
+ )
_, res = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
assertmsg = 'expected multipath routes in "show ip bgp" output'
assert res is None, assertmsg
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import time
import json
import pytest
+
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
-sys.path.append(os.path.join(CWD, '../../'))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from mininet.topo import Topo
from lib.common_config import (
- start_topology, write_test_header,
+ start_topology,
+ write_test_header,
write_test_footer,
- verify_rib, create_static_routes, check_address_types,
- interface_status, reset_config_on_routers
+ verify_rib,
+ create_static_routes,
+ check_address_types,
+ interface_status,
+ reset_config_on_routers,
)
from lib.topolog import logger
-from lib.bgp import (
- verify_bgp_convergence, create_router_bgp,
- clear_bgp_and_verify)
+from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
from lib.topojson import build_topo_from_json, build_config_from_json
# Reading the data from JSON File for topology and configuration creation
ADDR_TYPES = check_address_types()
BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
- assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:"
- " {}".format(BGP_CONVERGENCE))
-
- link_data = [val for links, val in
- topo["routers"]["r2"]["links"].iteritems()
- if "r3" in links]
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+
+ link_data = [
+ val
+ for links, val in topo["routers"]["r2"]["links"].iteritems()
+ if "r3" in links
+ ]
for adt in ADDR_TYPES:
NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data]
if adt == "ipv4":
- NEXT_HOPS[adt] = sorted(
- NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2]))
+ NEXT_HOPS[adt] = sorted(NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2]))
elif adt == "ipv6":
NEXT_HOPS[adt] = sorted(
- NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16))
+ NEXT_HOPS[adt], key=lambda x: int(x.split(":")[-3], 16)
+ )
INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data]
INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1]))
- link_data = [val for links, val in
- topo["routers"]["r3"]["links"].iteritems()
- if "r2" in links]
+ link_data = [
+ val
+ for links, val in topo["routers"]["r3"]["links"].iteritems()
+ if "r2" in links
+ ]
INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data]
INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1]))
input_dict_static = {
dut: {
"static_routes": [
- {
- "network": NETWORK["ipv4"],
- "next_hop": NEXT_HOP_IP["ipv4"]
- },
- {
- "network": NETWORK["ipv6"],
- "next_hop": NEXT_HOP_IP["ipv6"]
- }
+ {"network": NETWORK["ipv4"], "next_hop": NEXT_HOP_IP["ipv4"]},
+ {"network": NETWORK["ipv6"], "next_hop": NEXT_HOP_IP["ipv6"]},
]
}
}
logger.info("Configuring static route on router %s", dut)
result = create_static_routes(tgen, input_dict_static)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
input_dict_2 = {
dut: {
"bgp": {
"address_family": {
"ipv4": {
- "unicast": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
+ "unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
- "unicast": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
- }
+ "unicast": {"redistribute": [{"redist_type": "static"}]}
+ },
}
}
}
logger.info("Configuring redistribute static route on router %s", dut)
result = create_router_bgp(tgen, topo, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
elif test_type == "advertise_nw":
input_dict_nw = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": NETWORK["ipv4"]}
- ]
+ "advertise_networks": [{"network": NETWORK["ipv4"]}]
}
},
"ipv6": {
"unicast": {
- "advertise_networks": [
- {"network": NETWORK["ipv6"]}
- ]
+ "advertise_networks": [{"network": NETWORK["ipv6"]}]
}
- }
+ },
}
}
}
}
- logger.info("Advertising networks %s %s from router %s",
- NETWORK["ipv4"], NETWORK["ipv6"], dut)
+ logger.info(
+ "Advertising networks %s %s from router %s",
+ NETWORK["ipv4"],
+ NETWORK["ipv6"],
+ dut,
+ )
result = create_router_bgp(tgen, topo, input_dict_nw)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
"r3": {
"bgp": {
"address_family": {
- "ipv4": {
- "unicast": {
- "maximum_paths": {
- "ebgp": ecmp_num,
- }
- }
- },
- "ipv6": {
- "unicast": {
- "maximum_paths": {
- "ebgp": ecmp_num,
- }
- }
- }
+ "ipv4": {"unicast": {"maximum_paths": {"ebgp": ecmp_num,}}},
+ "ipv6": {"unicast": {"maximum_paths": {"ebgp": ecmp_num,}}},
}
}
}
logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num)
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
for addr_type in ADDR_TYPES:
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Clear bgp
result = clear_bgp_and_verify(tgen, topo, dut)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
input_dict_2 = {
"r2": {
"address_family": {
"ipv4": {
"unicast": {
- "redistribute": [{
- "redist_type": "static",
- "delete": True
-
- }]
+ "redistribute": [{"redist_type": "static", "delete": True}]
}
},
"ipv6": {
"unicast": {
- "redistribute": [{
- "redist_type": "static",
- "delete": True
-
- }]
+ "redistribute": [{"redist_type": "static", "delete": True}]
}
- }
+ },
}
}
}
logger.info("Remove redistribute static")
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3 are deleted", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=[], protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=[],
+ protocol=protocol,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name)
logger.info("Enable redistribute static")
input_dict_2 = {
"r2": {
"bgp": {
"address_family": {
- "ipv4": {
- "unicast": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
- },
- "ipv6": {
- "unicast": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
- }
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
for addr_type in ADDR_TYPES:
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- for intf_num in range(len(INTF_LIST_R2)+1, 16):
- intf_val = INTF_LIST_R2[intf_num:intf_num+16]
+ for intf_num in range(len(INTF_LIST_R2) + 1, 16):
+ intf_val = INTF_LIST_R2[intf_num : intf_num + 16]
- input_dict_1 = {
- "r2": {
- "interface_list": [intf_val],
- "status": "down"
- }
- }
- logger.info("Shutting down neighbor interface {} on r2".
- format(intf_val))
+ input_dict_1 = {"r2": {"interface_list": [intf_val], "status": "down"}}
+ logger.info("Shutting down neighbor interface {} on r2".format(intf_val))
result = interface_status(tgen, topo, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
if intf_num + 16 < 32:
else:
check_hops = []
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=check_hops,
- protocol=protocol)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, next_hop=check_hops, protocol=protocol
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- input_dict_1 = {
- "r2": {
- "interface_list": INTF_LIST_R2,
- "status": "up"
- }
- }
+ input_dict_1 = {"r2": {"interface_list": INTF_LIST_R2, "status": "up"}}
logger.info("Enabling all neighbor interface {} on r2")
result = interface_status(tgen, topo, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
for addr_type in ADDR_TYPES:
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
for addr_type in ADDR_TYPES:
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
result = verify_rib(
- tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type], protocol=protocol)
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
input_dict_2 = {
{
"network": NETWORK[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
- "delete": True
+ "delete": True,
}
]
}
logger.info("Remove static routes")
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
logger.info("Verifying %s routes on r3 are removed", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_2,
- next_hop=[], protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_2,
+ next_hop=[],
+ protocol=protocol,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name)
for addr_type in ADDR_TYPES:
# Enable static routes
input_dict_4 = {
"r2": {
"static_routes": [
- {
- "network": NETWORK[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type]
- }
+ {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]}
]
}
}
logger.info("Enable static route")
result = create_static_routes(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_4,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_4,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
def test_ecmp_remove_nw_advertise(request):
reset_config_on_routers(tgen)
static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
for addr_type in ADDR_TYPES:
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
input_dict_3 = {
"r2": {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [{
- "network": NETWORK["ipv4"],
- "delete": True
- }]
- }
- },
+ "advertise_networks": [
+ {"network": NETWORK["ipv4"], "delete": True}
+ ]
+ }
+ },
"ipv6": {
"unicast": {
- "advertise_networks": [{
- "network": NETWORK["ipv6"],
- "delete": True
- }]
- }
+ "advertise_networks": [
+ {"network": NETWORK["ipv6"], "delete": True}
+ ]
}
- }
+ },
}
}
}
+ }
logger.info("Withdraw advertised networks")
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=[], protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=[],
+ protocol=protocol,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name)
static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
for addr_type in ADDR_TYPES:
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
import time
import json
import pytest
+
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
-sys.path.append(os.path.join(CWD, '../../'))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from mininet.topo import Topo
from lib.common_config import (
- start_topology, write_test_header,
+ start_topology,
+ write_test_header,
write_test_footer,
- verify_rib, create_static_routes, check_address_types,
- interface_status, reset_config_on_routers
+ verify_rib,
+ create_static_routes,
+ check_address_types,
+ interface_status,
+ reset_config_on_routers,
)
from lib.topolog import logger
-from lib.bgp import (
- verify_bgp_convergence, create_router_bgp,
- clear_bgp_and_verify)
+from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
from lib.topojson import build_topo_from_json, build_config_from_json
# Reading the data from JSON File for topology and configuration creation
for addr_type in ADDR_TYPES:
BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
- assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:"
- " {}".format(BGP_CONVERGENCE))
-
- link_data = [val for links, val in
- topo["routers"]["r2"]["links"].iteritems()
- if "r3" in links]
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
+
+ link_data = [
+ val
+ for links, val in topo["routers"]["r2"]["links"].iteritems()
+ if "r3" in links
+ ]
for adt in ADDR_TYPES:
NEXT_HOPS[adt] = [val[adt].split("/")[0] for val in link_data]
if adt == "ipv4":
- NEXT_HOPS[adt] = sorted(
- NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2]))
+ NEXT_HOPS[adt] = sorted(NEXT_HOPS[adt], key=lambda x: int(x.split(".")[2]))
elif adt == "ipv6":
NEXT_HOPS[adt] = sorted(
- NEXT_HOPS[adt], key=lambda x: int(x.split(':')[-3], 16))
+ NEXT_HOPS[adt], key=lambda x: int(x.split(":")[-3], 16)
+ )
INTF_LIST_R2 = [val["interface"].split("/")[0] for val in link_data]
INTF_LIST_R2 = sorted(INTF_LIST_R2, key=lambda x: int(x.split("eth")[1]))
- link_data = [val for links, val in
- topo["routers"]["r3"]["links"].iteritems()
- if "r2" in links]
+ link_data = [
+ val
+ for links, val in topo["routers"]["r3"]["links"].iteritems()
+ if "r2" in links
+ ]
INTF_LIST_R3 = [val["interface"].split("/")[0] for val in link_data]
INTF_LIST_R3 = sorted(INTF_LIST_R3, key=lambda x: int(x.split("eth")[1]))
input_dict_static = {
dut: {
"static_routes": [
- {
- "network": NETWORK["ipv4"],
- "next_hop": NEXT_HOP_IP["ipv4"]
- },
- {
- "network": NETWORK["ipv6"],
- "next_hop": NEXT_HOP_IP["ipv6"]
- }
+ {"network": NETWORK["ipv4"], "next_hop": NEXT_HOP_IP["ipv4"]},
+ {"network": NETWORK["ipv6"], "next_hop": NEXT_HOP_IP["ipv6"]},
]
}
}
logger.info("Configuring static route on router %s", dut)
result = create_static_routes(tgen, input_dict_static)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
input_dict_2 = {
dut: {
"bgp": {
"address_family": {
"ipv4": {
- "unicast": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
+ "unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
- "unicast": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
- }
+ "unicast": {"redistribute": [{"redist_type": "static"}]}
+ },
}
}
}
logger.info("Configuring redistribute static route on router %s", dut)
result = create_router_bgp(tgen, topo, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
elif test_type == "advertise_nw":
input_dict_nw = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": NETWORK["ipv4"]}
- ]
+ "advertise_networks": [{"network": NETWORK["ipv4"]}]
}
},
"ipv6": {
"unicast": {
- "advertise_networks": [
- {"network": NETWORK["ipv6"]}
- ]
+ "advertise_networks": [{"network": NETWORK["ipv6"]}]
}
- }
+ },
}
}
}
}
- logger.info("Advertising networks %s %s from router %s",
- NETWORK["ipv4"], NETWORK["ipv6"], dut)
+ logger.info(
+ "Advertising networks %s %s from router %s",
+ NETWORK["ipv4"],
+ NETWORK["ipv6"],
+ dut,
+ )
result = create_router_bgp(tgen, topo, input_dict_nw)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
@pytest.mark.parametrize("ecmp_num", ["8", "16", "32"])
"r3": {
"bgp": {
"address_family": {
- "ipv4": {
- "unicast": {
- "maximum_paths": {
- "ibgp": ecmp_num,
- }
- }
- },
- "ipv6": {
- "unicast": {
- "maximum_paths": {
- "ibgp": ecmp_num,
- }
- }
- }
+ "ipv4": {"unicast": {"maximum_paths": {"ibgp": ecmp_num,}}},
+ "ipv6": {"unicast": {"maximum_paths": {"ibgp": ecmp_num,}}},
}
}
}
logger.info("Configuring bgp maximum-paths %s on router r3", ecmp_num)
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
for addr_type in ADDR_TYPES:
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Clear bgp
result = clear_bgp_and_verify(tgen, topo, dut)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
input_dict_2 = {
"r2": {
"address_family": {
"ipv4": {
"unicast": {
- "redistribute": [{
- "redist_type": "static",
- "delete": True
-
- }]
+ "redistribute": [{"redist_type": "static", "delete": True}]
}
},
"ipv6": {
"unicast": {
- "redistribute": [{
- "redist_type": "static",
- "delete": True
-
- }]
+ "redistribute": [{"redist_type": "static", "delete": True}]
}
- }
+ },
}
}
}
logger.info("Remove redistribute static")
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3 are deleted", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=[], protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=[],
+ protocol=protocol,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name)
logger.info("Enable redistribute static")
input_dict_2 = {
"r2": {
"bgp": {
"address_family": {
- "ipv4": {
- "unicast": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
- },
- "ipv6": {
- "unicast": {
- "redistribute": [{
- "redist_type": "static"
- }]
- }
- }
+ "ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
+ "ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
for addr_type in ADDR_TYPES:
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- for intf_num in range(len(INTF_LIST_R2)+1, 16):
- intf_val = INTF_LIST_R2[intf_num:intf_num+16]
+ for intf_num in range(len(INTF_LIST_R2) + 1, 16):
+ intf_val = INTF_LIST_R2[intf_num : intf_num + 16]
- input_dict_1 = {
- "r2": {
- "interface_list": [intf_val],
- "status": "down"
- }
- }
- logger.info("Shutting down neighbor interface {} on r2".
- format(intf_val))
+ input_dict_1 = {"r2": {"interface_list": [intf_val], "status": "down"}}
+ logger.info("Shutting down neighbor interface {} on r2".format(intf_val))
result = interface_status(tgen, topo, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
if intf_num + 16 < 32:
else:
check_hops = []
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=check_hops,
- protocol=protocol)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, next_hop=check_hops, protocol=protocol
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- input_dict_1 = {
- "r2": {
- "interface_list": INTF_LIST_R2,
- "status": "up"
- }
- }
+ input_dict_1 = {"r2": {"interface_list": INTF_LIST_R2, "status": "up"}}
logger.info("Enabling all neighbor interface {} on r2")
result = interface_status(tgen, topo, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
for addr_type in ADDR_TYPES:
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
static_or_nw(tgen, topo, tc_name, "redist_static", "r2")
for addr_type in ADDR_TYPES:
- input_dict_1 = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict_1 = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
result = verify_rib(
- tgen, addr_type, dut, input_dict_1,
- next_hop=NEXT_HOPS[addr_type], protocol=protocol)
+ tgen,
+ addr_type,
+ dut,
+ input_dict_1,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
input_dict_2 = {
{
"network": NETWORK[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
- "delete": True
+ "delete": True,
}
]
}
logger.info("Remove static routes")
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
logger.info("Verifying %s routes on r3 are removed", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_2,
- next_hop=[], protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_2,
+ next_hop=[],
+ protocol=protocol,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name)
for addr_type in ADDR_TYPES:
# Enable static routes
input_dict_4 = {
"r2": {
"static_routes": [
- {
- "network": NETWORK[addr_type],
- "next_hop": NEXT_HOP_IP[addr_type]
- }
+ {"network": NETWORK[addr_type], "next_hop": NEXT_HOP_IP[addr_type]}
]
}
}
logger.info("Enable static route")
result = create_static_routes(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict_4,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict_4,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
reset_config_on_routers(tgen)
static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
for addr_type in ADDR_TYPES:
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
input_dict_3 = {
"r2": {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [{
- "network": NETWORK["ipv4"],
- "delete": True
- }]
- }
- },
+ "advertise_networks": [
+ {"network": NETWORK["ipv4"], "delete": True}
+ ]
+ }
+ },
"ipv6": {
"unicast": {
- "advertise_networks": [{
- "network": NETWORK["ipv6"],
- "delete": True
- }]
- }
+ "advertise_networks": [
+ {"network": NETWORK["ipv6"], "delete": True}
+ ]
}
- }
+ },
}
}
}
+ }
logger.info("Withdraw advertised networks")
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=[], protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=[],
+ protocol=protocol,
+ expected=False,
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Routes still" " present in RIB".format(tc_name)
static_or_nw(tgen, topo, tc_name, "advertise_nw", "r2")
for addr_type in ADDR_TYPES:
- input_dict = {
- "r3": {
- "static_routes": [
- {
- "network": NETWORK[addr_type]
- }
- ]
- }
- }
+ input_dict = {"r3": {"static_routes": [{"network": NETWORK[addr_type]}]}}
logger.info("Verifying %s routes on r3", addr_type)
- result = verify_rib(tgen, addr_type, dut, input_dict,
- next_hop=NEXT_HOPS[addr_type],
- protocol=protocol)
+ result = verify_rib(
+ tgen,
+ addr_type,
+ dut,
+ input_dict,
+ next_hop=NEXT_HOPS[addr_type],
+ protocol=protocol,
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
if __name__ == "__main__":
# Required to instantiate the topology builder class.
from lib.common_config import (
- start_topology, write_test_header,
- write_test_footer, reset_config_on_routers,
- verify_rib, create_static_routes,
- create_prefix_lists, verify_prefix_lists,
- create_route_maps, check_address_types
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ create_static_routes,
+ create_prefix_lists,
+ verify_prefix_lists,
+ create_route_maps,
+ check_address_types,
)
from lib.topolog import logger
from lib.bgp import (
- verify_bgp_convergence, create_router_bgp,
- clear_bgp_and_verify, verify_best_path_as_per_bgp_attribute,
- verify_best_path_as_per_admin_distance, modify_as_number,
- verify_as_numbers
+ verify_bgp_convergence,
+ create_router_bgp,
+ clear_bgp_and_verify,
+ verify_best_path_as_per_bgp_attribute,
+ verify_best_path_as_per_admin_distance,
+ modify_as_number,
+ verify_as_numbers,
)
from lib.topojson import build_topo_from_json, build_config_from_json
# Checking BGP convergence
result = verify_bgp_convergence(tgen, topo)
- assert result is True, ("setup_module :Failed \n Error:"
- " {}".format(result))
+ assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
logger.info("Running setup_module() done")
# Stop toplogy and Remove tmp files
tgen.stop_topology()
- logger.info("Testsuite end time: %s",
- time.asctime(time.localtime(time.time())))
+ logger.info("Testsuite end time: %s", time.asctime(time.localtime(time.time())))
logger.info("=" * 40)
##
#####################################################
+
def test_next_hop_attribute(request):
"""
Verifying route are not getting installed in, as next_hop is
"ipv4": {
"unicast": {
"advertise_networks": [
- {
- "network": "200.50.2.0/32"
- },
- {
- "network": "200.60.2.0/32"
- }
+ {"network": "200.50.2.0/32"},
+ {"network": "200.60.2.0/32"},
]
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
- {
- "network": "200:50:2::/128"
- },
- {
- "network": "200:60:2::/128"
- }
+ {"network": "200:50:2::/128"},
+ {"network": "200:60:2::/128"},
]
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r1"
protocol = "bgp"
# Verification should fail as nexthop-self is not enabled
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: "\
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
+ )
+ assert result is not True, (
+ "Testcase {} : Failed \n Error: "
"{} routes are not present in RIB".format(addr_type, tc_name)
+ )
# Configure next-hop-self to bgp neighbor
input_dict_1 = {
"ipv4": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
}
}
- }
+ },
}
}
},
"ipv4": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r1"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"ipv4": {
"unicast": {
"advertise_networks": [
- {
- "network": "200.50.2.0/32"
- },
- {
- "network": "200.60.2.0/32"
- }
+ {"network": "200.50.2.0/32"},
+ {"network": "200.60.2.0/32"},
]
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
- {
- "network": "200:50:2::/128"
- },
- {
- "network": "200:60:2::/128"
- }
+ {"network": "200:50:2::/128"},
+ {"network": "200:60:2::/128"},
]
}
- }
+ },
}
}
},
"ipv4": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
}
}
- }
+ },
}
}
},
"ipv4": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying best path
dut = "r1"
attribute = "path"
for addr_type in ADDR_TYPES:
- result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut,
- {"r7": input_dict["r7"]},
- attribute)
+ result = verify_best_path_as_per_bgp_attribute(
+ tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Modify AS-Path and verify best path is changed
# Create Prefix list
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_ls_1_ipv4": [{
- "seqid": 10,
- "network": "200.0.0.0/8",
- "le": "32",
- "action": "permit"
- }]
+ "pf_ls_1_ipv4": [
+ {
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit",
+ }
+ ]
},
"ipv6": {
- "pf_ls_1_ipv6": [{
- "seqid": 10,
- "network": "200::/8",
- "le": "128",
- "action": "permit"
- }]
- }
+ "pf_ls_1_ipv6": [
+ {
+ "seqid": 10,
+ "network": "200::/8",
+ "le": "128",
+ "action": "permit",
+ }
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
input_dict_3 = {
"r3": {
"route_maps": {
- "RMAP_AS_PATH": [{
- "action": "permit",
- "match": {
- "ipv4": {
- "prefix_lists": "pf_ls_1_ipv4"
- }
+ "RMAP_AS_PATH": [
+ {
+ "action": "permit",
+ "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}},
+ "set": {"path": {"as_num": "111 222", "as_action": "prepend"}},
},
- "set": {
- "path": {
- "as_num": "111 222",
- "as_action": "prepend"
- }
- }
- },
- {
- "action": "permit",
- "match": {
- "ipv6": {
- "prefix_lists": "pf_ls_1_ipv6"
- }
+ {
+ "action": "permit",
+ "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}},
+ "set": {"path": {"as_num": "111 222", "as_action": "prepend"}},
},
- "set": {
- "path": {
- "as_num": "111 222",
- "as_action": "prepend"
- }
- }
- }]
+ ]
}
}
}
result = create_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure neighbor for route map
input_dict_4 = {
"dest_link": {
"r3": {
"route_maps": [
- {"name": "RMAP_AS_PATH",
- "direction": "in"}
+ {
+ "name": "RMAP_AS_PATH",
+ "direction": "in",
+ }
]
}
}
"dest_link": {
"r3": {
"route_maps": [
- {"name": "RMAP_AS_PATH",
- "direction": "in"}
+ {
+ "name": "RMAP_AS_PATH",
+ "direction": "in",
+ }
]
}
}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying best path
dut = "r1"
attribute = "path"
for addr_type in ADDR_TYPES:
- result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut,
- {"r7": input_dict["r7"]},
- attribute)
+ result = verify_best_path_as_per_bgp_attribute(
+ tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"ipv4": {
"unicast": {
"advertise_networks": [
- {
- "network": "200.50.2.0/32"
- },
- {
- "network": "200.60.2.0/32"
- }
+ {"network": "200.50.2.0/32"},
+ {"network": "200.60.2.0/32"},
]
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
- {
- "network": "200:50:2::/128"
- },
- {
- "network": "200:60:2::/128"
- }
+ {"network": "200:50:2::/128"},
+ {"network": "200:60:2::/128"},
]
}
- }
+ },
}
}
},
"ipv4": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
}
}
- }
+ },
}
}
},
"ipv4": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create Prefix list
input_dict_2 = {
"r2": {
"prefix_lists": {
"ipv4": {
- "pf_ls_1_ipv4": [{
- "seqid": 10,
- "network": "200.0.0.0/8",
- "le": "32",
- "action": "permit"
- }]
+ "pf_ls_1_ipv4": [
+ {
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit",
+ }
+ ]
},
"ipv6": {
- "pf_ls_1_ipv6": [{
- "seqid": 10,
- "network": "200::/8",
- "le": "128",
- "action": "permit"
- }]
- }
+ "pf_ls_1_ipv6": [
+ {
+ "seqid": 10,
+ "network": "200::/8",
+ "le": "128",
+ "action": "permit",
+ }
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
input_dict_3 = {
"r2": {
"route_maps": {
- "RMAP_LOCAL_PREF": [{
- "action": "permit",
- "seq_id": "10",
- "match": {
- "ipv4": {
- "prefix_lists": "pf_ls_1_ipv4"
- }
+ "RMAP_LOCAL_PREF": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}},
+ "set": {"locPrf": 1111},
},
- "set": {
- "locPrf": 1111
- }
- },
- {
- "action": "permit",
- "seq_id": "20",
- "match": {
- "ipv6": {
- "prefix_lists": "pf_ls_1_ipv6"
- }
+ {
+ "action": "permit",
+ "seq_id": "20",
+ "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}},
+ "set": {"locPrf": 1111},
},
- "set": {
- "locPrf": 1111
- }
- }]
+ ]
}
}
}
result = create_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure neighbor for route map
input_dict_4 = {
"dest_link": {
"r2-link1": {
"route_maps": [
- {"name": "RMAP_LOCAL_PREF",
- "direction": "in"}
+ {
+ "name": "RMAP_LOCAL_PREF",
+ "direction": "in",
+ }
]
}
}
"dest_link": {
"r2-link1": {
"route_maps": [
- {"name": "RMAP_LOCAL_PREF",
- "direction": "in"}
+ {
+ "name": "RMAP_LOCAL_PREF",
+ "direction": "in",
+ }
]
}
}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying best path
dut = "r1"
attribute = "locPrf"
for addr_type in ADDR_TYPES:
- result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut,
- {"r7": input_dict["r7"]},
- attribute)
+ result = verify_best_path_as_per_bgp_attribute(
+ tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Modify route map
input_dict_3 = {
"r2": {
"route_maps": {
- "RMAP_LOCAL_PREF": [{
- "action": "permit",
- "seq_id": "10",
- "match": {
- "ipv4": {
- "prefix_lists": "pf_ls_1_ipv4"
- }
+ "RMAP_LOCAL_PREF": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}},
+ "set": {"locPrf": 50},
},
- "set": {
- "locPrf": 50
- }
- },
- {
- "action": "permit",
- "seq_id": "20",
- "match": {
- "ipv6": {
- "prefix_lists": "pf_ls_1_ipv6"
- }
+ {
+ "action": "permit",
+ "seq_id": "20",
+ "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}},
+ "set": {"locPrf": 50},
},
- "set": {
- "locPrf": 50
- }
- }]
+ ]
}
}
}
result = create_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying best path
dut = "r1"
attribute = "locPrf"
for addr_type in ADDR_TYPES:
- result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut,
- {"r7": input_dict["r7"]},
- attribute)
+ result = verify_best_path_as_per_bgp_attribute(
+ tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"ipv4": {
"unicast": {
"advertise_networks": [
- {
- "network": "200.50.2.0/32"
- },
- {
- "network": "200.60.2.0/32"
- }
+ {"network": "200.50.2.0/32"},
+ {"network": "200.60.2.0/32"},
]
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
- {
- "network": "200:50:2::/128"
- },
- {
- "network": "200:60:2::/128"
- }
+ {"network": "200:50:2::/128"},
+ {"network": "200:60:2::/128"},
]
}
- }
+ },
}
}
},
"ipv4": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
}
}
- }
+ },
}
}
},
"ipv4": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create Prefix list
input_dict_2 = {
"r1": {
"prefix_lists": {
"ipv4": {
- "pf_ls_1_ipv4": [{
- "seqid": 10,
- "network": "200.0.0.0/8",
- "le": "32",
- "action": "permit"
- }]
+ "pf_ls_1_ipv4": [
+ {
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit",
+ }
+ ]
},
"ipv6": {
- "pf_ls_1_ipv6": [{
- "seqid": 10,
- "network": "200::/8",
- "le": "128",
- "action": "permit"
- }]
- }
+ "pf_ls_1_ipv6": [
+ {
+ "seqid": 10,
+ "network": "200::/8",
+ "le": "128",
+ "action": "permit",
+ }
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
input_dict_3 = {
"r1": {
"route_maps": {
- "RMAP_WEIGHT": [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- "ipv4": {
- "prefix_lists": "pf_ls_1_ipv4"
- }
+ "RMAP_WEIGHT": [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}},
+ "set": {"weight": 500},
},
- "set": {
- "weight": 500
- }
- },
- {
- "action": "permit",
- "seq_id": "10",
- "match": {
- "ipv6": {
- "prefix_lists": "pf_ls_1_ipv6"
- }
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}},
+ "set": {"weight": 500},
},
- "set": {
- "weight": 500
- }
- }]
+ ]
}
}
}
result = create_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure neighbor for route map
input_dict_4 = {
"dest_link": {
"r1": {
"route_maps": [
- {"name": "RMAP_WEIGHT",
- "direction": "in"}
+ {
+ "name": "RMAP_WEIGHT",
+ "direction": "in",
+ }
]
}
}
"dest_link": {
"r1": {
"route_maps": [
- {"name": "RMAP_WEIGHT",
- "direction": "in"}
+ {
+ "name": "RMAP_WEIGHT",
+ "direction": "in",
+ }
]
}
}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying best path
dut = "r1"
attribute = "weight"
for addr_type in ADDR_TYPES:
- result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut,
- {"r7": input_dict["r7"]},
- attribute)
+ result = verify_best_path_as_per_bgp_attribute(
+ tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Modify route map
input_dict_3 = {
"r1": {
"route_maps": {
- "RMAP_WEIGHT": [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- "ipv4": {
- "prefix_lists": "pf_ls_1_ipv4"
- }
+ "RMAP_WEIGHT": [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}},
+ "set": {"weight": 1000},
},
- "set": {
- "weight": 1000
- }
- },
- {
- "action": "permit",
- "seq_id": "10",
- "match": {
- "ipv6": {
- "prefix_lists": "pf_ls_1_ipv6"
- }
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}},
+ "set": {"weight": 1000},
},
- "set": {
- "weight": 1000
- }
- }]
+ ]
}
}
}
result = create_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying best path
dut = "r1"
attribute = "weight"
for addr_type in ADDR_TYPES:
- result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut,
- {"r7": input_dict["r7"]},
- attribute)
+ result = verify_best_path_as_per_bgp_attribute(
+ tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"ipv4": {
"unicast": {
"advertise_networks": [
- {
- "network": "200.50.2.0/32"
- },
- {
- "network": "200.60.2.0/32"
- }
+ {"network": "200.50.2.0/32"},
+ {"network": "200.60.2.0/32"},
]
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
- {
- "network": "200:50:2::/128"
- },
- {
- "network": "200:60:2::/128"
- }
+ {"network": "200:50:2::/128"},
+ {"network": "200:60:2::/128"},
]
}
- }
+ },
}
}
},
"ipv4": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
}
}
- }
+ },
}
}
},
"ipv4": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
}
}
- }
+ },
}
}
},
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
},
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to create static routes
input_dict_3 = {
"r5": {
"static_routes": [
- {
- "network": "200.50.2.0/32",
- "next_hop": "Null0"
- },
- {
- "network": "200.60.2.0/32",
- "next_hop": "Null0"
- },
- {
- "network": "200:50:2::/128",
- "next_hop": "Null0"
- },
- {
- "network": "200:60:2::/128",
- "next_hop": "Null0"
- }
+ {"network": "200.50.2.0/32", "next_hop": "Null0"},
+ {"network": "200.60.2.0/32", "next_hop": "Null0"},
+ {"network": "200:50:2::/128", "next_hop": "Null0"},
+ {"network": "200:60:2::/128", "next_hop": "Null0"},
]
}
}
result = create_static_routes(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying best path
dut = "r1"
attribute = "origin"
for addr_type in ADDR_TYPES:
- result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut,
- {"r4": input_dict["r4"]},
- attribute)
+ result = verify_best_path_as_per_bgp_attribute(
+ tgen, addr_type, dut, {"r4": input_dict["r4"]}, attribute
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"ipv4": {
"unicast": {
"advertise_networks": [
- {
- "network": "200.50.2.0/32"
- },
- {
- "network": "200.60.2.0/32"
- }
+ {"network": "200.50.2.0/32"},
+ {"network": "200.60.2.0/32"},
]
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
- {
- "network": "200:50:2::/128"
- },
- {
- "network": "200:60:2::/128"
- }
+ {"network": "200:50:2::/128"},
+ {"network": "200:60:2::/128"},
]
}
- }
+ },
}
}
},
"ipv4": {
"unicast": {
"advertise_networks": [
- {
- "network": "200.50.2.0/32"
- },
- {
- "network": "200.60.2.0/32"
- }
+ {"network": "200.50.2.0/32"},
+ {"network": "200.60.2.0/32"},
]
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
- {
- "network": "200:50:2::/128"
- },
- {
- "network": "200:60:2::/128"
- }
+ {"network": "200:50:2::/128"},
+ {"network": "200:60:2::/128"},
]
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create Prefix list
input_dict_2 = {
"r2": {
"prefix_lists": {
"ipv4": {
- "pf_ls_r2_ipv4": [{
- "seqid": 10,
- "network": "200.0.0.0/8",
- "le": "32",
- "action": "permit"
- }]
+ "pf_ls_r2_ipv4": [
+ {
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit",
+ }
+ ]
},
"ipv6": {
- "pf_ls_r2_ipv6": [{
- "seqid": 20,
- "network": "200::/8",
- "le": "128",
- "action": "permit"
- }]
- }
+ "pf_ls_r2_ipv6": [
+ {
+ "seqid": 20,
+ "network": "200::/8",
+ "le": "128",
+ "action": "permit",
+ }
+ ]
+ },
}
},
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_ls_r3_ipv4": [{
- "seqid": 10,
- "network": "200.0.0.0/8",
- "le": "32",
- "action": "permit"
- }]
+ "pf_ls_r3_ipv4": [
+ {
+ "seqid": 10,
+ "network": "200.0.0.0/8",
+ "le": "32",
+ "action": "permit",
+ }
+ ]
},
"ipv6": {
- "pf_ls_r3_ipv6": [{
- "seqid": 20,
- "network": "200::/8",
- "le": "128",
- "action": "permit"
- }]
- }
+ "pf_ls_r3_ipv6": [
+ {
+ "seqid": 20,
+ "network": "200::/8",
+ "le": "128",
+ "action": "permit",
+ }
+ ]
+ },
}
- }
+ },
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
input_dict_3 = {
"r2": {
"route_maps": {
- "RMAP_MED_R2": [{
- "action": "permit",
- "seq_id": "10",
- "match": {
- "ipv4": {
- "prefix_lists": "pf_ls_r2_ipv4"
- }
+ "RMAP_MED_R2": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv4": {"prefix_lists": "pf_ls_r2_ipv4"}},
+ "set": {"metric": 100},
},
- "set": {
- "metric": 100
- }
- },
- {
- "action": "permit",
- "seq_id": "20",
- "match": {
- "ipv6": {
- "prefix_lists": "pf_ls_r2_ipv6"
- }
+ {
+ "action": "permit",
+ "seq_id": "20",
+ "match": {"ipv6": {"prefix_lists": "pf_ls_r2_ipv6"}},
+ "set": {"metric": 100},
},
- "set": {
- "metric": 100
- }
- }]
+ ]
}
},
"r3": {
"route_maps": {
- "RMAP_MED_R3": [{
- "action": "permit",
- "seq_id": "10",
- "match": {
- "ipv4": {
- "prefix_lists": "pf_ls_r3_ipv4"
- }
+ "RMAP_MED_R3": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv4": {"prefix_lists": "pf_ls_r3_ipv4"}},
+ "set": {"metric": 10},
},
- "set": {
- "metric": 10
- }
- },
- {
- "action": "permit",
- "seq_id": "20",
- "match": {
- "ipv6": {
- "prefix_lists": "pf_ls_r3_ipv6"
- }
+ {
+ "action": "permit",
+ "seq_id": "20",
+ "match": {"ipv6": {"prefix_lists": "pf_ls_r3_ipv6"}},
+ "set": {"metric": 10},
},
- "set": {
- "metric": 10
- }
- }]
+ ]
}
- }
+ },
}
result = create_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure neighbor for route map
input_dict_4 = {
"dest_link": {
"r2-link1": {
"route_maps": [
- {"name": "RMAP_MED_R2",
- "direction": "in"}
+ {
+ "name": "RMAP_MED_R2",
+ "direction": "in",
+ }
]
}
}
},
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}},
}
}
},
"dest_link": {
"r2-link1": {
"route_maps": [
- {"name": "RMAP_MED_R2",
- "direction": "in"}
+ {
+ "name": "RMAP_MED_R2",
+ "direction": "in",
+ }
]
}
}
},
- "r1": {
- "dest_link": {
- "r2": {"next_hop_self": True}
- }
- }
+ "r1": {"dest_link": {"r2": {"next_hop_self": True}}},
}
}
- }
+ },
}
}
},
"ipv4": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- },
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}},
"r5": {
"dest_link": {
"r3": {
"route_maps": [
- {"name": "RMAP_MED_R3",
- "direction": "in"}
+ {
+ "name": "RMAP_MED_R3",
+ "direction": "in",
+ }
]
}
}
- }
+ },
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
- "r1": {
- "dest_link": {
- "r3": {"next_hop_self": True}
- }
- },
+ "r1": {"dest_link": {"r3": {"next_hop_self": True}}},
"r5": {
"dest_link": {
"r3": {
"route_maps": [
- {"name": "RMAP_MED_R3",
- "direction": "in"}
+ {
+ "name": "RMAP_MED_R3",
+ "direction": "in",
+ }
]
}
}
- }
+ },
}
}
- }
+ },
}
}
- }
+ },
}
- result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = create_router_bgp(tgen, topo, input_dict_4)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying best path
dut = "r1"
attribute = "metric"
for addr_type in ADDR_TYPES:
- result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut,
- input_dict, attribute)
+ result = verify_best_path_as_per_bgp_attribute(
+ tgen, addr_type, dut, input_dict, attribute
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Modify route-map to set med value
input_dict_3 = {
"r3": {
"route_maps": {
- "RMAP_MED_R3": [{
- "action": "permit",
- "seq_id": "10",
- "match": {
- "ipv4": {
- "prefix_lists": "pf_ls_r3_ipv4"
- }
+ "RMAP_MED_R3": [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {"ipv4": {"prefix_lists": "pf_ls_r3_ipv4"}},
+ "set": {"metric": 200},
},
- "set": {
- "metric": 200
- }
- },
- {
- "action": "permit",
- "seq_id": "20",
- "match": {
- "ipv6": {
- "prefix_lists": "pf_ls_r3_ipv6"
- }
+ {
+ "action": "permit",
+ "seq_id": "20",
+ "match": {"ipv6": {"prefix_lists": "pf_ls_r3_ipv6"}},
+ "set": {"metric": 200},
},
- "set": {
- "metric": 200
- }
- }]
+ ]
}
}
}
result = create_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying best path
dut = "r1"
attribute = "metric"
for addr_type in ADDR_TYPES:
- result = verify_best_path_as_per_bgp_attribute(tgen, addr_type, dut,
- input_dict, attribute)
+ result = verify_best_path_as_per_bgp_attribute(
+ tgen, addr_type, dut, input_dict, attribute
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
{
"network": "200.50.2.0/32",
"admin_distance": 80,
- "next_hop": "10.0.0.14"
+ "next_hop": "10.0.0.14",
},
{
"network": "200.50.2.0/32",
"admin_distance": 60,
- "next_hop": "10.0.0.18"
+ "next_hop": "10.0.0.18",
},
{
"network": "200:50:2::/128",
"admin_distance": 80,
- "next_hop": "fd00::1"
+ "next_hop": "fd00::1",
},
{
"network": "200:50:2::/128",
"admin_distance": 60,
- "next_hop": "fd00::1"
- }
+ "next_hop": "fd00::1",
+ },
]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to redistribute static routes
input_dict_2 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
},
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying best path
dut = "r1"
attribute = "admin_distance"
input_dict = {
- "ipv4": {
- "r2": {
- "static_routes": [{
- "network": "200.50.2.0/32",
- "admin_distance": 80,
- "next_hop": "10.0.0.14"
- },
- {
- "network": "200.50.2.0/32",
- "admin_distance": 60,
- "next_hop": "10.0.0.18"
- }
- ]
- }
- },
- "ipv6": {
- "r2": {
- "static_routes": [{
- "network": "200:50:2::/128",
- "admin_distance": 80,
- "next_hop": "fd00::1"
- },
- {
- "network": "200:50:2::/128",
- "admin_distance": 60,
- "next_hop": "fd00::1"
- }]
+ "ipv4": {
+ "r2": {
+ "static_routes": [
+ {
+ "network": "200.50.2.0/32",
+ "admin_distance": 80,
+ "next_hop": "10.0.0.14",
+ },
+ {
+ "network": "200.50.2.0/32",
+ "admin_distance": 60,
+ "next_hop": "10.0.0.18",
+ },
+ ]
}
- }
+ },
+ "ipv6": {
+ "r2": {
+ "static_routes": [
+ {
+ "network": "200:50:2::/128",
+ "admin_distance": 80,
+ "next_hop": "fd00::1",
+ },
+ {
+ "network": "200:50:2::/128",
+ "admin_distance": 60,
+ "next_hop": "fd00::1",
+ },
+ ]
+ }
+ },
}
for addr_type in ADDR_TYPES:
- result = verify_best_path_as_per_admin_distance(tgen, addr_type, dut,
- input_dict[addr_type],
- attribute)
+ result = verify_best_path_as_per_admin_distance(
+ tgen, addr_type, dut, input_dict[addr_type], attribute
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
- start_topology, write_test_header,
- write_test_footer, reset_config_on_routers,
- verify_rib, create_static_routes,
- create_prefix_lists, verify_prefix_lists
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ verify_rib,
+ create_static_routes,
+ create_prefix_lists,
+ verify_prefix_lists,
)
from lib.topolog import logger
-from lib.bgp import (
- verify_bgp_convergence, create_router_bgp,
- clear_bgp_and_verify
-)
+from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
from lib.topojson import build_topo_from_json, build_config_from_json
# Reading the data from JSON File for topology creation
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
- logger.info("="*40)
+ logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# Api call verify whether BGP is converged
BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
- assert BGP_CONVERGENCE is True, ("setup_module :Failed \n Error:"
- " {}".format(BGP_CONVERGENCE))
+ assert BGP_CONVERGENCE is True, "setup_module :Failed \n Error:" " {}".format(
+ BGP_CONVERGENCE
+ )
logger.info("Running setup_module() done")
# Stop toplogy and Remove tmp files
tgen.stop_topology()
- logger.info("Testsuite end time: {}".
- format(time.asctime(time.localtime(time.time()))))
- logger.info("="*40)
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
#####################################################
#
# Create Static routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": "20.0.20.1/32",
- "no_of_ip": 1,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {"network": "20.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"}
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create ip prefix list
input_dict_2 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
+ "pf_list_1": [{"seqid": 10, "network": "any", "action": "permit"}]
}
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure bgp neighbor with prefix list
input_dict_3 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
}
"dest_link": {
"r3": {
"prefix_lists": [
- {
- "name": "pf_list_1",
- "direction": "in"
- }
+ {"name": "pf_list_1", "direction": "in"}
]
}
}
}
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
# Create Static routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": "10.0.20.1/32",
- "no_of_ip": 1,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {"network": "10.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"}
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create Static routes
input_dict_1 = {
"r1": {
- "static_routes": [{
- "network": "20.0.20.1/32",
- "no_of_ip": 1,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {"network": "20.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"}
+ ]
}
}
result = create_static_routes(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
input_dict_5 = {
"r3": {
- "static_routes": [{
- "network": "10.0.0.2/30",
- "no_of_ip": 1,
- "next_hop": "10.0.0.9"
- }]
+ "static_routes": [
+ {"network": "10.0.0.2/30", "no_of_ip": 1, "next_hop": "10.0.0.9"}
+ ]
}
}
result = create_static_routes(tgen, input_dict_5)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to redistribute static routes
"r1": {
"prefix_lists": {
"ipv4": {
- "pf_list_1": [{
- "seqid": 10,
- "network": "20.0.20.1/32",
- "action": "permit"
- }]
+ "pf_list_1": [
+ {"seqid": 10, "network": "20.0.20.1/32", "action": "permit"}
+ ]
}
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure prefix list to bgp neighbor
# Configure bgp neighbor with prefix list
"prefix_lists": [
{
"name": "pf_list_1",
- "direction": "out"
+ "direction": "out",
}
]
}
},
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
- ]
+ {"redist_type": "connected"},
+ ],
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
-
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
- " present in RIB".format(tc_name)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
+
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
write_test_footer(tc_name)
# Create Static Routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": "10.0.20.1/32",
- "no_of_ip": 1,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {"network": "10.0.20.1/32", "no_of_ip": 1, "next_hop": "10.0.0.2"}
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to redistribute static routes
# Create ip prefix list
"prefix_lists": {
"ipv4": {
"pf_list_1": [
- {
- "seqid": "10",
- "network": "10.0.20.1/32",
- "action": "deny"
- },
- {
- "seqid": "11",
- "network": "any",
- "action": "permit"
- }
+ {"seqid": "10", "network": "10.0.20.1/32", "action": "deny"},
+ {"seqid": "11", "network": "any", "action": "permit"},
]
}
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure bgp neighbor with prefix list
input_dict_3 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
}
"dest_link": {
"r3": {
"prefix_lists": [
- {
- "name": "pf_list_1",
- "direction": "in"
- }
+ {"name": "pf_list_1", "direction": "in"}
]
}
}
}
}
}
- }
+ },
}
# Configure prefix list to bgp neighbor
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
write_test_footer(tc_name)
"prefix_lists": {
"ipv4": {
"pf_list_1": [
- {
- "seqid": "10",
- "network": "10.0.20.1/32",
- "action": "deny"
- }
+ {"seqid": "10", "network": "10.0.20.1/32", "action": "deny"}
]
}
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
result = verify_prefix_lists(tgen, input_dict_2)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Delete prefix list
input_dict_2 = {
"seqid": "10",
"network": "10.0.20.1/32",
"action": "deny",
- "delete": True
+ "delete": True,
}
]
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
result = verify_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
# Create Static Routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": "10.0.20.1/32",
- "no_of_ip": 9,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create Static Routes
input_dict_1 = {
"r2": {
- "static_routes": [{
- "network": "20.0.20.1/32",
- "no_of_ip": 9,
- "next_hop": "10.0.0.1"
- }]
+ "static_routes": [
+ {"network": "20.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.1"}
+ ]
}
}
result = create_static_routes(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to redistribute static routes
"seqid": "10",
"network": "10.0.0.0/8",
"le": "32",
- "action": "deny"
+ "action": "deny",
},
- {
- "seqid": "11",
- "network": "any",
- "action": "permit"
- }
+ {"seqid": "11", "network": "any", "action": "permit"},
]
}
}
}
}
result = create_prefix_lists(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure prefix list to bgp neighbor
input_dict_4 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
}
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
}
"prefix_lists": [
{
"name": "pf_list_1",
- "direction": "out"
+ "direction": "out",
}
]
}
}
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
write_test_footer(tc_name)
# Create Static Routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": "10.0.20.1/32",
- "no_of_ip": 9,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to redistribute static routes
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1": [{
- "seqid": "10",
- "network": "10.0.0.0/8",
- "le": "32",
- "action": "permit"
- }]
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit",
+ }
+ ]
}
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure prefix list to bgp neighbor
input_dict_3 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
}
"unicast": {
"neighbor": {
"r1": {
- "dest_link":{
+ "dest_link": {
"r3": {
"prefix_lists": [
- {
- "name": "pf_list_1",
- "direction": "in"
- }
+ {"name": "pf_list_1", "direction": "in"}
]
}
}
}
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Modify prefix list
input_dict_1 = {
"seqid": "10",
"network": "10.0.0.0/8",
"le": "32",
- "action": "deny"
+ "action": "deny",
},
- {
- "seqid": "11",
- "network": "any",
- "action": "permit"
- }
+ {"seqid": "11", "network": "any", "action": "permit"},
]
}
}
}
}
result = create_prefix_lists(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to clear bgp, so config changes would be reflected
dut = "r3"
result = clear_bgp_and_verify(tgen, topo, dut)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
write_test_footer(tc_name)
# Create Static Routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": "10.0.20.1/32",
- "no_of_ip": 9,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to redistribute static routes
"seqid": "10",
"network": "10.0.0.0/8",
"le": "32",
- "action": "deny"
+ "action": "deny",
},
- {
- "seqid": "11",
- "network": "any",
- "action": "permit"
- }
+ {"seqid": "11", "network": "any", "action": "permit"},
]
}
}
}
}
result = create_prefix_lists(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure prefix list to bgp neighbor
input_dict_2 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
}
"dest_link": {
"r3": {
"prefix_lists": [
- {
- "name": "pf_list_1",
- "direction": "in"
- }
+ {"name": "pf_list_1", "direction": "in"}
]
}
}
}
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
# Modify ip prefix list
input_dict_1 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1": [{
- "seqid": "10",
- "network": "10.0.0.0/8",
- "le": "32",
- "action": "permit"
- }]
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit",
+ }
+ ]
}
}
}
-
}
result = create_prefix_lists(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to clear bgp, so config changes would be reflected
dut = "r3"
result = clear_bgp_and_verify(tgen, topo, dut)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
# Create Static Routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": "10.0.20.1/32",
- "no_of_ip": 9,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to redistribute static routes
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1": [{
- "seqid": "10",
- "network": "10.0.0.0/8",
- "le": "32",
- "action": "permit"
- }]
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit",
+ }
+ ]
}
}
}
-
}
result = create_prefix_lists(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure prefix list to bgp neighbor
input_dict_2 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
}
"prefix_lists": [
{
"name": "pf_list_1",
- "direction": "out"
+ "direction": "out",
}
]
}
}
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Modify ip prefix list
input_dict_1 = {
"seqid": "10",
"network": "10.0.0.0/8",
"le": "32",
- "action": "deny"
+ "action": "deny",
},
- {
- "seqid": "11",
- "network": "any",
- "action": "permit"
- }
+ {"seqid": "11", "network": "any", "action": "permit"},
]
}
}
}
-
}
result = create_prefix_lists(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to clear bgp, so config changes would be reflected
dut = "r3"
result = clear_bgp_and_verify(tgen, topo, dut)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
write_test_footer(tc_name)
# Create Static Routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": "10.0.20.1/32",
- "no_of_ip": 9,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to redistribute static routes
# Create ip prefix list
"seqid": "10",
"network": "10.0.0.0/8",
"le": "32",
- "action": "deny"
+ "action": "deny",
},
- {
- "seqid": "11",
- "network": "any",
- "action": "permit"
- }
+ {"seqid": "11", "network": "any", "action": "permit"},
]
}
}
}
-
}
result = create_prefix_lists(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure prefix list to bgp neighbor
input_dict_2 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
}
"unicast": {
"neighbor": {
"r4": {
- "dest_link":{
+ "dest_link": {
"r3": {
"prefix_lists": [
{
"name": "pf_list_1",
- "direction": "out"
+ "direction": "out",
}
]
}
}
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
- result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
# Modify ip prefix list
input_dict_1 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1": [{
- "seqid": "10",
- "network": "10.0.0.0/8",
- "le": "32",
- "action": "permit"
- }]
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit",
+ }
+ ]
}
}
}
-
}
result = create_prefix_lists(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to clear bgp, so config changes would be reflected
dut = "r3"
result = clear_bgp_and_verify(tgen, topo, dut)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
# Create Static Routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": "10.0.20.1/32",
- "no_of_ip": 9,
- "next_hop": "10.0.0.2"
- }]
+ "static_routes": [
+ {"network": "10.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.2"}
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create Static Routes
input_dict_1 = {
"r2": {
- "static_routes": [{
- "network": "20.0.20.1/32",
- "no_of_ip": 9,
- "next_hop": "10.0.0.1"
- }]
+ "static_routes": [
+ {"network": "20.0.20.1/32", "no_of_ip": 9, "next_hop": "10.0.0.1"}
+ ]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to redistribute static routes
# Create ip prefix list
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1": [{
- "seqid": "10",
- "network": "10.0.0.0/8",
- "le": "32",
- "action": "permit"
- }]
+ "pf_list_1": [
+ {
+ "seqid": "10",
+ "network": "10.0.0.0/8",
+ "le": "32",
+ "action": "permit",
+ }
+ ]
}
}
}
-
}
result = create_prefix_lists(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Configure prefix list to bgp neighbor
input_dict_4 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
}
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
}
"prefix_lists": [
{
"name": "pf_list_1",
- "direction": "out"
+ "direction": "out",
}
]
}
}
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
result = verify_rib(tgen, "ipv4", dut, input_dict, protocol=protocol)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
- result = verify_rib(tgen, "ipv4", dut, input_dict_1, protocol=protocol, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: Routes still" \
- " present in RIB".format(tc_name)
+ result = verify_rib(
+ tgen, "ipv4", dut, input_dict_1, protocol=protocol, expected=False
+ )
+ assert (
+ result is not True
+ ), "Testcase {} : Failed \n Error: Routes still" " present in RIB".format(tc_name)
write_test_footer(tc_name)
# Required to instantiate the topology builder class.
from lib.topojson import *
from lib.common_config import (
- start_topology, write_test_header,
- write_test_footer, verify_bgp_community,
- verify_rib, delete_route_maps, create_bgp_community_lists,
- interface_status, create_route_maps, create_prefix_lists,
- verify_route_maps, check_address_types,
- shutdown_bringup_interface, verify_prefix_lists, reset_config_on_routers)
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ verify_bgp_community,
+ verify_rib,
+ delete_route_maps,
+ create_bgp_community_lists,
+ interface_status,
+ create_route_maps,
+ create_prefix_lists,
+ verify_route_maps,
+ check_address_types,
+ shutdown_bringup_interface,
+ verify_prefix_lists,
+ reset_config_on_routers,
+)
from lib.topolog import logger
from lib.bgp import (
- verify_bgp_convergence, create_router_bgp,
- clear_bgp_and_verify, verify_bgp_attributes)
+ verify_bgp_convergence,
+ create_router_bgp,
+ clear_bgp_and_verify,
+ verify_bgp_attributes,
+)
from lib.topojson import build_topo_from_json, build_config_from_json
# Global variables
bgp_convergence = False
-NETWORK = {
- "ipv4": ["11.0.20.1/32", "20.0.20.1/32"],
- "ipv6": ["1::1/128", "2::1/128"]
-}
+NETWORK = {"ipv4": ["11.0.20.1/32", "20.0.20.1/32"], "ipv6": ["1::1/128", "2::1/128"]}
MASK = {"ipv4": "32", "ipv6": "128"}
-NEXT_HOP = {
- "ipv4": "10.0.0.2",
- "ipv6": "fd00::2"
-}
+NEXT_HOP = {"ipv4": "10.0.0.2", "ipv6": "fd00::2"}
ADDR_TYPES = check_address_types()
# Api call verify whether BGP is converged
bgp_convergence = verify_bgp_convergence(tgen, topo)
- assert bgp_convergence is True, ("setup_module :Failed \n Error:"
- " {}".format(bgp_convergence))
+ assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format(
+ bgp_convergence
+ )
logger.info("Running setup_module() done")
# Stop toplogy and Remove tmp files
tgen.stop_topology()
- logger.info("Testsuite end time: {}".
- format(time.asctime(time.localtime(time.time()))))
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
logger.info("=" * 40)
result = create_static_routes(tgen, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Api call to redistribute static routes
input_dict_1 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
},
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
- }
- }
+ },
+ },
}
}
}
result = create_router_bgp(tgen, topo, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
input_dict_2 = {
"r4": {
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Api call to redistribute static routes
input_dict_5 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
},
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_5)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
input_dict_2 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "action": "permit",
- "network": NETWORK["ipv4"][0]
- }],
- "pf_list_2_ipv4": [{
- "seqid": 10,
- "action": "permit",
- "network": NETWORK["ipv4"][1]
- }]
+ "pf_list_1_ipv4": [
+ {
+ "seqid": 10,
+ "action": "permit",
+ "network": NETWORK["ipv4"][0],
+ }
+ ],
+ "pf_list_2_ipv4": [
+ {
+ "seqid": 10,
+ "action": "permit",
+ "network": NETWORK["ipv4"][1],
+ }
+ ],
},
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "action": "permit",
- "network": NETWORK["ipv6"][0]
- }],
- "pf_list_2_ipv6": [{
- "seqid": 100,
- "action": "permit",
- "network": NETWORK["ipv6"][1]
- }]
- }
+ "pf_list_1_ipv6": [
+ {
+ "seqid": 100,
+ "action": "permit",
+ "network": NETWORK["ipv6"][0],
+ }
+ ],
+ "pf_list_2_ipv6": [
+ {
+ "seqid": 100,
+ "action": "permit",
+ "network": NETWORK["ipv6"][1],
+ }
+ ],
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create route map
for addr_type in ADDR_TYPES:
input_dict_6 = {
- "r3": {
- "route_maps": {
- "rmap_match_tag_1_{}".format(addr_type): [{
- "action": "deny",
- "match": {
- addr_type: {
- "prefix_lists":
- "pf_list_1_{}".format(addr_type)
+ "r3": {
+ "route_maps": {
+ "rmap_match_tag_1_{}".format(addr_type): [
+ {
+ "action": "deny",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
}
- }
- }],
- "rmap_match_tag_2_{}".format(addr_type): [{
- "action": "permit",
- "match": {
- addr_type: {
- "prefix_lists":
- "pf_list_2_{}".format(addr_type)
+ ],
+ "rmap_match_tag_2_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_2_{}".format(addr_type)
+ }
+ },
}
- }
- }]
+ ],
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_6)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_7 = {
"dest_link": {
"r3": {
"route_maps": [
- {"name":
- "rmap_match_tag_1_ipv4",
- "direction": "in"},
- {"name":
- "rmap_match_tag_1_ipv4",
- "direction": "out"}
+ {
+ "name": "rmap_match_tag_1_ipv4",
+ "direction": "in",
+ },
+ {
+ "name": "rmap_match_tag_1_ipv4",
+ "direction": "out",
+ },
]
}
}
"dest_link": {
"r3": {
"route_maps": [
- {"name":
- "rmap_match_tag_1_ipv6",
- "direction": "in"},
- {"name":
- "rmap_match_tag_1_ipv6",
- "direction": "out"}
+ {
+ "name": "rmap_match_tag_1_ipv6",
+ "direction": "in",
+ },
+ {
+ "name": "rmap_match_tag_1_ipv6",
+ "direction": "out",
+ },
]
}
}
}
}
}
- }
+ },
}
}
}
result = create_router_bgp(tgen, topo, input_dict_7)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
for adt in ADDR_TYPES:
# Verifying RIB routes
{
"network": [NETWORK[adt][1]],
"no_of_ip": 9,
- "next_hop": NEXT_HOP[adt]
+ "next_hop": NEXT_HOP[adt],
}
]
}
}
- result = verify_rib(tgen, adt, dut, input_dict_2, protocol=protocol,
- expected=False)
+ result = verify_rib(
+ tgen, adt, dut, input_dict_2, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n"
- "routes are not present in rib \n Error: {}".format(
- tc_name, result)
+ "routes are not present in rib \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
{
"network": [NETWORK[adt][0]],
"no_of_ip": 9,
- "next_hop": NEXT_HOP[adt]
+ "next_hop": NEXT_HOP[adt],
}
]
}
}
- result = verify_rib(tgen, adt, dut, input_dict, protocol=protocol,
- expected=False)
+ result = verify_rib(
+ tgen, adt, dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n "
- "routes are not present in rib \n Error: {}".format(
- tc_name, result)
+ "routes are not present in rib \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
-@pytest.mark.parametrize("prefix_action, rmap_action", [("permit", "permit"),
- ("permit", "deny"), ("deny", "permit"),
- ("deny", "deny")])
+@pytest.mark.parametrize(
+ "prefix_action, rmap_action",
+ [("permit", "permit"), ("permit", "deny"), ("deny", "permit"), ("deny", "deny")],
+)
def test_route_map_with_action_values_combination_of_prefix_action_p0(
- request, prefix_action, rmap_action):
+ request, prefix_action, rmap_action
+):
"""
TC_36:
Test permit/deny statements operation in route-maps with a permutation and
{
"network": NETWORK[adt][0],
"no_of_ip": 9,
- "next_hop": NEXT_HOP[adt]
+ "next_hop": NEXT_HOP[adt],
}
]
}
result = create_static_routes(tgen, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Api call to redistribute static routes
input_dict_1 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
},
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
- }
- }
+ },
+ },
}
}
}
result = create_router_bgp(tgen, topo, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Permit in perfix list and route-map
input_dict_2 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": prefix_action
- }]
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": prefix_action}
+ ]
},
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": prefix_action
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": prefix_action}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": rmap_action,
- "match": {
- addr_type: {
- "prefix_lists":
- "pf_list_1_{}".format(addr_type)
- }
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": rmap_action,
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
}
- }
- ]
+ ]
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_7 = {
"dest_link": {
"r3": {
"route_maps": [
- {"name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"}
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
]
}
}
"dest_link": {
"r3": {
"route_maps": [
- {"name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"}
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
]
}
}
}
}
}
- }
+ },
}
}
}
result = create_router_bgp(tgen, topo, input_dict_7)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
dut = "r3"
protocol = "bgp"
}
}
- #tgen.mininet_cli()
- result = verify_rib(tgen, adt, dut, input_dict_2, protocol=protocol,
- expected=False)
+ # tgen.mininet_cli()
+ result = verify_rib(
+ tgen, adt, dut, input_dict_2, protocol=protocol, expected=False
+ )
if "deny" in [prefix_action, rmap_action]:
assert result is not True, "Testcase {} : Failed \n "
- "Routes are still present \n Error: {}".\
- format(tc_name, result)
+ "Routes are still present \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
else:
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
def test_route_map_multiple_seq_different_match_set_clause_p0(request):
# Create Static routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": NETWORK[adt][0],
- "no_of_ip": 1,
- "next_hop": NEXT_HOP[adt]
- }]
+ "static_routes": [
+ {
+ "network": NETWORK[adt][0],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP[adt],
+ }
+ ]
}
}
result = create_static_routes(tgen, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Api call to redistribute static routes
input_dict_1 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
},
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create ip prefix list
input_dict_2 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
},
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [
- {
- "action": "permit",
- "match": {
- addr_type: {
- "prefix_lists":
- "pf_list_2_{}".format(addr_type)
- }
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_2_{}".format(addr_type)
+ }
+ },
+ "set": {"path": {"as_num": 500}},
},
- "set": {
- "path": {
- "as_num": 500
- }
- }
- },
- {
- "action": "permit",
- "match": {
- addr_type: {
- "prefix_lists":
- "pf_list_2_{}".format(addr_type)
- }
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_2_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150,},
},
- "set": {
- "locPrf": 150,
- }
- },
- {
- "action": "permit",
- "match": {
- addr_type: {
- "prefix_lists":
- "pf_list_1_{}".format(addr_type)
- }
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"metric": 50},
},
- "set": {
- "metric": 50
- }
- }
- ]
- }
- }
+ ]
+ }
+ }
}
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
}
}
},
"r4": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
},
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
}
}
},
"r4": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
for adt in ADDR_TYPES:
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
input_dict = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_list1": [{
- "set": {
- "metric": 50,
- }
- }],
- }
- }
+ "r3": {"route_maps": {"rmap_match_pf_list1": [{"set": {"metric": 50,}}],}}
}
static_routes = [NETWORK[adt][0]]
time.sleep(2)
- result = verify_bgp_attributes(tgen, adt, dut, static_routes,
- "rmap_match_pf_list1", input_dict)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_attributes(
+ tgen, adt, dut, static_routes, "rmap_match_pf_list1", input_dict
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
dut = "r4"
- result = verify_bgp_attributes(tgen, adt, dut, static_routes,
- "rmap_match_pf_list1", input_dict)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_attributes(
+ tgen, adt, dut, static_routes, "rmap_match_pf_list1", input_dict
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Testcase " + tc_name + " :Passed \n")
# Create Static routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": NETWORK[adt][0],
- "no_of_ip": 1,
- "next_hop": NEXT_HOP[adt]
- }]
+ "static_routes": [
+ {
+ "network": NETWORK[adt][0],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP[adt],
+ }
+ ]
}
}
result = create_static_routes(tgen, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Api call to redistribute static routes
input_dict_1 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
},
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create route map
input_dict_3 = {
"rmap_match_pf_1": [
{
"action": "permit",
- "set": {
- "metric": 50,
- "locPrf": 150,
- "weight": 4000
- }
+ "set": {"metric": 50, "locPrf": 150, "weight": 4000},
}
]
}
}
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name": "rmap_match_pf_1",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1",
+ "direction": "in",
+ }
+ ]
}
}
},
"r4": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name": "rmap_match_pf_1",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
},
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name": "rmap_match_pf_1",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1",
+ "direction": "in",
+ }
+ ]
}
}
},
"r4": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name": "rmap_match_pf_1",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
time.sleep(2)
for adt in ADDR_TYPES:
input_dict_4 = {
"r3": {
"route_maps": {
- "rmap_match_pf_1": [
- {
- "action": "permit",
- "set": {
- "metric": 50,
- }
- }
- ]
+ "rmap_match_pf_1": [{"action": "permit", "set": {"metric": 50,}}]
}
}
}
# Verifying RIB routes
static_routes = [NETWORK[adt][0]]
- result = verify_bgp_attributes(tgen, adt, "r3", static_routes,
- "rmap_match_pf_1", input_dict_3)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
-
- result = verify_bgp_attributes(tgen, adt, "r4", static_routes,
- "rmap_match_pf_1", input_dict_4)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_attributes(
+ tgen, adt, "r3", static_routes, "rmap_match_pf_1", input_dict_3
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ result = verify_bgp_attributes(
+ tgen, adt, "r4", static_routes, "rmap_match_pf_1", input_dict_4
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
logger.info("Testcase " + tc_name + " :Passed \n")
# Create Static routes
input_dict = {
"r1": {
- "static_routes": [{
- "network": NETWORK[adt][0],
- "no_of_ip": 1,
- "next_hop": NEXT_HOP[adt]
- }]
+ "static_routes": [
+ {
+ "network": NETWORK[adt][0],
+ "no_of_ip": 1,
+ "next_hop": NEXT_HOP[adt],
+ }
+ ]
}
}
result = create_static_routes(tgen, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Api call to redistribute static routes
input_dict_1 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
},
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create ip prefix list
input_dict_2 = {
"r1": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
},
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
"r1": {
"route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "set": {
- "metric": 50,
- "locPrf": 150,
- }
- }
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {"action": "permit", "set": {"metric": 50, "locPrf": 150,}}
]
}
}
}
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
"r3": {
"dest_link": {
"r1": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "out",
+ }
+ ]
}
}
}
"r3": {
"dest_link": {
"r1": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "out",
+ }
+ ]
}
}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create ip prefix list
input_dict_5 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
},
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_5)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create route map
for addr_type in ADDR_TYPES:
input_dict_6 = {
"r3": {
"route_maps": {
- "rmap_match_pf_2_{}".format(addr_type): [{
+ "rmap_match_pf_2_{}".format(addr_type): [
+ {
"action": "permit",
"match": {
addr_type: {
- "prefix_lists":
- "pf_list_1_{}".format(addr_type)
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
}
- }
+ },
}
]
}
}
result = create_route_maps(tgen, input_dict_6)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_7 = {
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv4",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv4",
+ "direction": "in",
+ }
+ ]
}
}
},
"r4": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv4",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv4",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
},
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv6",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv6",
+ "direction": "in",
+ }
+ ]
}
}
},
"r4": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv6",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv6",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_7)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
for adt in ADDR_TYPES:
# Verifying RIB routes
static_routes = [NETWORK[adt][0]]
- result = verify_bgp_attributes(tgen, adt, "r3", static_routes,
- "rmap_match_pf_1", input_dict_3)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_attributes(
+ tgen, adt, "r3", static_routes, "rmap_match_pf_1", input_dict_3
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
-
-
# Required to instantiate the topology builder class.
from lib.common_config import (
- start_topology, write_test_header,
- write_test_footer, create_static_routes,
- verify_rib, delete_route_maps, create_bgp_community_lists,
- interface_status, create_route_maps, create_prefix_lists,
- verify_route_maps, check_address_types, verify_bgp_community,
- shutdown_bringup_interface, verify_prefix_lists, reset_config_on_routers,
- verify_create_community_list)
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ create_static_routes,
+ verify_rib,
+ delete_route_maps,
+ create_bgp_community_lists,
+ interface_status,
+ create_route_maps,
+ create_prefix_lists,
+ verify_route_maps,
+ check_address_types,
+ verify_bgp_community,
+ shutdown_bringup_interface,
+ verify_prefix_lists,
+ reset_config_on_routers,
+ verify_create_community_list,
+)
from lib.topolog import logger
from lib.bgp import (
- verify_bgp_convergence, create_router_bgp,
- clear_bgp_and_verify, verify_bgp_attributes)
+ verify_bgp_convergence,
+ create_router_bgp,
+ clear_bgp_and_verify,
+ verify_bgp_attributes,
+)
from lib.topojson import build_topo_from_json, build_config_from_json
# Reading the data from JSON File for topology and configuration creation
# Global variables
# Global variables
bgp_convergence = False
-NETWORK = {
- "ipv4": ["11.0.20.1/32", "11.0.20.2/32"],
- "ipv6": ["2::1/128", "2::2/128"]
-}
+NETWORK = {"ipv4": ["11.0.20.1/32", "11.0.20.2/32"], "ipv6": ["2::1/128", "2::2/128"]}
bgp_convergence = False
BGP_CONVERGENCE = False
"""
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
- logger.info("="*40)
+ logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# Api call verify whether BGP is converged
bgp_convergence = verify_bgp_convergence(tgen, topo)
- assert bgp_convergence is True, ("setup_module :Failed \n Error:"
- " {}".format(bgp_convergence))
+ assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format(
+ bgp_convergence
+ )
logger.info("Running setup_module() done")
# Stop toplogy and Remove tmp files
tgen.stop_topology()
- logger.info("Testsuite end time: {}".format(
- time.asctime(time.localtime(time.time()))))
- logger.info("="*40)
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
#####################################################
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit",
- }]
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit",}
+ ]
},
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 10,
- "network": "any",
- "action": "permit",
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 10, "network": "any", "action": "permit",}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
- # Create route map
+ # Create route map
input_dict_3 = {
"r3": {
"route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_" + addr_type
- }
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {"prefix_lists": "pf_list_1_" + addr_type}
+ },
+ "set": {"locPrf": 150, "weight": 100},
},
- "set": {
- "locPrf": 150,
- "weight": 100
- }
- },
],
- "rmap_match_pf_2_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_" + addr_type
- }
+ "rmap_match_pf_2_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {"prefix_lists": "pf_list_1_" + addr_type}
+ },
+ "set": {"metric": 50},
},
- "set": {
- "metric": 50
- }
- },
- ]
+ ],
}
}
}
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
- "r3": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
- }
- }
- },
- "r4": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv4",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
- }
- }
- },
- "r4": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv6",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv4",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ },
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv6",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+ }
+ }
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
# dual stack changes
for addr_type in ADDR_TYPES:
- result4 = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result4)
+ tc_name, result4
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
# dual stack changes
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result4 = verify_bgp_attributes(tgen, addr_type, dut, routes[
- addr_type],rmap_name, input_dict_3)
+ result4 = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result4)
+ tc_name, result4
+ )
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
# dual stack changes
for addr_type in ADDR_TYPES:
- result4 = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result4)
+ tc_name, result4
+ )
# Verifying BGP set attributes
dut = "r4"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
# dual stack changes
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_2_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
# Uncomment next line for debugging
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit",
- }],
- "pf_list_2_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit",}
+ ],
+ "pf_list_2_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ],
},
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 10,
- "network": "any",
- "action": "permit",
- }],
- "pf_list_2_ipv6": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 10, "network": "any", "action": "permit",}
+ ],
+ "pf_list_2_ipv6": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ],
+ },
}
}
- }
+ }
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150,},
}
- },
- "set": {
- "locPrf": 150,
- }
- }],
- "rmap_match_pf_2_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
+ ],
+ "rmap_match_pf_2_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"metric": 50},
}
- },
- "set": {
- "metric": 50
- }
- }]
+ ],
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
- "r3": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
- }
- }
- },
- "r4": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv4",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
- }
- }
- },
- "r4": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv6",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv4",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ },
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv6",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+ }
+ }
+ }
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
input_dict = topo["routers"]
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
# dual stack changes
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result4 = verify_bgp_attributes(tgen, addr_type, dut, routes[
- addr_type],rmap_name, input_dict_3)
+ result4 = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result4)
+ tc_name, result4
+ )
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
# dual stack changes
for addr_type in ADDR_TYPES:
- result4 = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result4 = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result4 is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result4)
+ tc_name, result4
+ )
# Verifying BGP set attributes
dut = "r4"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_2_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[
- addr_type],rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Modify set/match clause of in-used route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "locPrf": 1000,
- }
- }],
- "rmap_match_pf_2_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "metric": 2000
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 1000,},
+ }
+ ],
+ "rmap_match_pf_2_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"metric": 2000},
+ }
+ ],
}
- }]
- }
- }
+ }
}
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r4"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_2_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_tag_1_{}".format(addr_type): [{
- "action": "deny",
- "match": {
- addr_type: {
- "tag": "4001"
- }
- }
- }]
+ "r3": {
+ "route_maps": {
+ "rmap_match_tag_1_{}".format(addr_type): [
+ {"action": "deny", "match": {addr_type: {"tag": "4001"}}}
+ ]
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Delete route maps
for addr_type in ADDR_TYPES:
- input_dict = {
- "r3": {
- "route_maps": ["rmap_match_tag_1_{}".format(addr_type)]
- }
- }
+ input_dict = {"r3": {"route_maps": ["rmap_match_tag_1_{}".format(addr_type)]}}
result = delete_route_maps(tgen, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
result = verify_route_maps(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
# Uncomment next line for debugging
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit",
- }]
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit",}
+ ]
},
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit",
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit",}
+ ]
+ },
}
}
- }
+ }
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150, "weight": 100},
}
- },
- "set": {
- "locPrf": 150,
- "weight": 100
- }
- }],
- "rmap_match_pf_2_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
+ ],
+ "rmap_match_pf_2_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"metric": 50},
}
- },
- "set": {
- "metric": 50
- }
- }]
+ ],
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
- "r3": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
- }
- }
- },
- "r4": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv4",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
- }
- }
- },
- "r4": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv6",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv4",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ },
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv6",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+ }
+ }
+ }
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
input_dict = topo["routers"]
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r4"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_2_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Modify ip prefix list
input_dict_2 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "deny"
- }]
- },
- "ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "deny"
- }]
- }
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "deny"}
+ ]
+ },
+ "ipv6": {
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "deny"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
sleep(5)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol, expected=False)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".format(
- tc_name, result)
+ "routes are not present \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol, expected=False)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n"
"Expected behaviour: routes are not present \n "
- "Error: {}".format(
- tc_name, result)
+ "Error: {}".format(tc_name, result)
write_test_footer(tc_name)
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
- },
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ },
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
- }
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150,},
}
- },
- "set": {
- "locPrf": 150,
- }
- }],
- "rmap_match_pf_2_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "metric": 50
- }
- }]
+ ],
+ "rmap_match_pf_2_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"metric": 50},
+ }
+ ],
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
for addr_type in ADDR_TYPES:
input_dict_4 = {
- "r3": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
- }
- }
- },
- "r4": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv4",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
- }
- }
- },
- "r4": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv6",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv4",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ },
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv6",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+ }
+ }
+ }
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
input_dict = topo["routers"]
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r4"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_2_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Remove/Delete prefix list
input_dict_3 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit",
- "delete": True
- }]
- },
+ "pf_list_1_ipv4": [
+ {
+ "seqid": 10,
+ "network": "any",
+ "action": "permit",
+ "delete": True,
+ }
+ ]
+ },
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit",
- "delete": True
- }]
+ "pf_list_1_ipv6": [
+ {
+ "seqid": 100,
+ "network": "any",
+ "action": "permit",
+ "delete": True,
+ }
+ ]
+ },
}
}
- }
}
result = create_prefix_lists(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
result = verify_prefix_lists(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to clear bgp, so config changes would be reflected
dut = "r3"
result = clear_bgp_and_verify(tgen, topo, dut)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol, expected=False)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".format(
- tc_name, result)
+ "routes are not present \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol, expected=False)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".\
- format(tc_name, result)
+ "routes are not present \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_5 = {
- "r1": {
- "route_maps": {
- "rm_r1_out_{}".format(addr_type): [{
- "action": "permit",
- "set": {
- "large_community": {"num": "1:1:1 1:2:3 2:1:1 2:2:2"}
- }
- }]
+ "r1": {
+ "route_maps": {
+ "rm_r1_out_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "set": {
+ "large_community": {"num": "1:1:1 1:2:3 2:1:1 2:2:2"}
+ },
+ }
+ ]
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_5)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_6 = {
- "r1": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r3": {
- "dest_link": {
- "r1": {
- "route_maps": [{
- "name": "rm_r1_out_ipv4",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r3": {
- "dest_link": {
- "r1": {
- "route_maps": [{
- "name": "rm_r1_out_ipv6",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "rm_r1_out_ipv4",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "rm_r1_out_ipv6",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
result = create_router_bgp(tgen, topo, input_dict_6)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
# Create standard large commumity-list
- input_dict_1 = {
- "r3": {
- "bgp_community_lists": [
- {
- "community_type": "standard",
- "action": "permit",
- "name": "rmap_lcomm_{}".format(addr_type),
- "value": "1:1:1 1:2:3 2:1:1 2:2:2",
- "large": True
- }
- ]
- }
+ input_dict_1 = {
+ "r3": {
+ "bgp_community_lists": [
+ {
+ "community_type": "standard",
+ "action": "permit",
+ "name": "rmap_lcomm_{}".format(addr_type),
+ "value": "1:1:1 1:2:3 2:1:1 2:2:2",
+ "large": True,
+ }
+ ]
}
- result = create_bgp_community_lists(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ }
+ result = create_bgp_community_lists(tgen, input_dict_1)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
# Verify BGP large community is created
result = verify_create_community_list(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
- # Create route map
+ # Create route map
input_dict_2 = {
+ "r3": {
+ "route_maps": {
+ "rm_r3_in_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "large-community-list": {
+ "id": "rmap_lcomm_" + addr_type
+ }
+ }
+ },
+ }
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_2)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ # Configure neighbor for route map
+ input_dict_3 = {
"r3": {
- "route_maps": {
- "rm_r3_in_{}".format(addr_type): [{
- "action": "permit",
- "match": {
- addr_type : {
- "large-community-list": {"id": "rmap_lcomm_"+
- addr_type}
- }
- }
- }]
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rm_r3_in_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rm_r3_in_ipv6",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
}
}
- }
- result = create_route_maps(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
-
- # Configure neighbor for route map
- input_dict_3 = {
- "r3": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name": "rm_r3_in_ipv4",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name": "rm_r3_in_ipv6",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ }
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
sleep(5)
# Verifying RIB routes
protocol = "bgp"
input_dict = topo["routers"]
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verify large-community-list
dut = "r3"
networks = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
- }
- input_dict_4 = {
- "largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2"
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
+ input_dict_4 = {"largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2"}
for addr_type in ADDR_TYPES:
- result = verify_bgp_community(tgen, addr_type, dut, networks[
- addr_type],input_dict_4)
+ result = verify_bgp_community(
+ tgen, addr_type, dut, networks[addr_type], input_dict_4
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
# Uncomment next line for debugging
# Api call to advertise networks
input_dict_nw1 = {
- "r1": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "advertise_networks": [
- {"network": "10.0.30.1/32"}
- ]
- }
- },
- "ipv6": {
- "unicast": {
- "advertise_networks": [
- {"network": "1::1/128"}
- ]
- }
- }
- }
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {"advertise_networks": [{"network": "10.0.30.1/32"}]}
+ },
+ "ipv6": {
+ "unicast": {"advertise_networks": [{"network": "1::1/128"}]}
+ },
}
}
}
+ }
result = create_router_bgp(tgen, topo, input_dict_nw1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Api call to advertise networks
input_dict_nw2 = {
- "r1": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "advertise_networks": [
- {"network": "20.0.30.1/32"}
- ]
- }
- },
- "ipv6": {
- "unicast": {
- "advertise_networks": [
- {"network": "2::1/128"}
- ]
- }
- }
- }
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {"advertise_networks": [{"network": "20.0.30.1/32"}]}
+ },
+ "ipv6": {
+ "unicast": {"advertise_networks": [{"network": "2::1/128"}]}
+ },
}
}
}
+ }
result = create_router_bgp(tgen, topo, input_dict_nw2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create ip prefix list
input_dict_2 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
- },
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ },
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create ip prefix list
input_dict_2 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_2_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
- },
+ "pf_list_2_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ },
"ipv6": {
- "pf_list_2_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_2_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- input_dict_3_addr_type ={}
+ input_dict_3_addr_type = {}
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150},
}
- },
- "set": {
- "locPrf": 150
- }
- }]
+ ]
+ }
}
}
- }
input_dict_3_addr_type[addr_type] = input_dict_3
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "locPrf": 200
- }
- }]
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 200},
+ }
+ ]
+ }
}
}
- }
input_dict_3_addr_type[addr_type] = input_dict_3
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_6 = {
- "r3": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
result = create_router_bgp(tgen, topo, input_dict_6)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
input_dict = topo["routers"]
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
- routes = {
- "ipv4": ["10.0.30.1/32"],
- "ipv6": ["1::1/128"]
- }
+ routes = {"ipv4": ["10.0.30.1/32"], "ipv6": ["1::1/128"]}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3_addr_type[addr_type])
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_bgp_attributes(
+ tgen,
+ addr_type,
+ dut,
+ routes[addr_type],
+ rmap_name,
+ input_dict_3_addr_type[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
# Verifying BGP set attributes
- routes = {
- "ipv4": ["20.0.30.1/32"],
- "ipv6": ["2::1/128"]
- }
+ routes = {"ipv4": ["20.0.30.1/32"], "ipv6": ["2::1/128"]}
for addr_type in ADDR_TYPES:
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
input_dict_5 = {
"r1": {
"route_maps": {
- "rm_r1_out_{}".format(addr_type): [{
- "action": "permit",
- "set": {
- "large_community": {
- "num": "1:1:1 1:2:3 2:1:1 2:2:2"}
+ "rm_r1_out_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "set": {
+ "large_community": {"num": "1:1:1 1:2:3 2:1:1 2:2:2"}
+ },
}
- }]
+ ]
}
}
}
result = create_route_maps(tgen, input_dict_5)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
for addr_type in ADDR_TYPES:
input_dict_6 = {
- "r1": {
- "bgp": {
- "address_family": {
- addr_type: {
- "unicast": {
- "neighbor": {
- "r3": {
- "dest_link": {
- "r1": {
- "route_maps": [{
- "name":
- "rm_r1_out_{}".format(addr_type),
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r1": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "rm_r1_out_{}".format(
+ addr_type
+ ),
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
}
result = create_router_bgp(tgen, topo, input_dict_6)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create ip prefix list
input_dict_2 = {
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
},
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
- # Create standard large commumity-list
+        # Create standard large community-list
input_dict_1 = {
"r3": {
"bgp_community_lists": [
"action": "permit",
"name": "rmap_lcomm_{}".format(addr_type),
"value": "1:1:1 1:2:3 2:1:1 2:2:2",
- "large": True
+ "large": True,
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verify BGP large community is created
result = verify_create_community_list(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
"r3": {
"route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "locPrf": 150,
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150,},
}
- }]
+ ]
}
}
}
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
for addr_type in ADDR_TYPES:
- # Create route map
+ # Create route map
input_dict_3 = {
"r3": {
"route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type : {
- "large_community_list": {"id": "rmap_lcomm_"+
- addr_type}
- }
- },
- "set": {
- "locPrf": 150,
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "large_community_list": {
+ "id": "rmap_lcomm_" + addr_type
+ }
+ }
+ },
+ "set": {"locPrf": 150,},
}
- }]
+ ]
}
}
}
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
for addr_type in ADDR_TYPES:
input_dict_4 = {
- "r3": {
- "bgp": {
- "address_family": {
- addr_type: {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_{}".format(addr_type),
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r3": {
+ "bgp": {
+ "address_family": {
+ addr_type: {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_{}".format(
+ addr_type
+ ),
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# sleep(10)
# Verifying RIB routes
dut = "r3"
for addr_type in ADDR_TYPES:
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
input_dict_2 = {
"r3": {
"prefix_lists": {
- "ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "deny"
- }]
- },
- "ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "deny"
- }]
- }
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "deny"}
+ ]
+ },
+ "ipv6": {
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "deny"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "locPrf": 150,
- }
- }]
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150,},
+ }
+ ]
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
}
}
}
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
}
}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
input_dict = topo["routers"]
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol, expected=False)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n Error"
- "Routes are still present: {}".format(
- tc_name, result)
+ "Routes are still present: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
# Remove applied rmap from neighbor
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
"direction": "in",
- "delete": True
- }]
+ "delete": True,
+ }
+ ]
}
}
}
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
"direction": "in",
- "delete": True
- }]
+ "delete": True,
+ }
+ ]
}
}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
for addr_type in ADDR_TYPES:
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
- },
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ },
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "locPrf": 150,
- "weight": 100
- }
- }]
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "5",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150, "weight": 100},
+ }
+ ]
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
}
}
}
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
}
}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
for addr_type in ADDR_TYPES:
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# clear bgp, so config changes would be reflected
dut = "r3"
result = clear_bgp_and_verify(tgen, topo, dut)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
for addr_type in ADDR_TYPES:
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Flap interface to see if route-map properties are intact
# Shutdown interface
# Verify BGP convergence once interface is up
result = verify_bgp_convergence(tgen, topo)
- assert result is True, (
- "setup_module :Failed \n Error:" " {}".format(result))
+ assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
# Verifying RIB routes
dut = "r3"
for addr_type in ADDR_TYPES:
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_no_match_set_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "5"
- }],
- "rmap_no_match_set_2_{}".format(addr_type): [{
- "action": "deny",
- "seq_id": "5"
- }]
+ "r3": {
+ "route_maps": {
+ "rmap_no_match_set_1_{}".format(addr_type): [
+ {"action": "permit", "seq_id": "5"}
+ ],
+ "rmap_no_match_set_2_{}".format(addr_type): [
+ {"action": "deny", "seq_id": "5"}
+ ],
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_no_match_set_1_ipv4",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_no_match_set_1_ipv4",
+ "direction": "in",
+ }
+ ]
}
}
},
"r4": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_no_match_set_2_ipv4",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_no_match_set_2_ipv4",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
},
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_no_match_set_1_ipv6",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_no_match_set_1_ipv6",
+ "direction": "in",
+ }
+ ]
}
}
},
"r4": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_no_match_set_2_ipv6",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_no_match_set_2_ipv6",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
for addr_type in ADDR_TYPES:
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol, expected=False)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".format(
- tc_name, result)
+ "routes are not present \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
input_dict_2 = {
"r3": {
"prefix_lists": {
- "ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
- },
- "ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "ipv4": {
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ },
+ "ipv6": {
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
- input_dict_3_addr_type ={}
+ input_dict_3_addr_type = {}
for addr_type in ADDR_TYPES:
input_dict_3 = {
"r3": {
"route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
"action": "permit",
"match": {
addr_type: {
- "prefix_lists": "pf_list_1_{}".format(
- addr_type)
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
}
},
- "set": {
- "metric": 50
+ "set": {"metric": 50},
+ }
+ ],
+ "rmap_match_pf_2_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
}
- }],
- "rmap_match_pf_2_{}".format(addr_type): [{
+ },
+ "set": {"locPrf": 150},
+ }
+ ],
+ "rmap_match_pf_3_{}".format(addr_type): [
+ {
"action": "permit",
"match": {
addr_type: {
- "prefix_lists": "pf_list_1_{}".format(
- addr_type)
- }},
- "set": {
- "locPrf": 150
- }
- }],
- "rmap_match_pf_3_{}".format(addr_type): [{
- "action": "permit",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(
- addr_type)
- }},
- "set": {
- "weight": 1000
- }
- }]
- }
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"weight": 1000},
}
- }
+ ],
+ }
+ }
+ }
input_dict_3_addr_type[addr_type] = input_dict_3
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
}
}
},
"r4": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv4",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv4",
+ "direction": "out",
+ }
+ ]
}
}
},
"r5": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_3_ipv4",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_3_ipv4",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
},
"r1": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
}
}
},
"r4": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv6",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv6",
+ "direction": "out",
+ }
+ ]
}
}
},
"r5": {
"dest_link": {
"r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_3_ipv6",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_3_ipv6",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
input_dict = topo["routers"]
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
rmap_name = "rmap_match_pf_1"
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[
- addr_type],rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r4"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
rmap_name = "rmap_match_pf_2"
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_2_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3_addr_type[addr_type],
- expected=False)
+ result = verify_bgp_attributes(
+ tgen,
+ addr_type,
+ dut,
+ routes[addr_type],
+ rmap_name,
+ input_dict_3_addr_type[addr_type],
+ expected=False,
+ )
assert result is not True, "Testcase {} : Failed \n"
- "Attributes are not set \n Error: {}".format(
- tc_name, result)
+ "Attributes are not set \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
dut = "r5"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r5"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
rmap_name = "rmap_match_pf_3"
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_3_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3_addr_type[addr_type],
- expected=False)
+ result = verify_bgp_attributes(
+ tgen,
+ addr_type,
+ dut,
+ routes[addr_type],
+ rmap_name,
+ input_dict_3_addr_type[addr_type],
+ expected=False,
+ )
assert result is not True, "Testcase {} : Failed \n"
- "Attributes are not set \n Error: {}".format(
- tc_name, result)
+ "Attributes are not set \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
},
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "locPrf": 150,
- "weight": 100,
- "metric": 50
- }
- }]
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150, "weight": 100, "metric": 50},
+ }
+ ]
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
- "r3": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
for addr_type in ADDR_TYPES:
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
rmap_name = "rmap_match_pf_1"
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
- },
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ },
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "10",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "locPrf": 150
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150},
+ "continue": "30",
},
- "continue": "30"
- },
- {
- "action": "permit",
- "seq_id": "20",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
+ {
+ "action": "permit",
+ "seq_id": "20",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"metric": 200},
},
- "set": {
- "metric": 200
- }
- },
- {
- "action": "permit",
- "seq_id": "30",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
+ {
+ "action": "permit",
+ "seq_id": "30",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"metric": 100},
},
- "set": {
- "metric": 100
- }
- }
- ]
+ ]
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
- "r3": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
input_dict = topo["routers"]
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
rmap_name = "rmap_match_pf_1"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
- }
- seq_id = {
- "ipv4": ["10", "30"],
- "ipv6": ["10", "30"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
+ seq_id = {"ipv4": ["10", "30"], "ipv6": ["10", "30"]}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[
- addr_type],rmap_name, input_dict_3, seq_id[addr_type])
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_bgp_attributes(
+ tgen,
+ addr_type,
+ dut,
+ routes[addr_type],
+ rmap_name,
+ input_dict_3,
+ seq_id[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
- },
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ },
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "seq_id": "10",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "goto": "30",
+ },
+ {
+ "action": "permit",
+ "seq_id": "20",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"metric": 100},
+ },
+ {
+ "action": "permit",
+ "seq_id": "30",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"metric": 200},
+ },
+ ]
+ }
+ }
+ }
+ result = create_route_maps(tgen, input_dict_3)
+ # tgen.mininet_cli()
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
+
+ # Configure neighbor for route map
+ input_dict_4 = {
"r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "seq_id": "10",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
}
- },
- "goto": "30"
},
- {
- "action": "permit",
- "seq_id": "20",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "metric": 100
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
}
},
- {
- "action": "permit",
- "seq_id": "30",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "metric": 200
- }
- }
- ]
+ }
}
}
- }
- result = create_route_maps(tgen, input_dict_3)
- # tgen.mininet_cli()
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
-
- # Configure neighbor for route map
- input_dict_4 = {
- "r3": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ }
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
for addr_type in ADDR_TYPES:
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
rmap_name = "rmap_match_pf_1"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
- }
- seq_id = {
- "ipv4": ["10", "30"],
- "ipv6": ["10", "30"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
+ seq_id = {"ipv4": ["10", "30"], "ipv6": ["10", "30"]}
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[
- addr_type],rmap_name, input_dict_3, seq_id[addr_type])
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = verify_bgp_attributes(
+ tgen,
+ addr_type,
+ dut,
+ routes[addr_type],
+ rmap_name,
+ input_dict_3,
+ seq_id[addr_type],
+ )
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
- },
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ },
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "permit",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "locPrf": 150
- },
- "call": "rmap_match_pf_2_{}".format(addr_type)
- }],
- "rmap_match_pf_2_{}".format(addr_type): [{
- "action": "permit",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "metric": 200
- }
- }]
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150},
+ "call": "rmap_match_pf_2_{}".format(addr_type),
+ }
+ ],
+ "rmap_match_pf_2_{}".format(addr_type): [
+ {
+ "action": "permit",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"metric": 200},
+ }
+ ],
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
- "r3": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv6",
- "direction": "in"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv6",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
for addr_type in ADDR_TYPES:
result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Verifying BGP set attributes
dut = "r3"
routes = {
- "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
- "ipv6": ["1::1/128", "1::2/128"]
+ "ipv4": ["10.0.20.1/32", "10.0.20.2/32"],
+ "ipv6": ["1::1/128", "1::2/128"],
}
rmap_name = "rmap_match_pf_1"
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_1_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
rmap_name = "rmap_match_pf_2"
for addr_type in ADDR_TYPES:
rmap_name = "rmap_match_pf_2_{}".format(addr_type)
- result = verify_bgp_attributes(tgen, addr_type, dut, routes[addr_type],
- rmap_name, input_dict_3)
+ result = verify_bgp_attributes(
+ tgen, addr_type, dut, routes[addr_type], rmap_name, input_dict_3
+ )
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"r3": {
"prefix_lists": {
"ipv4": {
- "pf_list_1_ipv4": [{
- "seqid": 10,
- "network": "any",
- "action": "permit"
- }]
- },
+ "pf_list_1_ipv4": [
+ {"seqid": 10, "network": "any", "action": "permit"}
+ ]
+ },
"ipv6": {
- "pf_list_1_ipv6": [{
- "seqid": 100,
- "network": "any",
- "action": "permit"
- }]
- }
+ "pf_list_1_ipv6": [
+ {"seqid": 100, "network": "any", "action": "permit"}
+ ]
+ },
}
}
}
result = create_prefix_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Create route map
for addr_type in ADDR_TYPES:
input_dict_3 = {
- "r3": {
- "route_maps": {
- "rmap_match_pf_1_{}".format(addr_type): [{
- "action": "deny",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "locPrf": 150,
- }
- }],
- "rmap_match_pf_2_{}".format(addr_type): [{
- "action": "deny",
- "match": {
- addr_type: {
- "prefix_lists": "pf_list_1_{}".format(addr_type)
- }
- },
- "set": {
- "metric": 50
- }
- }]
+ "r3": {
+ "route_maps": {
+ "rmap_match_pf_1_{}".format(addr_type): [
+ {
+ "action": "deny",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"locPrf": 150,},
+ }
+ ],
+ "rmap_match_pf_2_{}".format(addr_type): [
+ {
+ "action": "deny",
+ "match": {
+ addr_type: {
+ "prefix_lists": "pf_list_1_{}".format(addr_type)
+ }
+ },
+ "set": {"metric": 50},
+ }
+ ],
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
- "r3": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
- }
- }
- },
- "r4": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv6",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r1": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_1_ipv4",
- "direction": "in"
- }]
- }
- }
- },
- "r4": {
- "dest_link": {
- "r3": {
- "route_maps": [{
- "name":
- "rmap_match_pf_2_ipv6",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r3": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv6",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ },
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r1": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_1_ipv4",
+ "direction": "in",
+ }
+ ]
+ }
+ }
+ },
+ "r4": {
+ "dest_link": {
+ "r3": {
+ "route_maps": [
+ {
+ "name": "rmap_match_pf_2_ipv6",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ },
+ }
+ }
+ },
+ }
+ }
+ }
+ }
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
protocol = "bgp"
input_dict = topo["routers"]
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol, expected=False)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".\
- format(tc_name, result)
+ "routes are not present \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
# Verifying RIB routes
dut = "r4"
protocol = "bgp"
for addr_type in ADDR_TYPES:
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol, expected=False)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n"
- "routes are not present \n Error: {}".\
- format(tc_name, result)
+ "routes are not present \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
input_dict = {
"r1": {
"static_routes": [
- {
- "network": NETWORK[addr_type],
- "next_hop": "Null0",
- "tag": 4001
- }
+ {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001}
]
}
}
result = create_static_routes(tgen, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Api call to redistribute static routes
input_dict_1 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
},
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
- }
- }
+ },
+ },
}
}
}
result = create_router_bgp(tgen, topo, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create route map
input_dict_3 = {
- "r1": {
- "route_maps": {
- "rmap_match_tag_1_{}".format(addr_type): [{
- "action": "permit",
- "match": {
- addr_type: {
- "tag": "4001"
- }
- }
- }]
+ "r1": {
+ "route_maps": {
+ "rmap_match_tag_1_{}".format(addr_type): [
+ {"action": "permit", "match": {addr_type: {"tag": "4001"}}}
+ ]
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
- "r1": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r3": {
- "dest_link": {
- "r1": {
- "route_maps": [{
- "name":
- "rmap_match_tag_1_ipv4",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r3": {
- "dest_link": {
- "r1": {
- "route_maps": [{
- "name":
- "rmap_match_tag_1_ipv6",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "rmap_match_tag_1_ipv4",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "rmap_match_tag_1_ipv6",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
+ }
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
input_dict = {
"r1": {
"static_routes": [
- {
- "network": NETWORK[addr_type],
- "next_hop": "Null0",
- "tag": 4001
- }
+ {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001}
]
}
}
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol)
+ result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
input_dict = {
"r1": {
"static_routes": [
- {
- "network": NETWORK[addr_type],
- "next_hop": "Null0",
- "tag": 4001
- }
+ {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001}
]
}
}
result = create_static_routes(tgen, input_dict)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Api call to redistribute static routes
input_dict_1 = {
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
},
"unicast": {
"redistribute": [
{"redist_type": "static"},
- {"redist_type": "connected"}
+ {"redist_type": "connected"},
]
}
- }
- }
+ },
+ },
}
}
}
result = create_router_bgp(tgen, topo, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Create route map
input_dict_3 = {
- "r1": {
- "route_maps": {
- "rmap_match_tag_1_{}".format(addr_type): [{
- "action": "deny",
- "match": {
- addr_type: {
- "tag": "4001"
- }
- }
- }]
+ "r1": {
+ "route_maps": {
+ "rmap_match_tag_1_{}".format(addr_type): [
+ {"action": "deny", "match": {addr_type: {"tag": "4001"}}}
+ ]
+ }
}
}
- }
result = create_route_maps(tgen, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
# Configure neighbor for route map
input_dict_4 = {
- "r1": {
- "bgp": {
- "address_family": {
- "ipv4": {
- "unicast": {
- "neighbor": {
- "r3": {
- "dest_link": {
- "r1": {
- "route_maps": [{
- "name":
- "rmap_match_tag_1_ipv4",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- },
- "ipv6": {
- "unicast": {
- "neighbor": {
- "r3": {
- "dest_link": {
- "r1": {
- "route_maps": [{
- "name":
- "rmap_match_tag_1_ipv6",
- "direction": "out"
- }]
- }
- }
- }
- }
- }
- }
- }
- }
- }
+ "r1": {
+ "bgp": {
+ "address_family": {
+ "ipv4": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "rmap_match_tag_1_ipv4",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "ipv6": {
+ "unicast": {
+ "neighbor": {
+ "r3": {
+ "dest_link": {
+ "r1": {
+ "route_maps": [
+ {
+ "name": "rmap_match_tag_1_ipv6",
+ "direction": "out",
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ }
+ }
+ }
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
# Verifying RIB routes
dut = "r3"
input_dict = {
"r1": {
"static_routes": [
- {
- "network": NETWORK[addr_type],
- "next_hop": "Null0",
- "tag": 4001
- }
+ {"network": NETWORK[addr_type], "next_hop": "Null0", "tag": 4001}
]
}
}
- result = verify_rib(tgen, addr_type, dut, input_dict,
- protocol=protocol, expected=False)
+ result = verify_rib(
+ tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n"
- "routes are denied \n Error: {}".format(
- tc_name, result)
+ "routes are denied \n Error: {}".format(tc_name, result)
logger.info("Expected behaviour: {}".format(result))
write_test_footer(tc_name)
# Uncomment next line for debugging
# tgen.mininet_cli()
+
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import pytest
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
tgen = get_topogen(self)
for routern in range(1, 2):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
def setup_module(mod):
"Sets up the pytest environment"
for rname, router in tgen.routers().iteritems():
router.run("/bin/bash {}/setup_vrfs".format(CWD))
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
# After loading the configurations, this function loads configured daemons.
tgen.start_router()
- #tgen.mininet_cli()
+ # tgen.mininet_cli()
+
def teardown_module(mod):
"Teardown the pytest environment"
# This function tears down the whole topology.
tgen.stop_topology()
+
def test_vrf_route_leak():
logger.info("Ensure that routes are leaked back and forth")
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- r1 = tgen.gears['r1']
+ r1 = tgen.gears["r1"]
donna = r1.vtysh_cmd("show ip route vrf DONNA json", isjson=True)
route0 = donna["10.0.0.0/24"][0]
- assert route0['protocol'] == "connected"
+ assert route0["protocol"] == "connected"
route1 = donna["10.0.1.0/24"][0]
- assert route1['protocol'] == "bgp"
- assert route1['selected'] == True
- nhop = route1['nexthops'][0]
- assert nhop['fib'] == True
+ assert route1["protocol"] == "bgp"
+ assert route1["selected"] == True
+ nhop = route1["nexthops"][0]
+ assert nhop["fib"] == True
route2 = donna["10.0.2.0/24"][0]
- assert route2['protocol'] == "connected"
+ assert route2["protocol"] == "connected"
route3 = donna["10.0.3.0/24"][0]
- assert route3['protocol'] == "bgp"
- assert route3['selected'] == True
- nhop = route3['nexthops'][0]
- assert nhop['fib'] == True
+ assert route3["protocol"] == "bgp"
+ assert route3["selected"] == True
+ nhop = route3["nexthops"][0]
+ assert nhop["fib"] == True
eva = r1.vtysh_cmd("show ip route vrf EVA json", isjson=True)
route0 = eva["10.0.0.0/24"][0]
- assert route0['protocol'] == "bgp"
- assert route0['selected'] == True
- nhop = route0['nexthops'][0]
- assert nhop['fib'] == True
+ assert route0["protocol"] == "bgp"
+ assert route0["selected"] == True
+ nhop = route0["nexthops"][0]
+ assert nhop["fib"] == True
route1 = eva["10.0.1.0/24"][0]
- assert route1['protocol'] == "connected"
+ assert route1["protocol"] == "connected"
route2 = eva["10.0.2.0/24"][0]
- assert route2['protocol'] == "bgp"
- assert route2['selected'] == True
- nhop = route2['nexthops'][0]
- assert nhop['fib'] == True
+ assert route2["protocol"] == "bgp"
+ assert route2["selected"] == True
+ nhop = route2["nexthops"][0]
+ assert nhop["fib"] == True
route3 = eva["10.0.3.0/24"][0]
- assert route3['protocol'] == "connected"
- #tgen.mininet_cli()
+ assert route3["protocol"] == "connected"
+ # tgen.mininet_cli()
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 3):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_aggregate_address_origin():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears['r2']
+ router = tgen.gears["r2"]
def _bgp_converge(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
expected = {
- '192.168.255.1': {
- 'bgpState': 'Established',
- 'addressFamilyInfo': {
- 'ipv4Unicast': {
- 'acceptedPrefixCounter': 3
- }
- }
+ "192.168.255.1": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 3}},
}
}
return topotest.json_cmp(output, expected)
def _bgp_aggregate_address_has_metric(router):
output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.0/24 json"))
- expected = {
- 'paths': [
- {
- 'origin': 'IGP'
- }
- ]
- }
+ expected = {"paths": [{"origin": "IGP"}]}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_converge, router)
test_func = functools.partial(_bgp_aggregate_address_has_metric, router)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
- assert result is None, 'Failed to see applied ORIGIN (igp) for aggregated prefix in "{}"'.format(router)
+ assert (
+ result is None
+ ), 'Failed to see applied ORIGIN (igp) for aggregated prefix in "{}"'.format(router)
+
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 3):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_maximum_prefix_invalid():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears['r2']
+ router = tgen.gears["r2"]
def _bgp_converge(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
expected = {
- '192.168.255.1': {
- 'bgpState': 'Established',
- 'addressFamilyInfo': {
- 'ipv4Unicast': {
- 'acceptedPrefixCounter': 3
- }
- }
+ "192.168.255.1": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 3}},
}
}
return topotest.json_cmp(output, expected)
def _bgp_aggregate_address_has_metric(router):
output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.0/24 json"))
- expected = {
- 'paths': [
- {
- 'metric': 123
- }
- ]
- }
+ expected = {"paths": [{"metric": 123}]}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_converge, router)
test_func = functools.partial(_bgp_aggregate_address_has_metric, router)
success, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5)
- assert result is None, 'Failed to see applied metric for aggregated prefix in "{}"'.format(router)
+ assert (
+ result is None
+ ), 'Failed to see applied metric for aggregated prefix in "{}"'.format(router)
+
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 4):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_as_wide_bgp_identifier():
tgen = get_topogen()
def _bgp_converge(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
- expected = {
- '192.168.255.1': {
- 'bgpState': 'Established'
- }
- }
+ expected = {"192.168.255.1": {"bgpState": "Established"}}
return topotest.json_cmp(output, expected)
def _bgp_failed(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
expected = {
- '192.168.255.1': {
- 'lastNotificationReason': 'OPEN Message Error/Bad BGP Identifier'
+ "192.168.255.1": {
+ "lastNotificationReason": "OPEN Message Error/Bad BGP Identifier"
}
}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(_bgp_converge, tgen.gears['r1'])
+ test_func = functools.partial(_bgp_converge, tgen.gears["r1"])
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
- assert result is None, 'Failed to converge: "{}"'.format(tgen.gears['r1'])
+ assert result is None, 'Failed to converge: "{}"'.format(tgen.gears["r1"])
- test_func = functools.partial(_bgp_failed, tgen.gears['r3'])
+ test_func = functools.partial(_bgp_failed, tgen.gears["r3"])
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
- assert result is None, 'Bad BGP Identifier notification not sent: "{}"'.format(tgen.gears['r3'])
+ assert result is None, 'Bad BGP Identifier notification not sent: "{}"'.format(
+ tgen.gears["r3"]
+ )
+
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import pytest
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 3):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_maximum_prefix_invalid():
tgen = get_topogen()
def _bgp_converge(router):
while True:
- output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
- if output['192.168.255.1']['bgpState'] == 'Established':
- if output['192.168.255.1']['addressFamilyInfo']['ipv4Unicast']['acceptedPrefixCounter'] == 2:
+ output = json.loads(
+ tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")
+ )
+ if output["192.168.255.1"]["bgpState"] == "Established":
+ if (
+ output["192.168.255.1"]["addressFamilyInfo"]["ipv4Unicast"][
+ "acceptedPrefixCounter"
+ ]
+ == 2
+ ):
return True
def _bgp_comm_list_delete(router):
- output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json"))
- if '333:333' in output['paths'][0]['community']['list']:
+ output = json.loads(
+ tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json")
+ )
+ if "333:333" in output["paths"][0]["community"]["list"]:
return False
return True
- if _bgp_converge('r2'):
- assert _bgp_comm_list_delete('r2') == True
+ if _bgp_converge("r2"):
+ assert _bgp_comm_list_delete("r2") == True
+
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 3):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_default_originate_route_map():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears['r2']
+ router = tgen.gears["r2"]
def _bgp_converge(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
expected = {
- '192.168.255.1': {
- 'bgpState': 'Established',
- 'addressFamilyInfo': {
- 'ipv4Unicast': {
- 'acceptedPrefixCounter': 1
- }
- }
+ "192.168.255.1": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 1}},
}
}
return topotest.json_cmp(output, expected)
def _bgp_default_route_has_metric(router):
output = json.loads(router.vtysh_cmd("show ip bgp 0.0.0.0/0 json"))
- expected = {
- 'paths': [
- {
- 'metric': 123
- }
- ]
- }
+ expected = {"paths": [{"metric": 123}]}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_converge, router)
test_func = functools.partial(_bgp_default_route_has_metric, router)
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
- assert result is None, 'Failed to see applied metric for default route in "{}"'.format(router)
+ assert (
+ result is None
+ ), 'Failed to see applied metric for default route in "{}"'.format(router)
+
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 3):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_maximum_prefix_invalid():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears['r1']
+ router = tgen.gears["r1"]
def _bgp_converge(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json"))
expected = {
- '192.168.255.2': {
- 'bgpState': 'Established',
- 'addressFamilyInfo': {
- 'ipv4Unicast': {
- 'acceptedPrefixCounter': 2
- }
- }
+ "192.168.255.2": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
}
}
return topotest.json_cmp(output, expected)
def _bgp_distance_change(router):
- router.vtysh_cmd("""
+ router.vtysh_cmd(
+ """
configure terminal
router bgp 65000
address-family ipv4 unicast
distance bgp 123 123 123
- """)
+ """
+ )
def _bgp_check_distance_change(router):
output = json.loads(router.vtysh_cmd("show ip route 172.16.255.254/32 json"))
- expected = {
- '172.16.255.254/32': [
- {
- 'protocol': 'bgp',
- 'distance': 123
- }
- ]
- }
+ expected = {"172.16.255.254/32": [{"protocol": "bgp", "distance": 123}]}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_converge, router)
test_func = functools.partial(_bgp_check_distance_change, router)
success, result = topotest.run_and_expect(test_func, None, count=15, wait=0.5)
- assert result is None, 'Failed to see applied BGP distance in RIB "{}"'.format(router)
+ assert result is None, 'Failed to see applied BGP distance in RIB "{}"'.format(
+ router
+ )
+
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 7):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r3'])
- switch.add_link(tgen.gears['r4'])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r5"])
+ switch.add_link(tgen.gears["r6"])
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['r5'])
- switch.add_link(tgen.gears['r6'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_ebgp_requires_policy():
tgen = get_topogen()
pytest.skip(tgen.errors)
def _bgp_converge(router):
- output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
- expected = {
- '192.168.255.1': {
- 'bgpState': 'Established'
- }
- }
+ output = json.loads(
+ tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")
+ )
+ expected = {"192.168.255.1": {"bgpState": "Established"}}
return topotest.json_cmp(output, expected)
def _bgp_has_routes(router):
- output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 routes json"))
- expected = {
- 'routes': {
- '172.16.255.254/32': [
- {
- 'valid': True
- }
- ]
- }
- }
+ output = json.loads(
+ tgen.gears[router].vtysh_cmd(
+ "show ip bgp neighbor 192.168.255.1 routes json"
+ )
+ )
+ expected = {"routes": {"172.16.255.254/32": [{"valid": True}]}}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(_bgp_converge, 'r2')
+ test_func = functools.partial(_bgp_converge, "r2")
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert success is True, 'Failed bgp convergence (r2) in "{}"'.format(router)
- test_func = functools.partial(_bgp_has_routes, 'r2')
+ test_func = functools.partial(_bgp_has_routes, "r2")
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert success is True, 'eBGP policy is not working (r2) in "{}"'.format(router)
- test_func = functools.partial(_bgp_converge, 'r4')
+ test_func = functools.partial(_bgp_converge, "r4")
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert success is True, 'Failed bgp convergence (r4) in "{}"'.format(router)
- test_func = functools.partial(_bgp_has_routes, 'r4')
+ test_func = functools.partial(_bgp_has_routes, "r4")
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert success is False, 'eBGP policy is not working (r4) in "{}"'.format(router)
- test_func = functools.partial(_bgp_converge, 'r6')
+ test_func = functools.partial(_bgp_converge, "r6")
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert success is True, 'Failed bgp convergence (r6) in "{}"'.format(router)
- test_func = functools.partial(_bgp_has_routes, 'r6')
+ test_func = functools.partial(_bgp_has_routes, "r6")
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert success is True, 'eBGP policy is not working (r6) in "{}"'.format(router)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import sys
import pytest
-sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
+sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../"))
from lib.ltemplate import *
+
def test_check_linux_vrf():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest('scripts/check_linux_vrf.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
+ ltemplateTest("scripts/check_linux_vrf.py", False, CliOnFail, CheckFunc)
+
def test_adjacencies():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
- ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
+ ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc)
+
def SKIP_test_add_routes():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
- ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
+ ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc)
+
def test_check_routes():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
- ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
+ ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc)
-#manual data path setup test - remove once have bgp/zebra vrf path working
+
+# manual data path setup test - remove once have bgp/zebra vrf path working
def test_check_linux_mpls():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest('scripts/check_linux_mpls.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
+ ltemplateTest("scripts/check_linux_mpls.py", False, CliOnFail, CheckFunc)
+
def test_del_bgp_instances():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
- ltemplateTest('scripts/del_bgp_instances.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
+ ltemplateTest("scripts/del_bgp_instances.py", False, CliOnFail, CheckFunc)
+
-if __name__ == '__main__':
+if __name__ == "__main__":
retval = pytest.main(["-s"])
sys.exit(retval)
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
class BGPIPV6RTADVTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# Create 2 routers.
- tgen.add_router('r1')
- tgen.add_router('r2')
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
def setup_module(mod):
"Sets up the pytest environment"
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
# Initialize all routers.
tgen.start_router()
+
def teardown_module(_mod):
"Teardown the pytest environment"
tgen = get_topogen()
# Check IPv4 routing tables.
logger.info("Checking IPv4 routes for convergence")
for router in tgen.routers().values():
- json_file = '{}/{}/ipv4_routes.json'.format(CWD, router.name)
+ json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name)
if not os.path.isfile(json_file):
- logger.info('skipping file {}'.format(json_file))
+ logger.info("skipping file {}".format(json_file))
continue
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip route json'.format(router.name), expected)
- _, result = topotest.run_and_expect(test_func, None, count=160,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show ip route json".format(router.name),
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
# Check IPv6 routing tables.
logger.info("Checking IPv6 routes for convergence")
for router in tgen.routers().values():
- json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name)
+ json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name)
if not os.path.isfile(json_file):
- logger.info('skipping file {}'.format(json_file))
+ logger.info("skipping file {}".format(json_file))
continue
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ipv6 route json'.format(router.name), expected)
- _, result = topotest.run_and_expect(test_func, None, count=160,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show ipv6 route json".format(router.name),
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
from mininet.topo import Topo
import shutil
+
CWD = os.path.dirname(os.path.realpath(__file__))
# test name based on directory
TEST = os.path.basename(CWD)
+
class ThisTestTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# between routers, switches and hosts.
#
# Create P/PE routers
- tgen.add_router('r1')
- #check for mpls
+ tgen.add_router("r1")
+ # check for mpls
if tgen.hasmpls != True:
- logger.info('MPLS not available, tests will be skipped')
+ logger.info("MPLS not available, tests will be skipped")
return
for routern in range(2, 5):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
# Create CE routers
for routern in range(1, 4):
- tgen.add_router('ce{}'.format(routern))
+ tgen.add_router("ce{}".format(routern))
- #CE/PE links
- tgen.add_link(tgen.gears['ce1'], tgen.gears['r1'], 'ce1-eth0', 'r1-eth4')
- tgen.add_link(tgen.gears['ce2'], tgen.gears['r3'], 'ce2-eth0', 'r3-eth4')
- tgen.add_link(tgen.gears['ce3'], tgen.gears['r4'], 'ce3-eth0', 'r4-eth4')
+ # CE/PE links
+ tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "ce1-eth0", "r1-eth4")
+ tgen.add_link(tgen.gears["ce2"], tgen.gears["r3"], "ce2-eth0", "r3-eth4")
+ tgen.add_link(tgen.gears["ce3"], tgen.gears["r4"], "ce3-eth0", "r4-eth4")
# Create a switch with just one router connected to it to simulate a
# empty network.
switch = {}
- switch[0] = tgen.add_switch('sw0')
- switch[0].add_link(tgen.gears['r1'], nodeif='r1-eth0')
- switch[0].add_link(tgen.gears['r2'], nodeif='r2-eth0')
+ switch[0] = tgen.add_switch("sw0")
+ switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0")
+ switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0")
- switch[1] = tgen.add_switch('sw1')
- switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth1')
- switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth0')
- switch[1].add_link(tgen.gears['r4'], nodeif='r4-eth0')
+ switch[1] = tgen.add_switch("sw1")
+ switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1")
+ switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0")
+ switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0")
+
+ switch[1] = tgen.add_switch("sw2")
+ switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth2")
+ switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth1")
- switch[1] = tgen.add_switch('sw2')
- switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth2')
- switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth1')
def ltemplatePreRouterStartHook():
cc = ltemplateRtrCmd()
tgen = get_topogen()
- logger.info('pre router-start hook')
- #check for mpls
+ logger.info("pre router-start hook")
+ # check for mpls
if tgen.hasmpls != True:
- logger.info('MPLS not available, skipping setup')
+ logger.info("MPLS not available, skipping setup")
return False
- #check for normal init
+ # check for normal init
if len(tgen.net) == 1:
- logger.info('Topology not configured, skipping setup')
+ logger.info("Topology not configured, skipping setup")
return False
- #configure r2 mpls interfaces
- intfs = ['lo', 'r2-eth0', 'r2-eth1', 'r2-eth2']
+ # configure r2 mpls interfaces
+ intfs = ["lo", "r2-eth0", "r2-eth1", "r2-eth2"]
for intf in intfs:
- cc.doCmd(tgen, 'r2', 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf))
- #configure MPLS
- rtrs = ['r1', 'r3', 'r4']
- cmds = ['echo 1 > /proc/sys/net/mpls/conf/lo/input']
+ cc.doCmd(tgen, "r2", "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf))
+ # configure MPLS
+ rtrs = ["r1", "r3", "r4"]
+ cmds = ["echo 1 > /proc/sys/net/mpls/conf/lo/input"]
for rtr in rtrs:
router = tgen.gears[rtr]
for cmd in cmds:
cc.doCmd(tgen, rtr, cmd)
- intfs = ['lo', rtr+'-eth0', rtr+'-eth4']
+ intfs = ["lo", rtr + "-eth0", rtr + "-eth4"]
for intf in intfs:
- cc.doCmd(tgen, rtr, 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf))
- logger.info('setup mpls input')
+ cc.doCmd(
+ tgen, rtr, "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf)
+ )
+ logger.info("setup mpls input")
return True
+
def ltemplatePostRouterStartHook():
- logger.info('post router-start hook')
+ logger.info("post router-start hook")
return True
-
from lutil import luCommand
-luCommand('r1','vtysh -c "show bgp next"','99.0.0.. valid', 'wait', 'See CE static NH')
-luCommand('r3','vtysh -c "show bgp next"','99.0.0.. valid', 'wait', 'See CE static NH')
-luCommand('r4','vtysh -c "show bgp next"','99.0.0.. valid', 'wait', 'See CE static NH')
-luCommand('r1','vtysh -c "show bgp ipv4 uni"','i5.*i5','wait','See CE routes')
-luCommand('r3','vtysh -c "show bgp ipv4 uni"','i5.*i5','wait','See CE routes')
-luCommand('r4','vtysh -c "show bgp ipv4 uni"','i5.*i5','wait','See CE routes')
-luCommand('ce1','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes')
-luCommand('r1','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes')
-luCommand('ce2','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes')
-luCommand('r3','vtysh -c "show bgp ipv4 uni 5.1.0.0/24"','','none','See CE routes')
-luCommand('ce3','vtysh -c "show bgp ipv4 uni 5.1.2.0/24"','','none','See CE routes')
-luCommand('r4','vtysh -c "show bgp ipv4 uni 5.1.2.0/24"','','none','See CE routes')
-luCommand('r1','vtysh -c "add vrf cust1 prefix 99.0.0.1/32"','.','none','IP Address')
-luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','wait','Local Registration')
-luCommand('r1','vtysh -c "show vnc registrations imported"','2 out of 2 imported','wait','Imported Registrations')
-luCommand('r3','vtysh -c "show bgp ipv4 vpn"','i99.0.0.1/32','wait','See R1s static address')
-luCommand('r4','vtysh -c "show bgp ipv4 vpn"','i99.0.0.1/32','wait','See R1s static address')
-luCommand('r3','vtysh -c "show bgp ipv4 vpn rd 10:1"','i5.*i5','wait','See R1s imports')
-luCommand('r4','vtysh -c "show bgp ipv4 vpn rd 10:1"','i5.*i5','wait','See R1s imports')
+luCommand(
+ "r1", 'vtysh -c "show bgp next"', "99.0.0.. valid", "wait", "See CE static NH"
+)
+luCommand(
+ "r3", 'vtysh -c "show bgp next"', "99.0.0.. valid", "wait", "See CE static NH"
+)
+luCommand(
+ "r4", 'vtysh -c "show bgp next"', "99.0.0.. valid", "wait", "See CE static NH"
+)
+luCommand("r1", 'vtysh -c "show bgp ipv4 uni"', "i5.*i5", "wait", "See CE routes")
+luCommand("r3", 'vtysh -c "show bgp ipv4 uni"', "i5.*i5", "wait", "See CE routes")
+luCommand("r4", 'vtysh -c "show bgp ipv4 uni"', "i5.*i5", "wait", "See CE routes")
+luCommand("ce1", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes")
+luCommand("r1", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes")
+luCommand("ce2", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes")
+luCommand("r3", 'vtysh -c "show bgp ipv4 uni 5.1.0.0/24"', "", "none", "See CE routes")
+luCommand("ce3", 'vtysh -c "show bgp ipv4 uni 5.1.2.0/24"', "", "none", "See CE routes")
+luCommand("r4", 'vtysh -c "show bgp ipv4 uni 5.1.2.0/24"', "", "none", "See CE routes")
-luCommand('r3','vtysh -c "add vrf cust1 prefix 99.0.0.2/32"','.','none','IP Address')
-luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','wait','Local Registration')
-have2ndImports = luCommand('r3','vtysh -c "show vnc registrations imported"','2 out of 2 imported','none','Imported Registrations',2)
+luCommand(
+ "r1", 'vtysh -c "add vrf cust1 prefix 99.0.0.1/32"', ".", "none", "IP Address"
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.1",
+ "wait",
+ "Local Registration",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations imported"',
+ "2 out of 2 imported",
+ "wait",
+ "Imported Registrations",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show bgp ipv4 vpn"',
+ "i99.0.0.1/32",
+ "wait",
+ "See R1s static address",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show bgp ipv4 vpn"',
+ "i99.0.0.1/32",
+ "wait",
+ "See R1s static address",
+)
+luCommand(
+ "r3", 'vtysh -c "show bgp ipv4 vpn rd 10:1"', "i5.*i5", "wait", "See R1s imports"
+)
+luCommand(
+ "r4", 'vtysh -c "show bgp ipv4 vpn rd 10:1"', "i5.*i5", "wait", "See R1s imports"
+)
+
+luCommand(
+ "r3", 'vtysh -c "add vrf cust1 prefix 99.0.0.2/32"', ".", "none", "IP Address"
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.2",
+ "wait",
+ "Local Registration",
+)
+have2ndImports = luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations imported"',
+ "2 out of 2 imported",
+ "none",
+ "Imported Registrations",
+ 2,
+)
if have2ndImports:
- luCommand('r3','vtysh -c "show vnc registrations imported"','2 out of 2 imported','pass','Imported Registrations')
-luCommand('r1','vtysh -c "show bgp ipv4 vpn"','i99.0.0.2/32','wait','See R3s static address')
-luCommand('r4','vtysh -c "show bgp ipv4 vpn"','i99.0.0.2/32','wait','See R3s static address')
+ luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations imported"',
+ "2 out of 2 imported",
+ "pass",
+ "Imported Registrations",
+ )
+luCommand(
+ "r1",
+ 'vtysh -c "show bgp ipv4 vpn"',
+ "i99.0.0.2/32",
+ "wait",
+ "See R3s static address",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show bgp ipv4 vpn"',
+ "i99.0.0.2/32",
+ "wait",
+ "See R3s static address",
+)
if have2ndImports:
- luCommand('r1','vtysh -c "show bgp ipv4 vpn rd 10:3"','i5.*i5','none','See R3s imports')
- luCommand('r4','vtysh -c "show bgp ipv4 vpn rd 10:3"','i5.*i5','none','See R3s imports')
+ luCommand(
+ "r1",
+ 'vtysh -c "show bgp ipv4 vpn rd 10:3"',
+ "i5.*i5",
+ "none",
+ "See R3s imports",
+ )
+ luCommand(
+ "r4",
+ 'vtysh -c "show bgp ipv4 vpn rd 10:3"',
+ "i5.*i5",
+ "none",
+ "See R3s imports",
+ )
-luCommand('r4','vtysh -c "add vrf cust1 prefix 99.0.0.3/32"','.','none','IP Address')
-luCommand('r4','vtysh -c "show vnc registrations local"','99.0.0.3','wait','Local Registration')
-luCommand('r4','vtysh -c "show vnc registrations imported"','2 out of 2 imported','wait','Imported Registrations')
-luCommand('r1','vtysh -c "show bgp ipv4 vpn"','i99.0.0.3/32','wait','See R4s static address')
-luCommand('r3','vtysh -c "show bgp ipv4 vpn"','i99.0.0.3/32','wait','See R4s static address')
-luCommand('r1','vtysh -c "show bgp ipv4 vpn rd 10:4"','i5.*i5','wait','See R4s imports')
-luCommand('r3','vtysh -c "show bgp ipv4 vpn rd 10:4"','i5.*i5','wait','See R4s imports')
+luCommand(
+ "r4", 'vtysh -c "add vrf cust1 prefix 99.0.0.3/32"', ".", "none", "IP Address"
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.3",
+ "wait",
+ "Local Registration",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations imported"',
+ "2 out of 2 imported",
+ "wait",
+ "Imported Registrations",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show bgp ipv4 vpn"',
+ "i99.0.0.3/32",
+ "wait",
+ "See R4s static address",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show bgp ipv4 vpn"',
+ "i99.0.0.3/32",
+ "wait",
+ "See R4s static address",
+)
+luCommand(
+ "r1", 'vtysh -c "show bgp ipv4 vpn rd 10:4"', "i5.*i5", "wait", "See R4s imports"
+)
+luCommand(
+ "r3", 'vtysh -c "show bgp ipv4 vpn rd 10:4"', "i5.*i5", "wait", "See R4s imports"
+)
-luCommand('r1','vtysh -c "show vnc registrations remote"','5.1.2.0/24 .*5.1.3.0/24','wait','R4s registrations')
-luCommand('r3','vtysh -c "show vnc registrations remote"','5.1.2.0/24 .*5.1.3.0/24','wait','R4s registrations')
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations remote"',
+ "5.1.2.0/24 .*5.1.3.0/24",
+ "wait",
+ "R4s registrations",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations remote"',
+ "5.1.2.0/24 .*5.1.3.0/24",
+ "wait",
+ "R4s registrations",
+)
if have2ndImports:
- luCommand('r1','vtysh -c "show vnc registrations remote"','5.1.0.0/24 .*5.1.1.0/24','wait','Remote registrations')
- luCommand('r3','vtysh -c "show vnc registrations remote"','5.1.0.0/24 .*5.1.1.0/24','wait','Remote registrations')
-luCommand('r4','vtysh -c "show vnc registrations remote"','5.1.0.0/24 .*5.1.1.0/24','wait','Remote registrations')
-luCommand('r1','vtysh -c "show vnc registrations"','.','none')
-luCommand('r3','vtysh -c "show vnc registrations"','.','none')
-luCommand('r4','vtysh -c "show vnc registrations"','.','none')
+ luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations remote"',
+ "5.1.0.0/24 .*5.1.1.0/24",
+ "wait",
+ "Remote registrations",
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations remote"',
+ "5.1.0.0/24 .*5.1.1.0/24",
+ "wait",
+ "Remote registrations",
+ )
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations remote"',
+ "5.1.0.0/24 .*5.1.1.0/24",
+ "wait",
+ "Remote registrations",
+)
+luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none")
+luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none")
+luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none")
from lutil import luCommand
-luCommand('ce1','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
-luCommand('ce2','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
-luCommand('ce3','ping 192.168.1.1 -c 1',' 0. packet loss','pass','CE->PE ping')
-luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
-luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
-luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
-luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
-luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
-luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
-luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up',180)
-luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
-luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
-luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
-luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
-luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping')
-luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
+
+luCommand("ce1", "ping 192.168.1.1 -c 1", " 0. packet loss", "pass", "CE->PE ping")
+luCommand("ce2", "ping 192.168.1.1 -c 1", " 0. packet loss", "pass", "CE->PE ping")
+luCommand("ce3", "ping 192.168.1.1 -c 1", " 0. packet loss", "pass", "CE->PE ping")
+luCommand("ce1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180)
+luCommand("ce2", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180)
+luCommand("ce3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180)
+luCommand(
+ "r1", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60
+)
+luCommand(
+ "r3", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60
+)
+luCommand(
+ "r4", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60
+)
+luCommand(
+ "r2",
+ 'vtysh -c "show bgp summary"',
+ " 00:0.* 00:0.* 00:0",
+ "wait",
+ "Core adjacencies up",
+ 180,
+)
+luCommand(
+ "r1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180
+)
+luCommand(
+ "r3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180
+)
+luCommand(
+ "r4", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show bgp vrf all summary"',
+ " 00:0.* 00:0",
+ "pass",
+ "All adjacencies up",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show bgp vrf all summary"',
+ " 00:0.* 00:0",
+ "pass",
+ "All adjacencies up",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show bgp vrf all summary"',
+ " 00:0.* 00:0",
+ "pass",
+ "All adjacencies up",
+)
+luCommand(
+ "r1", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping"
+)
+luCommand(
+ "r1", "ping 4.4.4.4 -c 1", " 0. packet loss", "wait", "PE->PE4 (loopback) ping"
+)
+luCommand(
+ "r4", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping"
+)
from lutil import luCommand
-luCommand('ce1','vtysh -c "show bgp ipv4 uni"','7 routes and 7','wait','Local and remote routes')
-luCommand('ce2','vtysh -c "show bgp ipv4 uni"','7 routes and 9','wait','Local and remote routes')
-luCommand('ce3','vtysh -c "show bgp ipv4 uni"','7 routes and 7','wait','Local and remote routes')
-luCommand('r1','vtysh -c "show bgp ipv4 uni"','7 routes and 9','pass','Unicast SAFI')
-luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Unicast SAFI')
-luCommand('r3','vtysh -c "show bgp ipv4 uni"','7 routes and 9','pass','Unicast SAFI')
-luCommand('r4','vtysh -c "show bgp ipv4 uni"','7 routes and 9','pass','Unicast SAFI')
-have2ndImports = luCommand('r3','vtysh -c "show vnc registrations imported"','2 out of 2 imported','none','Imported Registrations',2)
+
+luCommand(
+ "ce1",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "7 routes and 7",
+ "wait",
+ "Local and remote routes",
+)
+luCommand(
+ "ce2",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "7 routes and 9",
+ "wait",
+ "Local and remote routes",
+)
+luCommand(
+ "ce3",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "7 routes and 7",
+ "wait",
+ "Local and remote routes",
+)
+luCommand(
+ "r1", 'vtysh -c "show bgp ipv4 uni"', "7 routes and 9", "pass", "Unicast SAFI"
+)
+luCommand(
+ "r2",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "No BGP prefixes displayed",
+ "pass",
+ "Unicast SAFI",
+)
+luCommand(
+ "r3", 'vtysh -c "show bgp ipv4 uni"', "7 routes and 9", "pass", "Unicast SAFI"
+)
+luCommand(
+ "r4", 'vtysh -c "show bgp ipv4 uni"', "7 routes and 9", "pass", "Unicast SAFI"
+)
+have2ndImports = luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations imported"',
+ "2 out of 2 imported",
+ "none",
+ "Imported Registrations",
+ 2,
+)
if have2ndImports:
- num = '9 routes and 9'
+ num = "9 routes and 9"
else:
- num = '7 routes and 7'
-luCommand('r1','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI')
-luCommand('r2','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI')
-luCommand('r3','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI')
-luCommand('r4','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI')
+ num = "7 routes and 7"
+luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI")
+luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI")
+luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI")
+luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI")
from lutil import luCommand
-luCommand('r1','vtysh -c "clear vrf cust1 prefix 99.0.0.1/32"','.','none','Cleared VRF route')
-luCommand('r3','vtysh -c "clear vrf cust1 prefix 99.0.0.2/32"','.','none','Cleared VRF route')
-luCommand('r4','vtysh -c "clear vrf cust1 prefix 99.0.0.3/32"','.','none','Cleared VRF route')
-luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','fail','Local Registration cleared')
-luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','fail','Local Registration cleared')
-luCommand('r4','vtysh -c "show vnc registrations local"','99.0.0.3','fail','Local Registration cleared')
-luCommand('r1','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated')
-luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Unicast SAFI')
-luCommand('r3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated')
-luCommand('r4','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated')
-luCommand('ce1','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes')
-luCommand('ce2','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes')
-luCommand('ce3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes')
-luCommand('r1','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared')
-luCommand('r3','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared')
-luCommand('r4','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared')
+
+luCommand(
+ "r1",
+ 'vtysh -c "clear vrf cust1 prefix 99.0.0.1/32"',
+ ".",
+ "none",
+ "Cleared VRF route",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "clear vrf cust1 prefix 99.0.0.2/32"',
+ ".",
+ "none",
+ "Cleared VRF route",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "clear vrf cust1 prefix 99.0.0.3/32"',
+ ".",
+ "none",
+ "Cleared VRF route",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.1",
+ "fail",
+ "Local Registration cleared",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.2",
+ "fail",
+ "Local Registration cleared",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.3",
+ "fail",
+ "Local Registration cleared",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Unicast SAFI updated",
+)
+luCommand(
+ "r2",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "No BGP prefixes displayed",
+ "pass",
+ "Unicast SAFI",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Unicast SAFI updated",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Unicast SAFI updated",
+)
+luCommand(
+ "ce1",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Local and remote routes",
+)
+luCommand(
+ "ce2",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Local and remote routes",
+)
+luCommand(
+ "ce3",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Local and remote routes",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations remote"',
+ "Prefix ",
+ "fail",
+ "Remote Registration cleared",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations remote"',
+ "Prefix ",
+ "fail",
+ "Remote Registration cleared",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations remote"',
+ "Prefix ",
+ "fail",
+ "Remote Registration cleared",
+)
import sys
import pytest
-sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
+sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from lib.ltemplate import *
+
def test_adjacencies():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'3.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
- ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('3.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
+ ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc)
+
def test_add_routes():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'3.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
- ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('3.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
+ ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc)
+
def test_check_routes():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'3.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
- ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('3.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
+ ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc)
+
def test_cleanup_all():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'3.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
- ltemplateTest('scripts/cleanup_all.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('3.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
+ ltemplateTest("scripts/cleanup_all.py", False, CliOnFail, CheckFunc)
+
-if __name__ == '__main__':
+if __name__ == "__main__":
retval = pytest.main(["-s"])
sys.exit(retval)
from mininet.topo import Topo
import shutil
+
CWD = os.path.dirname(os.path.realpath(__file__))
# test name based on directory
TEST = os.path.basename(CWD)
+
class ThisTestTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# between routers, switches and hosts.
#
# Create P/PE routers
- #check for mpls
- tgen.add_router('r1')
+ # check for mpls
+ tgen.add_router("r1")
if tgen.hasmpls != True:
- logger.info('MPLS not available, tests will be skipped')
+ logger.info("MPLS not available, tests will be skipped")
return
mach = platform.machine()
krel = platform.release()
- if mach[:1] == 'a' and topotest.version_cmp(krel, '4.11') < 0:
- logger.info('Need Kernel version 4.11 to run on arm processor')
+ if mach[:1] == "a" and topotest.version_cmp(krel, "4.11") < 0:
+ logger.info("Need Kernel version 4.11 to run on arm processor")
return
for routern in range(2, 5):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
# Create CE routers
for routern in range(1, 5):
- tgen.add_router('ce{}'.format(routern))
+ tgen.add_router("ce{}".format(routern))
- #CE/PE links
- tgen.add_link(tgen.gears['ce1'], tgen.gears['r1'], 'ce1-eth0', 'r1-eth4')
- tgen.add_link(tgen.gears['ce2'], tgen.gears['r3'], 'ce2-eth0', 'r3-eth4')
- tgen.add_link(tgen.gears['ce3'], tgen.gears['r4'], 'ce3-eth0', 'r4-eth4')
- tgen.add_link(tgen.gears['ce4'], tgen.gears['r4'], 'ce4-eth0', 'r4-eth5')
+ # CE/PE links
+ tgen.add_link(tgen.gears["ce1"], tgen.gears["r1"], "ce1-eth0", "r1-eth4")
+ tgen.add_link(tgen.gears["ce2"], tgen.gears["r3"], "ce2-eth0", "r3-eth4")
+ tgen.add_link(tgen.gears["ce3"], tgen.gears["r4"], "ce3-eth0", "r4-eth4")
+ tgen.add_link(tgen.gears["ce4"], tgen.gears["r4"], "ce4-eth0", "r4-eth5")
# Create a switch with just one router connected to it to simulate a
# empty network.
switch = {}
- switch[0] = tgen.add_switch('sw0')
- switch[0].add_link(tgen.gears['r1'], nodeif='r1-eth0')
- switch[0].add_link(tgen.gears['r2'], nodeif='r2-eth0')
+ switch[0] = tgen.add_switch("sw0")
+ switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0")
+ switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0")
- switch[1] = tgen.add_switch('sw1')
- switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth1')
- switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth0')
- switch[1].add_link(tgen.gears['r4'], nodeif='r4-eth0')
+ switch[1] = tgen.add_switch("sw1")
+ switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1")
+ switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0")
+ switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0")
+
+ switch[1] = tgen.add_switch("sw2")
+ switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth2")
+ switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth1")
- switch[1] = tgen.add_switch('sw2')
- switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth2')
- switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth1')
l3mdev_accept = 0
+
def ltemplatePreRouterStartHook():
global l3mdev_accept
cc = ltemplateRtrCmd()
krel = platform.release()
tgen = get_topogen()
- logger.info('pre router-start hook, kernel=' + krel)
+ logger.info("pre router-start hook, kernel=" + krel)
- if topotest.version_cmp(krel, '4.15') >= 0 and \
- topotest.version_cmp(krel, '4.18') <= 0:
+ if (
+ topotest.version_cmp(krel, "4.15") >= 0
+ and topotest.version_cmp(krel, "4.18") <= 0
+ ):
l3mdev_accept = 1
- if topotest.version_cmp(krel, '5.0') >= 0:
+ if topotest.version_cmp(krel, "5.0") >= 0:
l3mdev_accept = 1
- logger.info('setting net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept))
- #check for mpls
+ logger.info("setting net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept))
+ # check for mpls
if tgen.hasmpls != True:
- logger.info('MPLS not available, skipping setup')
+ logger.info("MPLS not available, skipping setup")
return False
- #check for normal init
+ # check for normal init
if len(tgen.net) == 1:
- logger.info('Topology not configured, skipping setup')
+ logger.info("Topology not configured, skipping setup")
return False
- #trace errors/unexpected output
+ # trace errors/unexpected output
cc.resetCounts()
- #configure r2 mpls interfaces
- intfs = ['lo', 'r2-eth0', 'r2-eth1', 'r2-eth2']
+ # configure r2 mpls interfaces
+ intfs = ["lo", "r2-eth0", "r2-eth1", "r2-eth2"]
for intf in intfs:
- cc.doCmd(tgen, 'r2', 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf))
-
- #configure cust1 VRFs & MPLS
- rtrs = ['r1', 'r3', 'r4']
- cmds = ['ip link add {0}-cust1 type vrf table 10',
- 'ip ru add oif {0}-cust1 table 10',
- 'ip ru add iif {0}-cust1 table 10',
- 'ip link set dev {0}-cust1 up',
- 'sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept)]
+ cc.doCmd(tgen, "r2", "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf))
+
+ # configure cust1 VRFs & MPLS
+ rtrs = ["r1", "r3", "r4"]
+ cmds = [
+ "ip link add {0}-cust1 type vrf table 10",
+ "ip ru add oif {0}-cust1 table 10",
+ "ip ru add iif {0}-cust1 table 10",
+ "ip link set dev {0}-cust1 up",
+ "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept),
+ ]
for rtr in rtrs:
router = tgen.gears[rtr]
for cmd in cmds:
cc.doCmd(tgen, rtr, cmd.format(rtr))
- cc.doCmd(tgen, rtr, 'ip link set dev {0}-eth4 master {0}-cust1'.format(rtr))
- intfs = [rtr+'-cust1', 'lo', rtr+'-eth0', rtr+'-eth4']
+ cc.doCmd(tgen, rtr, "ip link set dev {0}-eth4 master {0}-cust1".format(rtr))
+ intfs = [rtr + "-cust1", "lo", rtr + "-eth0", rtr + "-eth4"]
for intf in intfs:
- cc.doCmd(tgen, rtr, 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf))
- logger.info('setup {0} vrf {0}-cust1, {0}-eth4. enabled mpls input.'.format(rtr))
- #configure cust2 VRFs & MPLS
- rtrs = ['r4']
- cmds = ['ip link add {0}-cust2 type vrf table 20',
- 'ip ru add oif {0}-cust2 table 20',
- 'ip ru add iif {0}-cust2 table 20',
- 'ip link set dev {0}-cust2 up']
+ cc.doCmd(
+ tgen, rtr, "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf)
+ )
+ logger.info(
+ "setup {0} vrf {0}-cust1, {0}-eth4. enabled mpls input.".format(rtr)
+ )
+ # configure cust2 VRFs & MPLS
+ rtrs = ["r4"]
+ cmds = [
+ "ip link add {0}-cust2 type vrf table 20",
+ "ip ru add oif {0}-cust2 table 20",
+ "ip ru add iif {0}-cust2 table 20",
+ "ip link set dev {0}-cust2 up",
+ ]
for rtr in rtrs:
for cmd in cmds:
cc.doCmd(tgen, rtr, cmd.format(rtr))
- cc.doCmd(tgen, rtr, 'ip link set dev {0}-eth5 master {0}-cust2'.format(rtr))
- intfs = [rtr+'-cust2', rtr+'-eth5']
+ cc.doCmd(tgen, rtr, "ip link set dev {0}-eth5 master {0}-cust2".format(rtr))
+ intfs = [rtr + "-cust2", rtr + "-eth5"]
for intf in intfs:
- cc.doCmd(tgen, rtr, 'echo 1 > /proc/sys/net/mpls/conf/{}/input'.format(intf))
- logger.info('setup {0} vrf {0}-cust2, {0}-eth5. enabled mpls input.'.format(rtr))
- #put ce4-eth0 into a VRF (no default instance!)
- rtrs = ['ce4']
- cmds = ['ip link add {0}-cust2 type vrf table 20',
- 'ip ru add oif {0}-cust2 table 20',
- 'ip ru add iif {0}-cust2 table 20',
- 'ip link set dev {0}-cust2 up',
- 'sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept)]
+ cc.doCmd(
+ tgen, rtr, "echo 1 > /proc/sys/net/mpls/conf/{}/input".format(intf)
+ )
+ logger.info(
+ "setup {0} vrf {0}-cust2, {0}-eth5. enabled mpls input.".format(rtr)
+ )
+ # put ce4-eth0 into a VRF (no default instance!)
+ rtrs = ["ce4"]
+ cmds = [
+ "ip link add {0}-cust2 type vrf table 20",
+ "ip ru add oif {0}-cust2 table 20",
+ "ip ru add iif {0}-cust2 table 20",
+ "ip link set dev {0}-cust2 up",
+ "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept),
+ ]
for rtr in rtrs:
for cmd in cmds:
cc.doCmd(tgen, rtr, cmd.format(rtr))
- cc.doCmd(tgen, rtr, 'ip link set dev {0}-eth0 master {0}-cust2'.format(rtr))
+ cc.doCmd(tgen, rtr, "ip link set dev {0}-eth0 master {0}-cust2".format(rtr))
if cc.getOutput() != 4:
InitSuccess = False
- logger.info('Unexpected output seen ({} times, tests will be skipped'.format(cc.getOutput()))
+ logger.info(
+ "Unexpected output seen ({} times, tests will be skipped".format(
+ cc.getOutput()
+ )
+ )
else:
InitSuccess = True
- logger.info('VRF config successful!')
+ logger.info("VRF config successful!")
return InitSuccess
+
def ltemplatePostRouterStartHook():
- logger.info('post router-start hook')
+ logger.info("post router-start hook")
return True
from lutil import luCommand
-luCommand('r1','vtysh -c "add vrf r1-cust1 prefix 99.0.0.1/32"','.','none','IP Address')
-luCommand('r3','vtysh -c "add vrf r3-cust1 prefix 99.0.0.2/32"','.','none','IP Address')
-luCommand('r4','vtysh -c "add vrf r4-cust1 prefix 99.0.0.3/32"','.','none','IP Address')
-luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','pass','Local Registration')
-luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','pass','Local Registration')
-luCommand('r4','vtysh -c "show vnc registrations local"','99.0.0.3','pass','Local Registration')
-luCommand('r1','vtysh -c "show vnc registrations remote"','4 out of 4','wait','Remote Registration', 10)
-luCommand('r3','vtysh -c "show vnc registrations remote"','6 out of 6','wait','Remote Registration', 10)
-luCommand('r4','vtysh -c "show vnc registrations remote"','4 out of 4','wait','Remote Registration', 10)
-luCommand('r1','vtysh -c "show vnc registrations"','.','none')
-luCommand('r3','vtysh -c "show vnc registrations"','.','none')
-luCommand('r4','vtysh -c "show vnc registrations"','.','none')
+
+luCommand(
+ "r1", 'vtysh -c "add vrf r1-cust1 prefix 99.0.0.1/32"', ".", "none", "IP Address"
+)
+luCommand(
+ "r3", 'vtysh -c "add vrf r3-cust1 prefix 99.0.0.2/32"', ".", "none", "IP Address"
+)
+luCommand(
+ "r4", 'vtysh -c "add vrf r4-cust1 prefix 99.0.0.3/32"', ".", "none", "IP Address"
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.1",
+ "pass",
+ "Local Registration",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.2",
+ "pass",
+ "Local Registration",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.3",
+ "pass",
+ "Local Registration",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations remote"',
+ "4 out of 4",
+ "wait",
+ "Remote Registration",
+ 10,
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations remote"',
+ "6 out of 6",
+ "wait",
+ "Remote Registration",
+ 10,
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations remote"',
+ "4 out of 4",
+ "wait",
+ "Remote Registration",
+ 10,
+)
+luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none")
+luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none")
+luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none")
from lutil import luCommand
-luCommand('ce1','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
-luCommand('ce2','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
-luCommand('ce3','vtysh -c "show bgp summary"',' 00:0','wait','Adjacencies up',180)
-luCommand('ce4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','Adjacencies up',180)
-luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
-luCommand('r1','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
-luCommand('r3','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
-luCommand('r4','vtysh -c "show bgp summary"',' 00:0','wait','Core adjacencies up', 180)
-luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
-luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0','pass','All adjacencies up')
-luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0.* 00:0.* 00:0','pass','All adjacencies up')
-luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
-luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping')
-luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
+
+luCommand("ce1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180)
+luCommand("ce2", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180)
+luCommand("ce3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Adjacencies up", 180)
+luCommand(
+ "ce4", 'vtysh -c "show bgp vrf all summary"', " 00:0", "wait", "Adjacencies up", 180
+)
+luCommand(
+ "r1", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60
+)
+luCommand(
+ "r3", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60
+)
+luCommand(
+ "r4", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60
+)
+luCommand(
+ "r2",
+ 'vtysh -c "show bgp summary"',
+ " 00:0.* 00:0.* 00:0",
+ "wait",
+ "Core adjacencies up",
+ 180,
+)
+luCommand(
+ "r1", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180
+)
+luCommand(
+ "r3", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180
+)
+luCommand(
+ "r4", 'vtysh -c "show bgp summary"', " 00:0", "wait", "Core adjacencies up", 180
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show bgp vrf all summary"',
+ " 00:0.* 00:0",
+ "pass",
+ "All adjacencies up",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show bgp vrf all summary"',
+ " 00:0.* 00:0",
+ "pass",
+ "All adjacencies up",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show bgp vrf all summary"',
+ " 00:0.* 00:0.* 00:0",
+ "pass",
+ "All adjacencies up",
+)
+luCommand(
+ "r1", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping"
+)
+luCommand(
+ "r1", "ping 4.4.4.4 -c 1", " 0. packet loss", "wait", "PE->PE4 (loopback) ping"
+)
+luCommand(
+ "r4", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping"
+)
from lutil import luCommand, luLast
from lib import topotest
-ret = luCommand('r2', 'ip -M route show',
- '\d*(?= via inet 10.0.2.4 dev r2-eth1)','wait','See mpls route to r4')
+ret = luCommand(
+ "r2",
+ "ip -M route show",
+ r"\d*(?= via inet 10.0.2.4 dev r2-eth1)",
+ "wait",
+ "See mpls route to r4",
+)
found = luLast()
if ret != False and found != None:
label4r4 = found.group(0)
- luCommand('r2', 'ip -M route show',
- '.', 'pass',
- 'See %s as label to r4' % label4r4)
- ret = luCommand('r2', 'ip -M route show',
- '\d*(?= via inet 10.0.1.1 dev r2-eth0)', 'wait',
- 'See mpls route to r1')
+ luCommand("r2", "ip -M route show", ".", "pass", "See %s as label to r4" % label4r4)
+ ret = luCommand(
+ "r2",
+ "ip -M route show",
+ r"\d*(?= via inet 10.0.1.1 dev r2-eth0)",
+ "wait",
+ "See mpls route to r1",
+ )
found = luLast()
if ret != False and found != None:
label4r1 = found.group(0)
- luCommand('r2', 'ip -M route show',
- '.', 'pass', 'See %s as label to r1' % label4r1)
-
- luCommand('r1', 'ip route show vrf r1-cust1',
- '99.0.0.4', 'pass', 'VRF->MPLS PHP route installed')
- luCommand('r4', 'ip route show vrf r4-cust2',
- '99.0.0.1','pass', 'VRF->MPLS PHP route installed')
-
- luCommand('r1', 'ip -M route show', '101', 'pass', 'MPLS->VRF route installed')
- luCommand('r4', 'ip -M route show', '1041', 'pass', 'MPLS->VRF1 route installed')
- luCommand('r4', 'ip -M route show', '1042', 'pass', 'MPLS->VRF2 route installed')
-
- luCommand('ce1', 'ping 99.0.0.4 -I 99.0.0.1 -c 1',
- ' 0. packet loss','wait','CE->CE (loopback) ping - l3vpn+zebra case')
- #skip due to VRF weirdness
- #luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1',
+ luCommand("r2", "ip -M route show", ".", "pass", "See %s as label to r1" % label4r1)
+
+ luCommand(
+ "r1",
+ "ip route show vrf r1-cust1",
+ "99.0.0.4",
+ "pass",
+ "VRF->MPLS PHP route installed",
+ )
+ luCommand(
+ "r4",
+ "ip route show vrf r4-cust2",
+ "99.0.0.1",
+ "pass",
+ "VRF->MPLS PHP route installed",
+ )
+
+ luCommand("r1", "ip -M route show", "101", "pass", "MPLS->VRF route installed")
+ luCommand("r4", "ip -M route show", "1041", "pass", "MPLS->VRF1 route installed")
+ luCommand("r4", "ip -M route show", "1042", "pass", "MPLS->VRF2 route installed")
+
+ luCommand(
+ "ce1",
+ "ping 99.0.0.4 -I 99.0.0.1 -c 1",
+ " 0. packet loss",
+ "wait",
+ "CE->CE (loopback) ping - l3vpn+zebra case",
+ )
+ # skip due to VRF weirdness
+ # luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1',
# ' 0. packet loss','wait','CE->CE (loopback) ping - l3vpn+zebra case')
- luCommand('ce1', 'ping 99.0.0.4 -I 99.0.0.1 -c 1',
- ' 0. packet loss','wait','CE->CE (loopback) ping')
- #luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1',
+ luCommand(
+ "ce1",
+ "ping 99.0.0.4 -I 99.0.0.1 -c 1",
+ " 0. packet loss",
+ "wait",
+ "CE->CE (loopback) ping",
+ )
+ # luCommand('ce4', 'ping 99.0.0.1 -I 99.0.0.4 -c 1',
# ' 0. packet loss','wait','CE->CE (loopback) ping')
- luCommand('r3', 'ip -M route show', '103', 'pass', 'MPLS->VRF route installed')
- luCommand('ce2', 'ping 99.0.0.3 -I 99.0.0.2 -c 1',
- ' 0. packet loss','wait','CE2->CE3 (loopback) ping')
- luCommand('ce3', 'ping 99.0.0.4 -I 99.0.0.3 -c 1',
- ' 0. packet loss','wait','CE3->CE4 (loopback) ping')
+ luCommand("r3", "ip -M route show", "103", "pass", "MPLS->VRF route installed")
+ luCommand(
+ "ce2",
+ "ping 99.0.0.3 -I 99.0.0.2 -c 1",
+ " 0. packet loss",
+ "wait",
+ "CE2->CE3 (loopback) ping",
+ )
+ luCommand(
+ "ce3",
+ "ping 99.0.0.4 -I 99.0.0.3 -c 1",
+ " 0. packet loss",
+ "wait",
+ "CE3->CE4 (loopback) ping",
+ )
from lutil import luCommand
from customize import l3mdev_accept
-l3mdev_rtrs = ['r1', 'r3', 'r4', 'ce4']
+l3mdev_rtrs = ["r1", "r3", "r4", "ce4"]
for rtr in l3mdev_rtrs:
- luCommand(rtr,'sysctl net.ipv4.tcp_l3mdev_accept',' = \d*','none','')
+ luCommand(rtr, "sysctl net.ipv4.tcp_l3mdev_accept", r" = \d*", "none", "")
found = luLast()
- luCommand(rtr,'ss -naep',':179','pass','IPv4:bgp, l3mdev{}'.format(found.group(0)))
- luCommand(rtr,'ss -naep',':.*:179','pass','IPv6:bgp')
- luCommand(rtr,'sysctl net.ipv4.tcp_l3mdev_accept',' = {}'.format(l3mdev_accept),'pass','l3mdev matches expected (real/expected{}/{})'.format(found.group(0),l3mdev_accept))
+ luCommand(
+ rtr, "ss -naep", ":179", "pass", "IPv4:bgp, l3mdev{}".format(found.group(0))
+ )
+ luCommand(rtr, "ss -naep", ":.*:179", "pass", "IPv6:bgp")
+ luCommand(
+ rtr,
+ "sysctl net.ipv4.tcp_l3mdev_accept",
+ " = {}".format(l3mdev_accept),
+ "pass",
+ "l3mdev matches expected (real/expected{}/{})".format(
+ found.group(0), l3mdev_accept
+ ),
+ )
-rtrs = ['r1', 'r3', 'r4']
+rtrs = ["r1", "r3", "r4"]
for rtr in rtrs:
- luCommand(rtr, 'ip link show type vrf {}-cust1'.format(rtr),'cust1: .*UP','pass','VRF cust1 intf up')
- luCommand(rtr, 'ip add show vrf {}-cust1'.format(rtr),'r..eth4.*UP','pass','VRF cust1 IP intf up')
- luCommand(rtr, 'ip add show vrf {}-cust1'.format(rtr),'192.168','pass','VRF cust1 IP config')
- luCommand(rtr, 'ip route show vrf {}-cust1'.format(rtr),'192.168...0/24 dev r.-eth','pass','VRF cust1 interface route')
-luCommand('r4', 'ip link show type vrf r4-cust2','cust2: .*UP','pass','VRF cust2 up')
-luCommand('r4', 'ip add show vrf r4-cust2','r..eth5.*UP.* 192.168','pass','VRF cust1 IP config')
-luCommand(rtr, 'ip route show vrf r4-cust2'.format(rtr),'192.168...0/24 dev r.-eth','pass','VRF cust2 interface route')
-rtrs = ['ce1', 'ce2', 'ce3']
+ luCommand(
+ rtr,
+ "ip link show type vrf {}-cust1".format(rtr),
+ "cust1: .*UP",
+ "pass",
+ "VRF cust1 intf up",
+ )
+ luCommand(
+ rtr,
+ "ip add show vrf {}-cust1".format(rtr),
+ "r..eth4.*UP",
+ "pass",
+ "VRF cust1 IP intf up",
+ )
+ luCommand(
+ rtr,
+ "ip add show vrf {}-cust1".format(rtr),
+ "192.168",
+ "pass",
+ "VRF cust1 IP config",
+ )
+ luCommand(
+ rtr,
+ "ip route show vrf {}-cust1".format(rtr),
+ "192.168...0/24 dev r.-eth",
+ "pass",
+ "VRF cust1 interface route",
+ )
+luCommand("r4", "ip link show type vrf r4-cust2", "cust2: .*UP", "pass", "VRF cust2 up")
+luCommand(
+ "r4",
+ "ip add show vrf r4-cust2",
+ "r..eth5.*UP.* 192.168",
+ "pass",
+ "VRF cust1 IP config",
+)
+luCommand(
+ "r4",
+ "ip route show vrf r4-cust2",
+ "192.168...0/24 dev r.-eth",
+ "pass",
+ "VRF cust2 interface route",
+)
+rtrs = ["ce1", "ce2", "ce3"]
for rtr in rtrs:
- luCommand(rtr, 'ip route show','192.168...0/24 dev ce.-eth0','pass','CE interface route')
- luCommand(rtr,'ping 192.168.1.1 -c 1',' 0. packet loss','wait','CE->PE ping')
-luCommand('ce4', 'ip link show type vrf ce4-cust2','cust2: .*UP','pass','VRF cust2 up')
-luCommand('ce4', 'ip route show vrf ce4-cust2','192.168...0/24 dev ce.-eth0','pass','CE interface route')
-luCommand('ce4','ping 192.168.2.1 -c 1 -I ce4-cust2',' 0. packet loss','wait','CE4->PE4 ping')
+ luCommand(
+ rtr,
+ "ip route show",
+ "192.168...0/24 dev ce.-eth0",
+ "pass",
+ "CE interface route",
+ )
+ luCommand(rtr, "ping 192.168.1.1 -c 1", " 0. packet loss", "wait", "CE->PE ping")
+luCommand(
+ "ce4", "ip link show type vrf ce4-cust2", "cust2: .*UP", "pass", "VRF cust2 up"
+)
+luCommand(
+ "ce4",
+ "ip route show vrf ce4-cust2",
+ "192.168...0/24 dev ce.-eth0",
+ "pass",
+ "CE interface route",
+)
+luCommand(
+ "ce4",
+ "ping 192.168.2.1 -c 1 -I ce4-cust2",
+ " 0. packet loss",
+ "wait",
+ "CE4->PE4 ping",
+)
from lutil import luCommand
-from bgprib import bgpribRequireVpnRoutes,bgpribRequireUnicastRoutes
+from bgprib import bgpribRequireVpnRoutes, bgpribRequireUnicastRoutes
########################################################################
# CE routers: contain routes they originate
# ce4 vtysh -c "show bgp ipv4 uni"
want = [
- {'p':'5.1.0.0/24', 'n':'99.0.0.1'},
- {'p':'5.1.1.0/24', 'n':'99.0.0.1'},
- {'p':'99.0.0.1/32', 'n':'0.0.0.0'},
+ {"p": "5.1.0.0/24", "n": "99.0.0.1"},
+ {"p": "5.1.1.0/24", "n": "99.0.0.1"},
+ {"p": "99.0.0.1/32", "n": "0.0.0.0"},
]
-bgpribRequireUnicastRoutes('ce1','ipv4','','Cust 1 routes in ce1',want)
+bgpribRequireUnicastRoutes("ce1", "ipv4", "", "Cust 1 routes in ce1", want)
want = [
- {'p':'5.1.0.0/24', 'n':'99.0.0.2'},
- {'p':'5.1.1.0/24', 'n':'99.0.0.2'},
- {'p':'99.0.0.2/32', 'n':'0.0.0.0'},
+ {"p": "5.1.0.0/24", "n": "99.0.0.2"},
+ {"p": "5.1.1.0/24", "n": "99.0.0.2"},
+ {"p": "99.0.0.2/32", "n": "0.0.0.0"},
]
-bgpribRequireUnicastRoutes('ce2','ipv4','','Cust 2 routes in ce1',want)
+bgpribRequireUnicastRoutes("ce2", "ipv4", "", "Cust 2 routes in ce1", want)
want = [
- {'p':'5.1.2.0/24', 'n':'99.0.0.3'},
- {'p':'5.1.3.0/24', 'n':'99.0.0.3'},
- {'p':'99.0.0.3/32', 'n':'0.0.0.0'},
+ {"p": "5.1.2.0/24", "n": "99.0.0.3"},
+ {"p": "5.1.3.0/24", "n": "99.0.0.3"},
+ {"p": "99.0.0.3/32", "n": "0.0.0.0"},
]
-bgpribRequireUnicastRoutes('ce3','ipv4','','Cust 3 routes in ce1',want)
+bgpribRequireUnicastRoutes("ce3", "ipv4", "", "Cust 3 routes in ce1", want)
want = [
- {'p':'5.4.2.0/24', 'n':'99.0.0.4'},
- {'p':'5.4.3.0/24', 'n':'99.0.0.4'},
- {'p':'99.0.0.4/32', 'n':'0.0.0.0'},
+ {"p": "5.4.2.0/24", "n": "99.0.0.4"},
+ {"p": "5.4.3.0/24", "n": "99.0.0.4"},
+ {"p": "99.0.0.4/32", "n": "0.0.0.0"},
]
-bgpribRequireUnicastRoutes('ce4','ipv4','ce4-cust2','Cust 4 routes in ce1',want)
+bgpribRequireUnicastRoutes("ce4", "ipv4", "ce4-cust2", "Cust 4 routes in ce1", want)
########################################################################
# r1 vtysh -c "show bgp vrf r1-cust1 ipv4"
#
want_r1_cust1_routes = [
- {'p':'5.1.0.0/24', 'n':'99.0.0.1'},
- {'p':'5.1.1.0/24', 'n':'99.0.0.1'},
- {'p':'99.0.0.1/32', 'n':'192.168.1.2'},
+ {"p": "5.1.0.0/24", "n": "99.0.0.1"},
+ {"p": "5.1.1.0/24", "n": "99.0.0.1"},
+ {"p": "99.0.0.1/32", "n": "192.168.1.2"},
]
-bgpribRequireUnicastRoutes('r1','ipv4','r1-cust1','Customer 1 routes in r1 vrf',want_r1_cust1_routes)
+bgpribRequireUnicastRoutes(
+ "r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf", want_r1_cust1_routes
+)
want_r3_cust1_routes = [
- {'p':'5.1.0.0/24', 'n':'99.0.0.2'},
- {'p':'5.1.1.0/24', 'n':'99.0.0.2'},
- {'p':'99.0.0.2/32', 'n':'192.168.1.2'},
+ {"p": "5.1.0.0/24", "n": "99.0.0.2"},
+ {"p": "5.1.1.0/24", "n": "99.0.0.2"},
+ {"p": "99.0.0.2/32", "n": "192.168.1.2"},
]
-bgpribRequireUnicastRoutes('r3','ipv4','r3-cust1','Customer 1 routes in r3 vrf',want_r3_cust1_routes)
+bgpribRequireUnicastRoutes(
+ "r3", "ipv4", "r3-cust1", "Customer 1 routes in r3 vrf", want_r3_cust1_routes
+)
want_r4_cust1_routes = [
- {'p':'5.1.2.0/24', 'n':'99.0.0.3'},
- {'p':'5.1.3.0/24', 'n':'99.0.0.3'},
- {'p':'99.0.0.3/32', 'n':'192.168.1.2'},
+ {"p": "5.1.2.0/24", "n": "99.0.0.3"},
+ {"p": "5.1.3.0/24", "n": "99.0.0.3"},
+ {"p": "99.0.0.3/32", "n": "192.168.1.2"},
]
-bgpribRequireUnicastRoutes('r4','ipv4','r4-cust1','Customer 1 routes in r4 vrf',want_r4_cust1_routes)
+bgpribRequireUnicastRoutes(
+ "r4", "ipv4", "r4-cust1", "Customer 1 routes in r4 vrf", want_r4_cust1_routes
+)
want_r4_cust2_routes = [
- {'p':'5.4.2.0/24', 'n':'99.0.0.4'},
- {'p':'5.4.3.0/24', 'n':'99.0.0.4'},
- {'p':'99.0.0.4/32', 'n':'192.168.2.2'},
+ {"p": "5.4.2.0/24", "n": "99.0.0.4"},
+ {"p": "5.4.3.0/24", "n": "99.0.0.4"},
+ {"p": "99.0.0.4/32", "n": "192.168.2.2"},
]
-bgpribRequireUnicastRoutes('r4','ipv4','r4-cust2','Customer 2 routes in r4 vrf',want_r4_cust2_routes)
+bgpribRequireUnicastRoutes(
+ "r4", "ipv4", "r4-cust2", "Customer 2 routes in r4 vrf", want_r4_cust2_routes
+)
########################################################################
# PE routers: core unicast routes are empty
########################################################################
-luCommand('r1','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean')
-luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean')
-luCommand('r3','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean')
-luCommand('r4','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Core Unicast SAFI clean')
+luCommand(
+ "r1",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "No BGP prefixes displayed",
+ "pass",
+ "Core Unicast SAFI clean",
+)
+luCommand(
+ "r2",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "No BGP prefixes displayed",
+ "pass",
+ "Core Unicast SAFI clean",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "No BGP prefixes displayed",
+ "pass",
+ "Core Unicast SAFI clean",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "No BGP prefixes displayed",
+ "pass",
+ "Core Unicast SAFI clean",
+)
########################################################################
# PE routers: local ce-originated routes are leaked to vpn
########################################################################
# nhzero is for the new code that sets nh of locally-leaked routes to 0
-#nhzero = 1
+# nhzero = 1
nhzero = 0
if nhzero:
- luCommand('r1','vtysh -c "show bgp ipv4 vpn"',
- 'Distinguisher: *10:1.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.1/32 *0.0.0.0 ',
- 'pass','vrf->vpn routes')
- luCommand('r3','vtysh -c "show bgp ipv4 vpn"',
- 'Distinguisher: *10:3.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.2/32 *0.0.0.0 ',
- 'pass','vrf->vpn routes')
+ luCommand(
+ "r1",
+ 'vtysh -c "show bgp ipv4 vpn"',
+ "Distinguisher: *10:1.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.1/32 *0.0.0.0 ",
+ "pass",
+ "vrf->vpn routes",
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show bgp ipv4 vpn"',
+ "Distinguisher: *10:3.*5.1.0.0/24 *0.0.0.0 .*5.1.1.0/24 *0.0.0.0 .*99.0.0.2/32 *0.0.0.0 ",
+ "pass",
+ "vrf->vpn routes",
+ )
want = [
- {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'0.0.0.0'},
- {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'0.0.0.0'},
- {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'0.0.0.0'},
-
- {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'0.0.0.0'},
- {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'0.0.0.0'},
- {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'0.0.0.0'},
+ {"rd": "10:41", "p": "5.1.2.0/24", "n": "0.0.0.0"},
+ {"rd": "10:41", "p": "5.1.3.0/24", "n": "0.0.0.0"},
+ {"rd": "10:41", "p": "99.0.0.3/32", "n": "0.0.0.0"},
+ {"rd": "10:42", "p": "5.4.2.0/24", "n": "0.0.0.0"},
+ {"rd": "10:42", "p": "5.4.3.0/24", "n": "0.0.0.0"},
+ {"rd": "10:42", "p": "99.0.0.4/32", "n": "0.0.0.0"},
]
- bgpribRequireVpnRoutes('r4','vrf->vpn routes',want)
+ bgpribRequireVpnRoutes("r4", "vrf->vpn routes", want)
else:
- luCommand('r1','vtysh -c "show bgp ipv4 vpn"',
- r'Distinguisher: *10:1.*5.1.0.0/24 *99.0.0.1\b.*5.1.1.0/24 *99.0.0.1\b.*99.0.0.1/32 *192.168.1.2\b',
- 'pass','vrf->vpn routes')
- luCommand('r3','vtysh -c "show bgp ipv4 vpn"',
- r'Distinguisher: *10:3.*5.1.0.0/24 *99.0.0.2\b.*5.1.1.0/24 *99.0.0.2\b.*99.0.0.2/32 *192.168.1.2\b',
- 'pass','vrf->vpn routes')
+ luCommand(
+ "r1",
+ 'vtysh -c "show bgp ipv4 vpn"',
+ r"Distinguisher: *10:1.*5.1.0.0/24 *99.0.0.1\b.*5.1.1.0/24 *99.0.0.1\b.*99.0.0.1/32 *192.168.1.2\b",
+ "pass",
+ "vrf->vpn routes",
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show bgp ipv4 vpn"',
+ r"Distinguisher: *10:3.*5.1.0.0/24 *99.0.0.2\b.*5.1.1.0/24 *99.0.0.2\b.*99.0.0.2/32 *192.168.1.2\b",
+ "pass",
+ "vrf->vpn routes",
+ )
want = [
- {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'99.0.0.3'},
- {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'99.0.0.3'},
- {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'192.168.1.2'},
-
- {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'99.0.0.4'},
- {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'99.0.0.4'},
- {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'192.168.2.2'},
+ {"rd": "10:41", "p": "5.1.2.0/24", "n": "99.0.0.3"},
+ {"rd": "10:41", "p": "5.1.3.0/24", "n": "99.0.0.3"},
+ {"rd": "10:41", "p": "99.0.0.3/32", "n": "192.168.1.2"},
+ {"rd": "10:42", "p": "5.4.2.0/24", "n": "99.0.0.4"},
+ {"rd": "10:42", "p": "5.4.3.0/24", "n": "99.0.0.4"},
+ {"rd": "10:42", "p": "99.0.0.4/32", "n": "192.168.2.2"},
]
- bgpribRequireVpnRoutes('r4','vrf->vpn routes',want)
+ bgpribRequireVpnRoutes("r4", "vrf->vpn routes", want)
########################################################################
# PE routers: exporting vrfs set MPLS vrf labels in kernel
########################################################################
-luCommand('r1','vtysh -c "show mpls table"',' 101 *BGP *r1-cust1','pass','vrf labels')
-luCommand('r3','vtysh -c "show mpls table"',' 103 *BGP *r3-cust1','pass','vrf labels')
-luCommand('r4','vtysh -c "show mpls table"',' 1041 *BGP *r4-cust1 .*1042 *BGP *r4-cust2','pass','vrf labels')
+luCommand(
+ "r1", 'vtysh -c "show mpls table"', " 101 *BGP *r1-cust1", "pass", "vrf labels"
+)
+luCommand(
+ "r3", 'vtysh -c "show mpls table"', " 103 *BGP *r3-cust1", "pass", "vrf labels"
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show mpls table"',
+ " 1041 *BGP *r4-cust1 .*1042 *BGP *r4-cust2",
+ "pass",
+ "vrf labels",
+)
########################################################################
# Core VPN router: all customer routes
########################################################################
want_rd_routes = [
- {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'},
- {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'},
- {'rd':'10:1', 'p':'99.0.0.1/32', 'n':'1.1.1.1'},
-
- {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'},
- {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'},
- {'rd':'10:3', 'p':'99.0.0.2/32', 'n':'3.3.3.3'},
-
- {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'4.4.4.4'},
- {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'4.4.4.4'},
- {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'4.4.4.4'},
-
- {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'4.4.4.4'},
- {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'4.4.4.4'},
- {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'4.4.4.4'},
+ {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"},
+ {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"},
+ {"rd": "10:1", "p": "99.0.0.1/32", "n": "1.1.1.1"},
+ {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"},
+ {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"},
+ {"rd": "10:3", "p": "99.0.0.2/32", "n": "3.3.3.3"},
+ {"rd": "10:41", "p": "5.1.2.0/24", "n": "4.4.4.4"},
+ {"rd": "10:41", "p": "5.1.3.0/24", "n": "4.4.4.4"},
+ {"rd": "10:41", "p": "99.0.0.3/32", "n": "4.4.4.4"},
+ {"rd": "10:42", "p": "5.4.2.0/24", "n": "4.4.4.4"},
+ {"rd": "10:42", "p": "5.4.3.0/24", "n": "4.4.4.4"},
+ {"rd": "10:42", "p": "99.0.0.4/32", "n": "4.4.4.4"},
]
-bgpribRequireVpnRoutes('r2','Customer routes in provider vpn core',want_rd_routes)
+bgpribRequireVpnRoutes("r2", "Customer routes in provider vpn core", want_rd_routes)
########################################################################
# PE routers: VPN routes from remote customers
# r1 vtysh -c "show bgp ipv4 vpn"
#
want_r1_remote_vpn_routes = [
- {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'},
- {'rd':'10:3', 'p':'5.1.1.0/24', 'n':'3.3.3.3'},
- {'rd':'10:3', 'p':'99.0.0.2/32', 'n':'3.3.3.3'},
-
- {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'4.4.4.4'},
- {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'4.4.4.4'},
- {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'4.4.4.4'},
-
- {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'4.4.4.4'},
- {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'4.4.4.4'},
- {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'4.4.4.4'},
+ {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"},
+ {"rd": "10:3", "p": "5.1.1.0/24", "n": "3.3.3.3"},
+ {"rd": "10:3", "p": "99.0.0.2/32", "n": "3.3.3.3"},
+ {"rd": "10:41", "p": "5.1.2.0/24", "n": "4.4.4.4"},
+ {"rd": "10:41", "p": "5.1.3.0/24", "n": "4.4.4.4"},
+ {"rd": "10:41", "p": "99.0.0.3/32", "n": "4.4.4.4"},
+ {"rd": "10:42", "p": "5.4.2.0/24", "n": "4.4.4.4"},
+ {"rd": "10:42", "p": "5.4.3.0/24", "n": "4.4.4.4"},
+ {"rd": "10:42", "p": "99.0.0.4/32", "n": "4.4.4.4"},
]
-bgpribRequireVpnRoutes('r1','Remote Customer routes in R1 vpn',want_r1_remote_vpn_routes)
+bgpribRequireVpnRoutes(
+ "r1", "Remote Customer routes in R1 vpn", want_r1_remote_vpn_routes
+)
want_r3_remote_vpn_routes = [
- {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'},
- {'rd':'10:1', 'p':'5.1.1.0/24', 'n':'1.1.1.1'},
- {'rd':'10:1', 'p':'99.0.0.1/32', 'n':'1.1.1.1'},
-
- {'rd':'10:41', 'p':'5.1.2.0/24', 'n':'4.4.4.4'},
- {'rd':'10:41', 'p':'5.1.3.0/24', 'n':'4.4.4.4'},
- {'rd':'10:41', 'p':'99.0.0.3/32', 'n':'4.4.4.4'},
-
- {'rd':'10:42', 'p':'5.4.2.0/24', 'n':'4.4.4.4'},
- {'rd':'10:42', 'p':'5.4.3.0/24', 'n':'4.4.4.4'},
- {'rd':'10:42', 'p':'99.0.0.4/32', 'n':'4.4.4.4'},
+ {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"},
+ {"rd": "10:1", "p": "5.1.1.0/24", "n": "1.1.1.1"},
+ {"rd": "10:1", "p": "99.0.0.1/32", "n": "1.1.1.1"},
+ {"rd": "10:41", "p": "5.1.2.0/24", "n": "4.4.4.4"},
+ {"rd": "10:41", "p": "5.1.3.0/24", "n": "4.4.4.4"},
+ {"rd": "10:41", "p": "99.0.0.3/32", "n": "4.4.4.4"},
+ {"rd": "10:42", "p": "5.4.2.0/24", "n": "4.4.4.4"},
+ {"rd": "10:42", "p": "5.4.3.0/24", "n": "4.4.4.4"},
+ {"rd": "10:42", "p": "99.0.0.4/32", "n": "4.4.4.4"},
]
-bgpribRequireVpnRoutes('r3','Remote Customer routes in R3 vpn',want_r3_remote_vpn_routes)
+bgpribRequireVpnRoutes(
+ "r3", "Remote Customer routes in R3 vpn", want_r3_remote_vpn_routes
+)
want_r4_remote_vpn_routes = [
- {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'},
- {'rd':'10:1', 'p':'5.1.1.0/24', 'n':'1.1.1.1'},
- {'rd':'10:1', 'p':'99.0.0.1/32', 'n':'1.1.1.1'},
-
- {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'},
- {'rd':'10:3', 'p':'5.1.1.0/24', 'n':'3.3.3.3'},
- {'rd':'10:3', 'p':'99.0.0.2/32', 'n':'3.3.3.3'},
+ {"rd": "10:1", "p": "5.1.0.0/24", "n": "1.1.1.1"},
+ {"rd": "10:1", "p": "5.1.1.0/24", "n": "1.1.1.1"},
+ {"rd": "10:1", "p": "99.0.0.1/32", "n": "1.1.1.1"},
+ {"rd": "10:3", "p": "5.1.0.0/24", "n": "3.3.3.3"},
+ {"rd": "10:3", "p": "5.1.1.0/24", "n": "3.3.3.3"},
+ {"rd": "10:3", "p": "99.0.0.2/32", "n": "3.3.3.3"},
]
-bgpribRequireVpnRoutes('r4','Remote Customer routes in R4 vpn',want_r4_remote_vpn_routes)
-
+bgpribRequireVpnRoutes(
+ "r4", "Remote Customer routes in R4 vpn", want_r4_remote_vpn_routes
+)
# r1 vtysh -c "show bgp vrf r1-cust1 ipv4"
# PE routers: VRFs contain routes from remote customer nets
########################################################################
want_r1_remote_cust1_routes = [
- {'p':'5.1.0.0/24', 'n':'3.3.3.3'},
- {'p':'5.1.1.0/24', 'n':'3.3.3.3'},
- {'p':'99.0.0.2/32', 'n':'3.3.3.3'},
-
- {'p':'5.1.2.0/24', 'n':'4.4.4.4'},
- {'p':'5.1.3.0/24', 'n':'4.4.4.4'},
- {'p':'99.0.0.3/32', 'n':'4.4.4.4'},
-
- {'p':'5.4.2.0/24', 'n':'4.4.4.4'},
- {'p':'5.4.3.0/24', 'n':'4.4.4.4'},
- {'p':'99.0.0.3/32', 'n':'4.4.4.4'},
+ {"p": "5.1.0.0/24", "n": "3.3.3.3"},
+ {"p": "5.1.1.0/24", "n": "3.3.3.3"},
+ {"p": "99.0.0.2/32", "n": "3.3.3.3"},
+ {"p": "5.1.2.0/24", "n": "4.4.4.4"},
+ {"p": "5.1.3.0/24", "n": "4.4.4.4"},
+ {"p": "99.0.0.3/32", "n": "4.4.4.4"},
+ {"p": "5.4.2.0/24", "n": "4.4.4.4"},
+ {"p": "5.4.3.0/24", "n": "4.4.4.4"},
+ {"p": "99.0.0.3/32", "n": "4.4.4.4"},
]
-bgpribRequireUnicastRoutes('r1','ipv4','r1-cust1','Customer 1 routes in r1 vrf',want_r1_remote_cust1_routes)
+bgpribRequireUnicastRoutes(
+ "r1", "ipv4", "r1-cust1", "Customer 1 routes in r1 vrf", want_r1_remote_cust1_routes
+)
want_r3_remote_cust1_routes = [
- {'p':'5.1.0.0/24', 'n':'1.1.1.1'},
- {'p':'5.1.1.0/24', 'n':'1.1.1.1'},
- {'p':'99.0.0.1/32', 'n':'1.1.1.1'},
-
- {'p':'5.1.2.0/24', 'n':'4.4.4.4'},
- {'p':'5.1.3.0/24', 'n':'4.4.4.4'},
- {'p':'99.0.0.3/32', 'n':'4.4.4.4'},
-
- {'p':'5.4.2.0/24', 'n':'4.4.4.4'},
- {'p':'5.4.3.0/24', 'n':'4.4.4.4'},
- {'p':'99.0.0.3/32', 'n':'4.4.4.4'},
+ {"p": "5.1.0.0/24", "n": "1.1.1.1"},
+ {"p": "5.1.1.0/24", "n": "1.1.1.1"},
+ {"p": "99.0.0.1/32", "n": "1.1.1.1"},
+ {"p": "5.1.2.0/24", "n": "4.4.4.4"},
+ {"p": "5.1.3.0/24", "n": "4.4.4.4"},
+ {"p": "99.0.0.3/32", "n": "4.4.4.4"},
+ {"p": "5.4.2.0/24", "n": "4.4.4.4"},
+ {"p": "5.4.3.0/24", "n": "4.4.4.4"},
+ {"p": "99.0.0.3/32", "n": "4.4.4.4"},
]
-bgpribRequireUnicastRoutes('r3','ipv4','r3-cust1','Customer 1 routes in r3 vrf',want_r3_remote_cust1_routes)
+bgpribRequireUnicastRoutes(
+ "r3", "ipv4", "r3-cust1", "Customer 1 routes in r3 vrf", want_r3_remote_cust1_routes
+)
want_r4_remote_cust1_routes = [
- {'p':'5.1.0.0/24', 'n':'1.1.1.1'},
- {'p':'5.1.1.0/24', 'n':'1.1.1.1'},
- {'p':'5.1.0.0/24', 'n':'3.3.3.3'},
- {'p':'5.1.1.0/24', 'n':'3.3.3.3'},
- {'p':'99.0.0.1/32', 'n':'1.1.1.1'},
- {'p':'99.0.0.2/32', 'n':'3.3.3.3'},
+ {"p": "5.1.0.0/24", "n": "1.1.1.1"},
+ {"p": "5.1.1.0/24", "n": "1.1.1.1"},
+ {"p": "5.1.0.0/24", "n": "3.3.3.3"},
+ {"p": "5.1.1.0/24", "n": "3.3.3.3"},
+ {"p": "99.0.0.1/32", "n": "1.1.1.1"},
+ {"p": "99.0.0.2/32", "n": "3.3.3.3"},
]
-bgpribRequireUnicastRoutes('r4','ipv4','r4-cust1','Customer 1 routes in r4 vrf',want_r4_remote_cust1_routes)
+bgpribRequireUnicastRoutes(
+ "r4", "ipv4", "r4-cust1", "Customer 1 routes in r4 vrf", want_r4_remote_cust1_routes
+)
want_r4_remote_cust2_routes = [
- {'p':'5.1.0.0/24', 'n':'1.1.1.1'},
- {'p':'5.1.1.0/24', 'n':'1.1.1.1'},
- {'p':'5.1.0.0/24', 'n':'3.3.3.3'},
- {'p':'5.1.1.0/24', 'n':'3.3.3.3'},
- {'p':'99.0.0.1/32', 'n':'1.1.1.1'},
- {'p':'99.0.0.2/32', 'n':'3.3.3.3'},
+ {"p": "5.1.0.0/24", "n": "1.1.1.1"},
+ {"p": "5.1.1.0/24", "n": "1.1.1.1"},
+ {"p": "5.1.0.0/24", "n": "3.3.3.3"},
+ {"p": "5.1.1.0/24", "n": "3.3.3.3"},
+ {"p": "99.0.0.1/32", "n": "1.1.1.1"},
+ {"p": "99.0.0.2/32", "n": "3.3.3.3"},
]
-bgpribRequireUnicastRoutes('r4','ipv4','r4-cust2','Customer 2 routes in r4 vrf',want_r4_remote_cust2_routes)
+bgpribRequireUnicastRoutes(
+ "r4", "ipv4", "r4-cust2", "Customer 2 routes in r4 vrf", want_r4_remote_cust2_routes
+)
#########################################################################
# r1 vtysh -c "show bgp vrf r1-cust1 ipv4"
# r1 vtysh -c "show bgp vrf r1-cust1 ipv4 5.1.2.0/24"
-luCommand('ce1','vtysh -c "show bgp ipv4 uni"','10 routes and 10','wait','Local and remote routes', 10)
+luCommand(
+ "ce1",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "10 routes and 10",
+ "wait",
+ "Local and remote routes",
+ 10,
+)
want = [
- {'p':'5.1.2.0/24', 'n':'192.168.1.1'},
- {'p':'5.1.3.0/24', 'n':'192.168.1.1'},
- {'p':'5.4.2.0/24', 'n':'192.168.1.1'},
- {'p':'5.4.3.0/24', 'n':'192.168.1.1'},
+ {"p": "5.1.2.0/24", "n": "192.168.1.1"},
+ {"p": "5.1.3.0/24", "n": "192.168.1.1"},
+ {"p": "5.4.2.0/24", "n": "192.168.1.1"},
+ {"p": "5.4.3.0/24", "n": "192.168.1.1"},
]
-bgpribRequireUnicastRoutes('ce1','ipv4','','Cust 1 routes from remote',want)
-
-luCommand('ce2','vtysh -c "show bgp ipv4 uni"','10 routes and 12','wait','Local and remote routes', 10)
+bgpribRequireUnicastRoutes("ce1", "ipv4", "", "Cust 1 routes from remote", want)
+
+luCommand(
+ "ce2",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "10 routes and 12",
+ "wait",
+ "Local and remote routes",
+ 10,
+)
want = [
- {'p':'5.1.0.0/24', 'n':'192.168.1.1'},
- {'p':'5.1.1.0/24', 'n':'192.168.1.1'},
- {'p':'5.1.2.0/24', 'n':'192.168.1.1'},
- {'p':'5.1.3.0/24', 'n':'192.168.1.1'},
- {'p':'5.4.2.0/24', 'n':'192.168.1.1'},
- {'p':'5.4.3.0/24', 'n':'192.168.1.1'},
+ {"p": "5.1.0.0/24", "n": "192.168.1.1"},
+ {"p": "5.1.1.0/24", "n": "192.168.1.1"},
+ {"p": "5.1.2.0/24", "n": "192.168.1.1"},
+ {"p": "5.1.3.0/24", "n": "192.168.1.1"},
+ {"p": "5.4.2.0/24", "n": "192.168.1.1"},
+ {"p": "5.4.3.0/24", "n": "192.168.1.1"},
]
-bgpribRequireUnicastRoutes('ce2','ipv4','','Cust 1 routes from remote',want)
+bgpribRequireUnicastRoutes("ce2", "ipv4", "", "Cust 1 routes from remote", want)
# human readable output for debugging
-luCommand('r4','vtysh -c "show bgp vrf r4-cust1 ipv4 uni"')
-luCommand('r4','vtysh -c "show bgp vrf r4-cust2 ipv4 uni"')
-luCommand('r4','vtysh -c "show bgp ipv4 vpn"')
-luCommand('r4','vtysh -c "show ip route vrf r4-cust1"')
-luCommand('r4','vtysh -c "show ip route vrf r4-cust2"')
-
-luCommand('ce3','vtysh -c "show bgp ipv4 uni"','10 routes and 10','wait','Local and remote routes', 10)
+luCommand("r4", 'vtysh -c "show bgp vrf r4-cust1 ipv4 uni"')
+luCommand("r4", 'vtysh -c "show bgp vrf r4-cust2 ipv4 uni"')
+luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"')
+luCommand("r4", 'vtysh -c "show ip route vrf r4-cust1"')
+luCommand("r4", 'vtysh -c "show ip route vrf r4-cust2"')
+
+luCommand(
+ "ce3",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "10 routes and 10",
+ "wait",
+ "Local and remote routes",
+ 10,
+)
# Requires bvl-bug-degenerate-no-label fix (FRR PR #2053)
want = [
- {'p':'5.1.0.0/24', 'n':'192.168.1.1'},
- {'p':'5.1.1.0/24', 'n':'192.168.1.1'},
- {'p':'5.4.2.0/24', 'n':'192.168.1.1'},
- {'p':'5.4.3.0/24', 'n':'192.168.1.1'},
+ {"p": "5.1.0.0/24", "n": "192.168.1.1"},
+ {"p": "5.1.1.0/24", "n": "192.168.1.1"},
+ {"p": "5.4.2.0/24", "n": "192.168.1.1"},
+ {"p": "5.4.3.0/24", "n": "192.168.1.1"},
]
-bgpribRequireUnicastRoutes('ce3','ipv4','','Cust 1 routes from remote',want)
-
-luCommand('ce4','vtysh -c "show bgp vrf ce4-cust2 ipv4 uni"','10 routes and 10','wait','Local and remote routes', 10)
+bgpribRequireUnicastRoutes("ce3", "ipv4", "", "Cust 1 routes from remote", want)
+
+luCommand(
+ "ce4",
+ 'vtysh -c "show bgp vrf ce4-cust2 ipv4 uni"',
+ "10 routes and 10",
+ "wait",
+ "Local and remote routes",
+ 10,
+)
want = [
- {'p':'5.1.0.0/24', 'n':'192.168.2.1'},
- {'p':'5.1.1.0/24', 'n':'192.168.2.1'},
- {'p':'5.1.2.0/24', 'n':'192.168.2.1'},
- {'p':'5.1.3.0/24', 'n':'192.168.2.1'},
+ {"p": "5.1.0.0/24", "n": "192.168.2.1"},
+ {"p": "5.1.1.0/24", "n": "192.168.2.1"},
+ {"p": "5.1.2.0/24", "n": "192.168.2.1"},
+ {"p": "5.1.3.0/24", "n": "192.168.2.1"},
]
-bgpribRequireUnicastRoutes('ce4','ipv4','ce4-cust2','Cust 2 routes from remote',want)
-
+bgpribRequireUnicastRoutes(
+ "ce4", "ipv4", "ce4-cust2", "Cust 2 routes from remote", want
+)
from lutil import luCommand
-luCommand('r1','vtysh -c "clear vrf r1-cust1 prefix 99.0.0.1/32"','.','none','Cleared VRF route')
-luCommand('r3','vtysh -c "clear vrf r3-cust1 prefix 99.0.0.2/32"','.','none','Cleared VRF route')
-luCommand('r4','vtysh -c "clear vrf r3-cust1 prefix 99.0.0.3/32"','.','none','Cleared VRF route')
-luCommand('r1','vtysh -c "show vnc registrations local"','99.0.0.1','fail','Local Registration cleared')
-luCommand('r3','vtysh -c "show vnc registrations local"','99.0.0.2','fail','Local Registration cleared')
-luCommand('r4','vtysh -c "show vnc registrations local"','99.0.0.3','fail','Local Registration cleared')
-luCommand('r1','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated', 10)
-luCommand('r2','vtysh -c "show bgp ipv4 uni"','No BGP prefixes displayed','pass','Unicast SAFI')
-luCommand('r3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated', 10)
-luCommand('r4','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Unicast SAFI updated', 10)
-luCommand('ce1','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes', 10)
-luCommand('ce2','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes', 10)
-luCommand('ce3','vtysh -c "show bgp ipv4 uni"','2 routes and 2','wait','Local and remote routes', 10)
-luCommand('r1','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared')
-luCommand('r3','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared')
-luCommand('r4','vtysh -c "show vnc registrations remote"','Prefix ','fail','Remote Registration cleared')
+
+luCommand(
+ "r1",
+ 'vtysh -c "clear vrf r1-cust1 prefix 99.0.0.1/32"',
+ ".",
+ "none",
+ "Cleared VRF route",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "clear vrf r3-cust1 prefix 99.0.0.2/32"',
+ ".",
+ "none",
+ "Cleared VRF route",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "clear vrf r3-cust1 prefix 99.0.0.3/32"',
+ ".",
+ "none",
+ "Cleared VRF route",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.1",
+ "fail",
+ "Local Registration cleared",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.2",
+ "fail",
+ "Local Registration cleared",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations local"',
+ "99.0.0.3",
+ "fail",
+ "Local Registration cleared",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Unicast SAFI updated",
+ 10,
+)
+luCommand(
+ "r2",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "No BGP prefixes displayed",
+ "pass",
+ "Unicast SAFI",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Unicast SAFI updated",
+ 10,
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Unicast SAFI updated",
+ 10,
+)
+luCommand(
+ "ce1",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Local and remote routes",
+ 10,
+)
+luCommand(
+ "ce2",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Local and remote routes",
+ 10,
+)
+luCommand(
+ "ce3",
+ 'vtysh -c "show bgp ipv4 uni"',
+ "2 routes and 2",
+ "wait",
+ "Local and remote routes",
+ 10,
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations remote"',
+ "Prefix ",
+ "fail",
+ "Remote Registration cleared",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations remote"',
+ "Prefix ",
+ "fail",
+ "Remote Registration cleared",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations remote"',
+ "Prefix ",
+ "fail",
+ "Remote Registration cleared",
+)
from lutil import luCommand
-luCommand('r1','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r1-cust1" -c "no router bgp 5226"','.','none','Cleared bgp instances')
-luCommand('r2','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5226"','.','none','Cleared bgp instances')
-luCommand('r3','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r3-cust1" -c "no router bgp 5226"','.','none','Cleared bgp instances')
-luCommand('r4','/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5228 vrf r4-cust2" -c "no router bgp 5227 vrf r4-cust1" -c "no router bgp 5226"','.','none','Cleared bgp instances')
-
+luCommand(
+ "r1",
+ '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r1-cust1" -c "no router bgp 5226"',
+ ".",
+ "none",
+ "Cleared bgp instances",
+)
+luCommand(
+ "r2",
+ '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5226"',
+ ".",
+ "none",
+ "Cleared bgp instances",
+)
+luCommand(
+ "r3",
+ '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5227 vrf r3-cust1" -c "no router bgp 5226"',
+ ".",
+ "none",
+ "Cleared bgp instances",
+)
+luCommand(
+ "r4",
+ '/usr/lib/frr/vtysh -c "conf ter" -c "no router bgp 5228 vrf r4-cust2" -c "no router bgp 5227 vrf r4-cust1" -c "no router bgp 5226"',
+ ".",
+ "none",
+ "Cleared bgp instances",
+)
from lutil import luCommand
-rtrs = ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3', 'r4']
+
+rtrs = ["ce1", "ce2", "ce3", "r1", "r2", "r3", "r4"]
for rtr in rtrs:
- ret = luCommand(rtr, 'vtysh -c "show bgp neigh"', 'Notification received .([A-Za-z0-9/ ]*)', 'none', 'collect neighbor stats')
+ ret = luCommand(
+ rtr,
+ 'vtysh -c "show bgp neigh"',
+ "Notification received .([A-Za-z0-9/ ]*)",
+ "none",
+ "collect neighbor stats",
+ )
found = luLast()
if ret != False and found != None:
val = found.group(1)
- ret = luCommand(rtr, 'vtysh -c "show bgp neigh"', 'Notification received', 'fail', 'Notify RXed! {}'.format(val))
-#done
+ ret = luCommand(
+ rtr,
+ 'vtysh -c "show bgp neigh"',
+ "Notification received",
+ "fail",
+ "Notify RXed! {}".format(val),
+ )
+# done
from lutil import luCommand
-ret = luCommand('ce1', 'vtysh -c "show ip route" | grep -c \\ 10\\.\\*/32','(.*)','pass', 'Looking for sharp routes')
+
+ret = luCommand(
+ "ce1",
+ 'vtysh -c "show ip route" | grep -c \\ 10\\.\\*/32',
+ "(.*)",
+ "pass",
+ "Looking for sharp routes",
+)
found = luLast()
if ret != False and found != None:
num = int(found.group())
- luCommand('ce3', 'vtysh -c "show bgp sum"',
- '.', 'pass', 'See %s sharp routes' % num)
+ luCommand(
+ "ce3", 'vtysh -c "show bgp sum"', ".", "pass", "See %s sharp routes" % num
+ )
if num > 0:
- rtrs = ['ce1', 'ce2', 'ce3']
+ rtrs = ["ce1", "ce2", "ce3"]
for rtr in rtrs:
- luCommand(rtr, 'vtysh -c "show bgp ipv4 uni" | grep Display','.', 'none', 'BGP routes pre remove')
- luCommand(rtr, 'ip route show | cat -n | tail','.', 'none', 'Linux routes pre remove')
- wait = 2*num/500
- luCommand('ce1', 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num),'.','none','Removing {} routes'.format(num))
- luCommand('ce2', 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num),'.','none','Removing {} routes'.format(num))
+ luCommand(
+ rtr,
+ 'vtysh -c "show bgp ipv4 uni" | grep Display',
+ ".",
+ "none",
+ "BGP routes pre remove",
+ )
+ luCommand(
+ rtr,
+ "ip route show | cat -n | tail",
+ ".",
+ "none",
+ "Linux routes pre remove",
+ )
+ wait = 2 * num / 500
+ luCommand(
+ "ce1",
+ 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num),
+ ".",
+ "none",
+ "Removing {} routes".format(num),
+ )
+ luCommand(
+ "ce2",
+ 'vtysh -c "sharp remove routes 10.0.0.0 {}"'.format(num),
+ ".",
+ "none",
+ "Removing {} routes".format(num),
+ )
for rtr in rtrs:
- luCommand(rtr, 'vtysh -c "show bgp ipv4 uni" | grep Display',' 10 route', 'wait', 'BGP routes removed', wait, wait_time=10)
- luCommand(rtr, 'vtysh -c "show bgp ipv4 uni"','.', 'none', 'BGP routes post remove')
+ luCommand(
+ rtr,
+ 'vtysh -c "show bgp ipv4 uni" | grep Display',
+ " 10 route",
+ "wait",
+ "BGP routes removed",
+ wait,
+ wait_time=10,
+ )
+ luCommand(
+ rtr,
+ 'vtysh -c "show bgp ipv4 uni"',
+ ".",
+ "none",
+ "BGP routes post remove",
+ )
for rtr in rtrs:
- luCommand(rtr, 'ip route show | grep -c \\^10\\.','^0$', 'wait', 'Linux routes removed', wait, wait_time=10)
- luCommand(rtr, 'ip route show','.', 'none', 'Linux routes post remove')
- rtrs = ['r1', 'r3', 'r4']
+ luCommand(
+ rtr,
+ "ip route show | grep -c \\^10\\.",
+ "^0$",
+ "wait",
+ "Linux routes removed",
+ wait,
+ wait_time=10,
+ )
+ luCommand(rtr, "ip route show", ".", "none", "Linux routes post remove")
+ rtrs = ["r1", "r3", "r4"]
for rtr in rtrs:
- luCommand(rtr, 'ip route show vrf {}-cust1 | grep -c \\^10\\.'.format(rtr),'^0$','wait','VRF route removed',wait, wait_time=10)
-#done
+ luCommand(
+ rtr,
+ "ip route show vrf {}-cust1 | grep -c \\^10\\.".format(rtr),
+ "^0$",
+ "wait",
+ "VRF route removed",
+ wait,
+ wait_time=10,
+ )
+# done
from lutil import luCommand
+
num = 50000
-b = int(num/(256*256))
+b = int(num / (256 * 256))
if b > 0:
- r = num - b * (256*256)
+ r = num - b * (256 * 256)
else:
r = num
-c = int(r/256)
+c = int(r / 256)
if c > 0:
- d = r - c * 256 - 1
+ d = r - c * 256 - 1
else:
d = r
-wait = 2*num/1000
+wait = 2 * num / 1000
mem_z = {}
mem_b = {}
-rtrs = ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3', 'r4']
+rtrs = ["ce1", "ce2", "ce3", "r1", "r2", "r3", "r4"]
for rtr in rtrs:
- mem_z[rtr] = {'value': 0, 'units': 'unknown'}
- mem_b[rtr] = {'value': 0, 'units': 'unknown'}
- ret = luCommand(rtr, 'vtysh -c "show memory"', 'zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)', 'none', 'collect bgpd memory stats')
+ mem_z[rtr] = {"value": 0, "units": "unknown"}
+ mem_b[rtr] = {"value": 0, "units": "unknown"}
+ ret = luCommand(
+ rtr,
+ 'vtysh -c "show memory"',
+ "zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)",
+ "none",
+ "collect bgpd memory stats",
+ )
found = luLast()
if ret != False and found != None:
- mem_z[rtr] = {'value': int(found.group(1)), 'units': found.group(2)}
- mem_b[rtr] = {'value': int(found.group(3)), 'units': found.group(4)}
+ mem_z[rtr] = {"value": int(found.group(1)), "units": found.group(2)}
+ mem_b[rtr] = {"value": int(found.group(3)), "units": found.group(4)}
-luCommand('ce1', 'vtysh -c "show mem"', 'qmem sharpd', 'none','check if sharpd running')
+luCommand(
+ "ce1", 'vtysh -c "show mem"', "qmem sharpd", "none", "check if sharpd running"
+)
doSharp = False
found = luLast()
if ret != False and found != None:
doSharp = True
if doSharp != True:
- luCommand('ce1', 'vtysh -c "sharp data nexthop"', '.', 'pass','sharpd NOT running, skipping test')
+ luCommand(
+ "ce1",
+ 'vtysh -c "sharp data nexthop"',
+ ".",
+ "pass",
+ "sharpd NOT running, skipping test",
+ )
else:
- luCommand('ce1', 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.1 {}"'.format(num),'','pass','Adding {} routes'.format(num))
- luCommand('ce2', 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.2 {}"'.format(num),'','pass','Adding {} routes'.format(num))
- rtrs = ['ce1', 'ce2', 'ce3']
+ luCommand(
+ "ce1",
+ 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.1 {}"'.format(num),
+ "",
+ "pass",
+ "Adding {} routes".format(num),
+ )
+ luCommand(
+ "ce2",
+ 'vtysh -c "sharp install routes 10.0.0.0 nexthop 99.0.0.2 {}"'.format(num),
+ "",
+ "pass",
+ "Adding {} routes".format(num),
+ )
+ rtrs = ["ce1", "ce2", "ce3"]
for rtr in rtrs:
- luCommand(rtr, 'vtysh -c "show bgp ipv4 uni 10.{}.{}.{}"'.format(b,c,d), 'Last update:', 'wait', 'RXed last route, 10.{}.{}.{}'.format(b,c,d), wait, wait_time=10)
- luCommand(rtr, 'vtysh -c "show bgp ipv4 uni" | grep -c 10\\.\\*/32', str(num), 'wait', 'See all sharp routes in BGP', wait, wait_time=10)
- luCommand('r1', 'vtysh -c "show bgp vrf r1-cust1 ipv4 uni 10.{}.{}.{}"'.format(b,c,d),'99.0.0.1','wait','RXed -> 10.{}.{}.{} from CE1'.format(b,c,d), wait, wait_time=10)
- luCommand('r3', 'vtysh -c "show bgp vrf r3-cust1 ipv4 uni 10.{}.{}.{}"'.format(b,c,d),'99.0.0.2','wait','RXed -> 10.{}.{}.{} from CE2'.format(b,c,d), wait, wait_time=10)
- luCommand('r1', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'99.0.0.1','wait','see VPN safi -> 10.{}.{}.{} from CE1'.format(b,c,d))
- luCommand('r3', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'99.0.0.2','wait','see VPN safi -> 10.{}.{}.{} from CE2'.format(b,c,d))
- luCommand('r3', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'1.1.1.1','wait','see VPN safi -> 10.{}.{}.{} from CE1'.format(b,c,d))
- luCommand('r1', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'3.3.3.3','wait','see VPN safi -> 10.{}.{}.{} from CE2'.format(b,c,d))
- luCommand('r4', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'1.1.1.1','wait','see VPN safi -> 10.{}.{}.{} from CE1'.format(b,c,d))
- luCommand('r4', 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b,c,d),'3.3.3.3','wait','see VPN safi -> 10.{}.{}.{} from CE2'.format(b,c,d))
- rtrs = ['ce1', 'ce2', 'ce3']
+ luCommand(
+ rtr,
+ 'vtysh -c "show bgp ipv4 uni 10.{}.{}.{}"'.format(b, c, d),
+ "Last update:",
+ "wait",
+ "RXed last route, 10.{}.{}.{}".format(b, c, d),
+ wait,
+ wait_time=10,
+ )
+ luCommand(
+ rtr,
+ 'vtysh -c "show bgp ipv4 uni" | grep -c 10\\.\\*/32',
+ str(num),
+ "wait",
+ "See all sharp routes in BGP",
+ wait,
+ wait_time=10,
+ )
+ luCommand(
+ "r1",
+ 'vtysh -c "show bgp vrf r1-cust1 ipv4 uni 10.{}.{}.{}"'.format(b, c, d),
+ "99.0.0.1",
+ "wait",
+ "RXed -> 10.{}.{}.{} from CE1".format(b, c, d),
+ wait,
+ wait_time=10,
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show bgp vrf r3-cust1 ipv4 uni 10.{}.{}.{}"'.format(b, c, d),
+ "99.0.0.2",
+ "wait",
+ "RXed -> 10.{}.{}.{} from CE2".format(b, c, d),
+ wait,
+ wait_time=10,
+ )
+ luCommand(
+ "r1",
+ 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d),
+ "99.0.0.1",
+ "wait",
+ "see VPN safi -> 10.{}.{}.{} from CE1".format(b, c, d),
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d),
+ "99.0.0.2",
+ "wait",
+ "see VPN safi -> 10.{}.{}.{} from CE2".format(b, c, d),
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d),
+ "1.1.1.1",
+ "wait",
+ "see VPN safi -> 10.{}.{}.{} from CE1".format(b, c, d),
+ )
+ luCommand(
+ "r1",
+ 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d),
+ "3.3.3.3",
+ "wait",
+ "see VPN safi -> 10.{}.{}.{} from CE2".format(b, c, d),
+ )
+ luCommand(
+ "r4",
+ 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d),
+ "1.1.1.1",
+ "wait",
+ "see VPN safi -> 10.{}.{}.{} from CE1".format(b, c, d),
+ )
+ luCommand(
+ "r4",
+ 'vtysh -c "show bgp ipv4 vpn 10.{}.{}.{}"'.format(b, c, d),
+ "3.3.3.3",
+ "wait",
+ "see VPN safi -> 10.{}.{}.{} from CE2".format(b, c, d),
+ )
+ rtrs = ["ce1", "ce2", "ce3"]
for rtr in rtrs:
- luCommand(rtr, 'ip route get 10.{}.{}.{}'.format(b,c,d),'dev','wait','Route to 10.{}.{}.{} available'.format(b,c,d), wait, wait_time=10)
- luCommand(rtr, 'ip route show | grep -c \\^10\\.', str(num), 'wait', 'See {} linux routes'.format(num), wait, wait_time=10)
+ luCommand(
+ rtr,
+ "ip route get 10.{}.{}.{}".format(b, c, d),
+ "dev",
+ "wait",
+ "Route to 10.{}.{}.{} available".format(b, c, d),
+ wait,
+ wait_time=10,
+ )
+ luCommand(
+ rtr,
+ "ip route show | grep -c \\^10\\.",
+ str(num),
+ "wait",
+ "See {} linux routes".format(num),
+ wait,
+ wait_time=10,
+ )
- rtrs = ['r1', 'r3', 'r4']
+ rtrs = ["r1", "r3", "r4"]
for rtr in rtrs:
- luCommand(rtr, 'ip route get vrf {}-cust1 10.{}.{}.{}'.format(rtr,b,c,d),'dev','wait','VRF route available',wait, wait_time=10)
- luCommand(rtr, 'ip route show vrf {}-cust1 | grep -c \\^10\\.'.format(rtr), str(num), 'wait','See {} linux routes'.format(num), wait, wait_time=10)
- rtrs = ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3', 'r4']
+ luCommand(
+ rtr,
+ "ip route get vrf {}-cust1 10.{}.{}.{}".format(rtr, b, c, d),
+ "dev",
+ "wait",
+ "VRF route available",
+ wait,
+ wait_time=10,
+ )
+ luCommand(
+ rtr,
+ "ip route show vrf {}-cust1 | grep -c \\^10\\.".format(rtr),
+ str(num),
+ "wait",
+ "See {} linux routes".format(num),
+ wait,
+ wait_time=10,
+ )
+ rtrs = ["ce1", "ce2", "ce3", "r1", "r2", "r3", "r4"]
for rtr in rtrs:
- ret = luCommand(rtr, 'vtysh -c "show memory"', 'zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)', 'none', 'collect bgpd memory stats')
+ ret = luCommand(
+ rtr,
+ 'vtysh -c "show memory"',
+ "zebra: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*) .*bgpd: System allocator statistics: Total heap allocated: *(\d*) ([A-Za-z]*)",
+ "none",
+ "collect bgpd memory stats",
+ )
found = luLast()
if ret != False and found != None:
val_z = int(found.group(1))
- if mem_z[rtr]['units'] != found.group(2):
+ if mem_z[rtr]["units"] != found.group(2):
val_z *= 1000
- delta_z = val_z - int(mem_z[rtr]['value'])
- ave_z = float(delta_z)/float(num)
+ delta_z = val_z - int(mem_z[rtr]["value"])
+ ave_z = float(delta_z) / float(num)
val_b = int(found.group(3))
- if mem_b[rtr]['units'] != found.group(4):
+ if mem_b[rtr]["units"] != found.group(4):
val_b *= 1000
- delta_b = val_b - int(mem_b[rtr]['value'])
- ave_b = float(delta_b)/float(num)
- luCommand(rtr, 'vtysh -c "show thread cpu"', '.', 'pass', 'BGPd heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)'.format(mem_b[rtr]['value'], mem_b[rtr]['units'], found.group(3), found.group(4), round(ave_b,4)))
- luCommand(rtr, 'vtysh -c "show thread cpu"', '.', 'pass', 'Zebra heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)'.format(mem_z[rtr]['value'], mem_z[rtr]['units'], found.group(1), found.group(2), round(ave_z,4)))
-#done
+ delta_b = val_b - int(mem_b[rtr]["value"])
+ ave_b = float(delta_b) / float(num)
+ luCommand(
+ rtr,
+ 'vtysh -c "show thread cpu"',
+ ".",
+ "pass",
+ "BGPd heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)".format(
+ mem_b[rtr]["value"],
+ mem_b[rtr]["units"],
+ found.group(3),
+ found.group(4),
+ round(ave_b, 4),
+ ),
+ )
+ luCommand(
+ rtr,
+ 'vtysh -c "show thread cpu"',
+ ".",
+ "pass",
+ "Zebra heap: {0} {1} --> {2} {3} ({4} {1}/vpn route)".format(
+ mem_z[rtr]["value"],
+ mem_z[rtr]["units"],
+ found.group(1),
+ found.group(2),
+ round(ave_z, 4),
+ ),
+ )
+# done
import sys
import pytest
-sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
+sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../"))
from lib.ltemplate import *
+
def test_check_linux_vrf():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest('scripts/check_linux_vrf.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
+ ltemplateTest("scripts/check_linux_vrf.py", False, CliOnFail, CheckFunc)
+
def test_adjacencies():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
- ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
+ ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc)
+
def test_notification_check():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
+ ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc)
+
def SKIP_test_add_routes():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
- ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
+ ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc)
+
def test_check_routes():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
- ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
+ ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc)
+
-#manual data path setup test - remove once have bgp/zebra vrf path working
+# manual data path setup test - remove once have bgp/zebra vrf path working
def test_check_linux_mpls():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest('scripts/check_linux_mpls.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
+ ltemplateTest("scripts/check_linux_mpls.py", False, CliOnFail, CheckFunc)
+
def test_notification_check():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
+ ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc)
+
def test_check_scale_up():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest('scripts/scale_up.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
+ ltemplateTest("scripts/scale_up.py", False, CliOnFail, CheckFunc)
+
def test_notification_check():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
+ ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc)
+
def test_check_scale_down():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest('scripts/scale_down.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
+ ltemplateTest("scripts/scale_down.py", False, CliOnFail, CheckFunc)
+
def test_notification_check():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\', iproute2=\'4.9\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
- ltemplateTest('scripts/notification_check.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1', iproute2='4.9')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True, iproute2=\'4.9\')'
+ ltemplateTest("scripts/notification_check.py", False, CliOnFail, CheckFunc)
+
def SKIP_test_cleanup_all():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'4.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
- ltemplateTest('scripts/cleanup_all.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('4.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'4.1\', cli=True)'
+ ltemplateTest("scripts/cleanup_all.py", False, CliOnFail, CheckFunc)
+
-if __name__ == '__main__':
+if __name__ == "__main__":
retval = pytest.main(["-s"])
sys.exit(retval)
from mininet.topo import Topo
from lib.common_config import (
- start_topology, write_test_header,
- write_test_footer, reset_config_on_routers,
- create_route_maps, create_bgp_community_lists,
- create_prefix_lists, verify_bgp_community, step,
- check_address_types
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ create_route_maps,
+ create_bgp_community_lists,
+ create_prefix_lists,
+ verify_bgp_community,
+ step,
+ check_address_types,
)
from lib.topolog import logger
-from lib.bgp import (
- verify_bgp_convergence, create_router_bgp,
- clear_bgp_and_verify
-)
+from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
from lib.topojson import build_topo_from_json, build_config_from_json
# Save the Current Working Directory to find configuration files.
bgp_convergence = False
NETWORK = {
"ipv4": ["200.50.2.0", "200.50.2.1", "200.50.2.0"],
- "ipv6": ["1::1", "1::2", "1::0"]
+ "ipv6": ["1::1", "1::2", "1::0"],
}
MASK = {"ipv4": "32", "ipv6": "128"}
NET_MASK = {"ipv4": "24", "ipv6": "120"}
"pf_list_1": "0:0:1 0:0:10 0:0:100",
"pf_list_2": "0:0:2 0:0:20 0:0:200",
"agg_1": "0:0:1 0:0:2 0:0:10 0:0:20 0:0:100 0:0:200 2:1:1 "
- "2:2:1 2:3:1 2:4:1 2:5:1",
- "agg_2": "0:0:2 0:0:20 0:0:200 2:1:1 "
- "2:2:1 2:3:1 2:4:1 2:5:1"
+ "2:2:1 2:3:1 2:4:1 2:5:1",
+ "agg_2": "0:0:2 0:0:20 0:0:200 2:1:1 " "2:2:1 2:3:1 2:4:1 2:5:1",
}
STANDARD_COMM = {
"r1": "1:1 1:2 1:3 1:4 1:5",
"pf_list_1": "0:1 0:10 0:100",
"pf_list_2": "0:2 0:20 0:200",
"agg_1": "0:1 0:2 0:10 0:20 0:100 0:200 2:1 2:2 2:3 2:4 2:5",
- "agg_2": "0:2 0:20 0:200 2:1 2:2 2:3 2:4 2:5"
+ "agg_2": "0:2 0:20 0:200 2:1 2:2 2:3 2:4 2:5",
}
##tgen.mininet_cli()
# Api call verify whether BGP is converged
bgp_convergence = verify_bgp_convergence(tgen, topo)
- assert bgp_convergence is True, ("setup_module :Failed \n Error:"
- " {}".format(bgp_convergence))
+ assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format(
+ bgp_convergence
+ )
ADDR_TYPES = check_address_types()
logger.info("Running setup_module() done")
# Stop toplogy and Remove tmp files
tgen.stop_topology()
- logger.info("Testsuite end time: {}".
- format(time.asctime(time.localtime(time.time()))))
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
logger.info("=" * 40)
"action": "permit",
"seq_id": "10",
"set": {
- "large_community": {
- "num": LARGE_COMM["r1"]
- },
- "community": {
- "num": STANDARD_COMM["r1"]
- }
- }
+ "large_community": {"num": LARGE_COMM["r1"]},
+ "community": {"num": STANDARD_COMM["r1"]},
+ },
}
]
}
step("Configuring LC1 on r1")
result = create_route_maps(tgen, input_dict_1)
- assert result is True, "Test case {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
# Configure neighbor for route map
input_dict_2 = {
"unicast": {
"advertise_networks": [
{
- "network": "%s/%s" % (
- NETWORK["ipv4"][0], MASK["ipv4"]),
- "no_of_network": 4
+ "network": "%s/%s"
+ % (NETWORK["ipv4"][0], MASK["ipv4"]),
+ "no_of_network": 4,
}
],
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {
- "route_maps": [{
- "name": "LC1",
- "direction": "out"
- }]
+ "route_maps": [
+ {"name": "LC1", "direction": "out"}
+ ]
}
}
},
"r3": {
"dest_link": {
"r1-link1": {
- "route_maps": [{
- "name": "LC1",
- "direction": "out"
- }]
+ "route_maps": [
+ {"name": "LC1", "direction": "out"}
+ ]
}
}
- }
- }
+ },
+ },
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
{
- "network": "%s/%s" % (
- NETWORK["ipv6"][0], MASK["ipv6"]),
- "no_of_network": 4
+ "network": "%s/%s"
+ % (NETWORK["ipv6"][0], MASK["ipv6"]),
+ "no_of_network": 4,
}
],
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {
- "route_maps": [{
- "name": "LC1",
- "direction": "out"
- }]
+ "route_maps": [
+ {"name": "LC1", "direction": "out"}
+ ]
}
}
},
"r3": {
"dest_link": {
"r1-link1": {
- "route_maps": [{
- "name": "LC1",
- "direction": "out"
- }]
+ "route_maps": [
+ {"name": "LC1", "direction": "out"}
+ ]
}
}
- }
- }
+ },
+ },
}
- }
+ },
}
}
}
step("Applying LC1 on r1 neighbors and advertising networks")
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Test case {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
CONFIG_ROUTER_R1 = True
"action": "permit",
"seq_id": "10",
"set": {
- "large_community": {
- "num": LARGE_COMM["r2"]
- },
- "community": {
- "num": STANDARD_COMM["r2"]
- }
- }
+ "large_community": {"num": LARGE_COMM["r2"]},
+ "community": {"num": STANDARD_COMM["r2"]},
+ },
}
]
}
step("Configuring route-maps LC2 on r2")
result = create_route_maps(tgen, input_dict)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
input_dict_1 = {
"r2": {
"r4": {
"dest_link": {
"r2-link1": {
- "route_maps": [{
- "name": "LC2",
- "direction": "out"
- }]
+ "route_maps": [
+ {"name": "LC2", "direction": "out"}
+ ]
}
}
}
"r4": {
"dest_link": {
"r2-link1": {
- "route_maps": [{
- "name": "LC2",
- "direction": "out"
- }]
+ "route_maps": [
+ {"name": "LC2", "direction": "out"}
+ ]
}
}
}
}
}
- }
+ },
}
}
}
step("Applying LC2 on r2 neighbors in out direction")
result = create_router_bgp(tgen, topo, input_dict_1)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
CONFIG_ROUTER_R2 = True
"set": {
"large_community": {
"num": LARGE_COMM["r2"],
- "action": "additive"
+ "action": "additive",
},
"community": {
"num": STANDARD_COMM["r2"],
- "action": "additive"
- }
- }
+ "action": "additive",
+ },
+ },
}
]
}
step("Configuring LC2 with community attributes as additive")
result = create_route_maps(tgen, input_dict)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
# tgen.mininet_cli()
CONFIG_ROUTER_ADDITIVE = True
"pf_list_1": [
{
"seqid": "10",
- "network": "%s/%s" % (NETWORK["ipv4"][0],
- MASK["ipv4"]),
- "action": "permit"
+ "network": "%s/%s" % (NETWORK["ipv4"][0], MASK["ipv4"]),
+ "action": "permit",
}
],
"pf_list_2": [
{
"seqid": "10",
- "network": "%s/%s" % (NETWORK["ipv4"][1],
- MASK["ipv4"]),
- "action": "permit"
+ "network": "%s/%s" % (NETWORK["ipv4"][1], MASK["ipv4"]),
+ "action": "permit",
}
- ]
+ ],
},
"ipv6": {
"pf_list_3": [
{
"seqid": "10",
- "network": "%s/%s" % (NETWORK["ipv6"][0],
- MASK["ipv6"]),
- "action": "permit"
+ "network": "%s/%s" % (NETWORK["ipv6"][0], MASK["ipv6"]),
+ "action": "permit",
}
],
"pf_list_4": [
{
"seqid": "10",
- "network": "%s/%s" % (NETWORK["ipv6"][1],
- MASK["ipv6"]),
- "action": "permit"
+ "network": "%s/%s" % (NETWORK["ipv6"][1], MASK["ipv6"]),
+ "action": "permit",
}
- ]
- }
-
+ ],
+ },
}
}
}
step("Configuring prefix-lists on r1 to filter networks")
result = create_prefix_lists(tgen, input_dict_1)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
input_dict_2 = {
"r1": {
{
"action": "permit",
"seq_id": 10,
- "match": {
- "ipv4": {
- "prefix_lists": "pf_list_1"
- }
- },
+ "match": {"ipv4": {"prefix_lists": "pf_list_1"}},
"set": {
- "large_community": {
- "num": LARGE_COMM["pf_list_1"]
- },
- "community": {
- "num": STANDARD_COMM["pf_list_1"]
- }
- }
+ "large_community": {"num": LARGE_COMM["pf_list_1"]},
+ "community": {"num": STANDARD_COMM["pf_list_1"]},
+ },
},
{
"action": "permit",
"seq_id": 20,
- "match": {
- "ipv6": {
- "prefix_lists": "pf_list_3"
- }
- },
+ "match": {"ipv6": {"prefix_lists": "pf_list_3"}},
"set": {
- "large_community": {
- "num": LARGE_COMM["pf_list_1"]
- },
- "community": {
- "num": STANDARD_COMM["pf_list_1"]
- }
- }
+ "large_community": {"num": LARGE_COMM["pf_list_1"]},
+ "community": {"num": STANDARD_COMM["pf_list_1"]},
+ },
},
{
"action": "permit",
"seq_id": 30,
- "match": {
- "ipv4": {
- "prefix_lists": "pf_list_2"
- }
- },
+ "match": {"ipv4": {"prefix_lists": "pf_list_2"}},
"set": {
- "large_community": {
- "num": LARGE_COMM["pf_list_2"]
- },
- "community": {
- "num": STANDARD_COMM["pf_list_2"]
- }
- }
+ "large_community": {"num": LARGE_COMM["pf_list_2"]},
+ "community": {"num": STANDARD_COMM["pf_list_2"]},
+ },
},
{
"action": "permit",
"seq_id": 40,
- "match": {
- "ipv6": {
- "prefix_lists": "pf_list_4"
- }
- },
+ "match": {"ipv6": {"prefix_lists": "pf_list_4"}},
"set": {
- "large_community": {
- "num": LARGE_COMM["pf_list_2"]
- },
- "community": {
- "num": STANDARD_COMM["pf_list_2"]
- }
- }
- }
+ "large_community": {"num": LARGE_COMM["pf_list_2"]},
+ "community": {"num": STANDARD_COMM["pf_list_2"]},
+ },
+ },
]
}
}
}
- step("Applying prefix-lists match in route-map LC1 on r1. Setting"
- " community attritbute for filtered networks")
+ step(
+ "Applying prefix-lists match in route-map LC1 on r1. Setting"
+ " community attritbute for filtered networks"
+ )
result = create_route_maps(tgen, input_dict_2)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
config_router_additive(tgen, topo, tc_name)
"action": "permit",
"name": "ANY",
"value": LARGE_COMM["pf_list_1"],
- "large": True
+ "large": True,
},
{
"community_type": "standard",
"action": "permit",
"name": "ANY",
"value": STANDARD_COMM["pf_list_1"],
- }
+ },
]
}
}
step("Configuring bgp community lists on r4")
result = create_bgp_community_lists(tgen, input_dict_3)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
input_dict_4 = {
"r4": {
"seq_id": "10",
"match": {
"large_community_list": {"id": "ANY"},
- "community_list": {"id": "ANY"}
+ "community_list": {"id": "ANY"},
},
- "set": {
- "path": {
- "as_num": "4000000",
- "as_action": "prepend"
- }
- }
+ "set": {"path": {"as_num": "4000000", "as_action": "prepend"}},
}
]
}
step("Applying community list on route-map on r4")
result = create_route_maps(tgen, input_dict_4)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
input_dict_5 = {
"r4": {
"r5": {
"dest_link": {
"r4-link1": {
- "route_maps": [{
- "name": "LC4",
- "direction": "out"
- }]
+ "route_maps": [
+ {"name": "LC4", "direction": "out"}
+ ]
}
}
}
"r5": {
"dest_link": {
"r4-link1": {
- "route_maps": [{
- "name": "LC4",
- "direction": "out"
- }]
+ "route_maps": [
+ {"name": "LC4", "direction": "out"}
+ ]
}
}
}
}
}
- }
+ },
}
}
}
step("Applying route-map LC4 out from r4 to r5 ")
result = create_router_bgp(tgen, topo, input_dict_5)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
#####################################################
"seq_id": "10",
"set": {
"large_community": {"num": LARGE_COMM["r1"]},
- "community": {"num": STANDARD_COMM["r1"]}
- }
+ "community": {"num": STANDARD_COMM["r1"]},
+ },
}
]
}
step("Trying to set bgp communities")
result = create_route_maps(tgen, input_dict)
- assert result is True, "Test case {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]],
- input_dict)
+ result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], input_dict)
assert result is True, "Test case {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- result = verify_bgp_community(tgen, adt, "r3", [NETWORK[adt][0]],
- input_dict)
+ result = verify_bgp_community(tgen, adt, "r3", [NETWORK[adt][0]], input_dict)
assert result is True, "Test case {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
input_dict_1 = {
"largeCommunity": LARGE_COMM["r1"],
- "community": STANDARD_COMM["r1"]
+ "community": STANDARD_COMM["r1"],
}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]],
- input_dict_1)
+ result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], input_dict_1)
assert result is True, "Test case {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
input_dict_3 = {
"largeCommunity": LARGE_COMM["r2"],
- "community": STANDARD_COMM["r2"]
+ "community": STANDARD_COMM["r2"],
}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][1]],
- input_dict_3)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][1]], input_dict_3)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
input_dict_1 = {
"largeCommunity": "%s %s" % (LARGE_COMM["r1"], LARGE_COMM["r2"]),
- "community": "%s %s" % (STANDARD_COMM["r1"], STANDARD_COMM["r2"])
+ "community": "%s %s" % (STANDARD_COMM["r1"], STANDARD_COMM["r2"]),
}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]],
- input_dict_1)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], input_dict_1)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
config_for_as_path(tgen, topo, tc_name)
input_dict = {
- "largeCommunity": "%s %s" % (
- LARGE_COMM["pf_list_1"], LARGE_COMM["r2"]),
- "community": "%s %s" % (
- STANDARD_COMM["pf_list_1"], STANDARD_COMM["r2"]),
+ "largeCommunity": "%s %s" % (LARGE_COMM["pf_list_1"], LARGE_COMM["r2"]),
+ "community": "%s %s" % (STANDARD_COMM["pf_list_1"], STANDARD_COMM["r2"]),
}
input_dict_1 = {
- "largeCommunity": "%s %s" % (
- LARGE_COMM["pf_list_2"], LARGE_COMM["r2"]),
- "community": "%s %s" % (
- STANDARD_COMM["pf_list_2"], STANDARD_COMM["r2"]),
+ "largeCommunity": "%s %s" % (LARGE_COMM["pf_list_2"], LARGE_COMM["r2"]),
+ "community": "%s %s" % (STANDARD_COMM["pf_list_2"], STANDARD_COMM["r2"]),
}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, "r5", [NETWORK[adt][0]],
- input_dict)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_community(tgen, adt, "r5", [NETWORK[adt][0]], input_dict)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
- result = verify_bgp_community(tgen, adt, "r5", [NETWORK[adt][1]],
- input_dict_1, expected=False)
+ result = verify_bgp_community(
+ tgen, adt, "r5", [NETWORK[adt][1]], input_dict_1, expected=False
+ )
- assert result is not True, "Test case {} : Should fail \n Error: {}". \
- format(tc_name, result)
+ assert result is not True, "Test case {} : Should fail \n Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
"action": "permit",
"name": "ANY",
"value": "1:1:1",
- "large": True
+ "large": True,
},
{
"community_type": "standard",
"action": "permit",
"name": "ALL",
"value": "1:1:1 1:2:1 1:3:1 1:4:1 1:5:1 2:1:1 2:2:1",
- "large": True
+ "large": True,
},
{
"community_type": "expanded",
"action": "permit",
"name": "EXP_ALL",
"value": "1:1:1 1:2:1 1:3:1 1:4:1 1:5:1 2:[1-5]:1",
- "large": True
- }
+ "large": True,
+ },
]
}
}
step("Create bgp community lists for ANY, EXACT and EXP_ALL match")
result = create_bgp_community_lists(tgen, input_dict_1)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
input_dict_2 = {
"r4": {
{
"action": "permit",
"seq_id": "10",
- "match": {"large-community-list": {"id": "ANY"}}
+ "match": {"large-community-list": {"id": "ANY"}},
},
{
"action": "permit",
"seq_id": "20",
- "match": {"large-community-list": {"id": "EXACT"}}
+ "match": {"large-community-list": {"id": "EXACT"}},
},
{
"action": "permit",
"seq_id": "30",
- "match": {"large-community-list": {"id": "EXP_ALL"}}
- }
+ "match": {"large-community-list": {"id": "EXP_ALL"}},
+ },
]
}
}
step("Applying bgp community lits on LC4 route-map")
result = create_route_maps(tgen, input_dict_2)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
input_dict_3 = {
"r4": {
"r5": {
"dest_link": {
"r4-link1": {
- "route_maps": [{
- "name": "LC4",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "LC4", "direction": "in"}
+ ]
}
}
}
"r5": {
"dest_link": {
"r4-link1": {
- "route_maps": [{
- "name": "LC4",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "LC4", "direction": "in"}
+ ]
}
}
}
}
}
- }
+ },
}
}
}
step("Apply route-mpa LC4 on r4 for r2 neighbor, direction 'in'")
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
input_dict_4 = {
"largeCommunity": "1:1:1 1:2:1 1:3:1 1:4:1 1:5:1 2:1:1 2:2:1 2:3:1 "
- "2:4:1 2:5:1"
+ "2:4:1 2:5:1"
}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]],
- input_dict_4)
- assert result is True, "Test case {} : Should fail \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_community(tgen, adt, "r4", [NETWORK[adt][0]], input_dict_4)
+ assert result is True, "Test case {} : Should fail \n Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
-#@pytest.mark.skip(reason="as-set not working for ipv6")
+# @pytest.mark.skip(reason="as-set not working for ipv6")
def test_large_community_aggregate_network(request):
"""
Restart router and check if large community and community
input_dict = {
"community": STANDARD_COMM["agg_1"],
- "largeCommunity": LARGE_COMM["agg_1"]
+ "largeCommunity": LARGE_COMM["agg_1"],
}
input_dict_1 = {
"unicast": {
"aggregate_address": [
{
- "network": "%s/%s" % (
- NETWORK["ipv4"][2], NET_MASK["ipv4"]),
- "as_set": True
+ "network": "%s/%s"
+ % (NETWORK["ipv4"][2], NET_MASK["ipv4"]),
+ "as_set": True,
}
]
}
"unicast": {
"aggregate_address": [
{
- "network": "%s/%s" % (
- NETWORK["ipv6"][2], NET_MASK["ipv6"]),
- "as_set": True
+ "network": "%s/%s"
+ % (NETWORK["ipv6"][2], NET_MASK["ipv6"]),
+ "as_set": True,
}
]
}
- }
+ },
}
}
}
step("Configuring aggregate address as-set on r2")
result = create_router_bgp(tgen, topo, input_dict_1)
- assert result is True, "Test case {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, "r4",
- ["%s/%s" % (NETWORK[adt][2],
- NET_MASK[adt])],
- input_dict)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_community(
+ tgen, adt, "r4", ["%s/%s" % (NETWORK[adt][2], NET_MASK[adt])], input_dict
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
input_dict_2 = {
"r1": {
"unicast": {
"advertise_networks": [
{
- "network": "%s/%s" % (
- NETWORK["ipv4"][0], MASK["ipv4"]),
+ "network": "%s/%s"
+ % (NETWORK["ipv4"][0], MASK["ipv4"]),
"no_of_network": 1,
- "delete": True
+ "delete": True,
}
]
}
"unicast": {
"advertise_networks": [
{
- "network": "%s/%s" % (
- NETWORK["ipv6"][0], MASK["ipv6"]),
+ "network": "%s/%s"
+ % (NETWORK["ipv6"][0], MASK["ipv6"]),
"no_of_network": 1,
- "delete": True
+ "delete": True,
}
]
}
- }
+ },
}
}
}
step("Stop advertising one of the networks")
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Test case {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(tc_name, result)
input_dict_3 = {
"community": STANDARD_COMM["agg_2"],
- "largeCommunity": LARGE_COMM["agg_2"]
+ "largeCommunity": LARGE_COMM["agg_2"],
}
for adt in ADDR_TYPES:
step("Verifying bgp community values on r5 is also modified")
- result = verify_bgp_community(tgen, adt, "r4",
- ["%s/%s" % (NETWORK[adt][2],
- NET_MASK[adt])],
- input_dict_3)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_community(
+ tgen, adt, "r4", ["%s/%s" % (NETWORK[adt][2], NET_MASK[adt])], input_dict_3
+ )
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
"community_type": "standard",
"action": "permit",
"name": "ANY",
- "value": "0:-1"
+ "value": "0:-1",
}
]
}
step("Checking boundary value for community 0:-1")
result = create_bgp_community_lists(tgen, input_dict)
- assert result is not True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is not True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step("Checking community attribute 0:65536")
input_dict_2 = {
"community_type": "standard",
"action": "permit",
"name": "ANY",
- "value": "0:65536"
+ "value": "0:65536",
}
]
}
step("Checking boundary value for community 0:65536")
result = create_bgp_community_lists(tgen, input_dict_2)
- assert result is not True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is not True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step("Checking boundary value for community 0:4294967296")
input_dict_3 = {
"action": "permit",
"name": "ANY",
"value": "0:4294967296",
- "large": True
+ "large": True,
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict_3)
- assert result is not True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is not True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step("Checking boundary value for community 0:-1:1")
input_dict_4 = {
"action": "permit",
"name": "ANY",
"value": "0:-1:1",
- "large": True
+ "large": True,
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict_4)
- assert result is not True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ assert result is not True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
def test_large_community_after_clear_bgp(request):
reset_config_on_routers(tgen)
config_router_r1(tgen, topo, tc_name)
- input_dict = {
- "largeCommunity": LARGE_COMM["r1"],
- "community": STANDARD_COMM["r1"]
- }
+ input_dict = {"largeCommunity": LARGE_COMM["r1"], "community": STANDARD_COMM["r1"]}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]],
- input_dict)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], input_dict)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
step("Clearing BGP on r1")
clear_bgp_and_verify(tgen, topo, "r1")
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]],
- input_dict)
- assert result is True, "Test case {} : Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_bgp_community(tgen, adt, "r2", [NETWORK[adt][0]], input_dict)
+ assert result is True, "Test case {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
#
# Copyright (c) 2019 by VMware, Inc. ("VMware")
# Used Copyright (c) 2018 by Network Device Education Foundation,
-#Inc. ("NetDEF") in this file.
+# Inc. ("NetDEF") in this file.
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
from mininet.topo import Topo
from lib.common_config import (
- start_topology, write_test_header,
- write_test_footer, reset_config_on_routers,
- create_route_maps, create_bgp_community_lists,
- create_prefix_lists, verify_bgp_community, step,
- verify_create_community_list, delete_route_maps,
- verify_route_maps, create_static_routes,
- check_address_types
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ reset_config_on_routers,
+ create_route_maps,
+ create_bgp_community_lists,
+ create_prefix_lists,
+ verify_bgp_community,
+ step,
+ verify_create_community_list,
+ delete_route_maps,
+ verify_route_maps,
+ create_static_routes,
+ check_address_types,
)
from lib.topolog import logger
-from lib.bgp import (
- verify_bgp_convergence, create_router_bgp,
- clear_bgp_and_verify
-)
+from lib.bgp import verify_bgp_convergence, create_router_bgp, clear_bgp_and_verify
from lib.topojson import build_topo_from_json, build_config_from_json
# Reading the data from JSON File for topology and configuration creation
# Building topology from json file
build_topo_from_json(tgen, topo)
+
def setup_module(mod):
"""
Sets up the pytest environment
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
- logger.info("="*40)
+ logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# Api call verify whether BGP is converged
# Ipv4
bgp_convergence = verify_bgp_convergence(tgen, topo)
- assert bgp_convergence is True, ("setup_module :Failed \n Error:"
- " {}".format(bgp_convergence))
+ assert bgp_convergence is True, "setup_module :Failed \n Error:" " {}".format(
+ bgp_convergence
+ )
ADDR_TYPES = check_address_types()
logger.info("Running setup_module() done")
# Stop toplogy and Remove tmp files
tgen.stop_topology()
- logger.info("Testsuite end time: {}".\
- format(time.asctime(time.localtime(time.time()))))
- logger.info("="*40)
+ logger.info(
+ "Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
+ )
+ logger.info("=" * 40)
+
#####################################################
#
"action": "permit",
"name": "LC_1_STD",
"value": "2:1:1 2:1:2 1:2:3",
- "large": True
+ "large": True,
},
{
"community_type": "standard",
"action": "permit",
"name": "LC_2_STD",
"value": "3:1:1 3:1:2",
- "large": True
- }
+ "large": True,
+ },
]
}
}
result = create_bgp_community_lists(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify BGP large community is created")
result = verify_create_community_list(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create srtandard large community list with in-correct values")
input_dict = {
"action": "permit",
"name": "LC_1_STD_ERR",
"value": "0:0:0",
- "large": True
+ "large": True,
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
## TODO should fail
step("Verify BGP large community is created")
result = verify_create_community_list(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
"action": "permit",
"name": "LC_1_EXP",
"value": "1:1:200 1:2:* 3:2:1",
- "large": True
+ "large": True,
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify BGP large community is created")
result = verify_create_community_list(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
"action": "permit",
"name": "LC_DEL",
"value": "1:2:1 1:3:1 2:1:1 2:2:2 3:3:3",
- "large": True
+ "large": True,
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create route map")
input_dict_2 = {
"set": {
"large_community": {
"num": "1:2:1 1:3:1 2:10:1 3:3:3 4:4:4 5:5:5",
- "action": "additive"
+ "action": "additive",
}
- }
+ },
}
]
}
{
"action": "permit",
"seq_id": "10",
- "set": {
- "large_comm_list": {
- "id": "LC_DEL",
- "delete": True
- }
- }
+ "set": {"large_comm_list": {"id": "LC_DEL", "delete": True}},
}
]
}
- }
+ },
}
result = create_route_maps(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map and advertise networks")
input_dict_3 = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": "200.50.2.0/32"}
- ],
+ "advertise_networks": [{"network": "200.50.2.0/32"}],
"neighbor": {
"r2": {
"dest_link": {
"r1": {
- "route_maps": [{
- "name": "RM_R2_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R2_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
- }
+ },
}
},
"ipv6": {
"unicast": {
- "advertise_networks": [
- {"network": "1::1/128"}
- ],
+ "advertise_networks": [{"network": "1::1/128"}],
"neighbor": {
"r2": {
"dest_link": {
"r1": {
- "route_maps": [{
- "name": "RM_R2_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R2_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
- }
+ },
}
- }
+ },
}
}
},
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify Community-list")
dut = "r4"
- input_dict_4 = {
- "largeCommunity": "2:10:1 4:4:4 5:5:5"
- }
+ input_dict_4 = {"largeCommunity": "2:10:1 4:4:4 5:5:5"}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_4)
+ result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"set": {
"large_community": {
"num": "200:200:1 200:200:10 200:200:20000",
- "action": "additive"
+ "action": "additive",
}
- }
+ },
}
]
}
}
}
result = create_route_maps(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map and advertise networks")
input_dict_2 = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": "200.50.2.0/32"}
- ]
+ "advertise_networks": [{"network": "200.50.2.0/32"}]
}
},
"ipv6": {
- "unicast": {
- "advertise_networks": [
- {"network": "1::1/128"}
- ]
- }
- }
+ "unicast": {"advertise_networks": [{"network": "1::1/128"}]}
+ },
}
}
},
"r6": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_LC1",
- "direction": "out"
- }]
+ "route_maps": [
+ {"name": "RM_LC1", "direction": "out"}
+ ]
}
}
}
"r6": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_LC1",
- "direction": "out"
- }]
+ "route_maps": [
+ {"name": "RM_LC1", "direction": "out"}
+ ]
}
}
}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify large-community-list")
dut = "r6"
- input_dict_4 = {
- "largeCommunity": "200:200:1 200:200:10 200:200:20000"
- }
+ input_dict_4 = {"largeCommunity": "200:200:1 200:200:10 200:200:20000"}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_4)
+ result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
step("Delete route map reference by community-list")
- input_dict_3 = {
- "r4": {
- "route_maps": ["RM_LC1"]
- }
- }
+ input_dict_3 = {"r4": {"route_maps": ["RM_LC1"]}}
result = delete_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify route map is deleted")
result = verify_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify large-community-list")
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_4, expected=False)
+ result = verify_bgp_community(
+ tgen, adt, dut, NETWORKS[adt], input_dict_4, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"set": {
"large_community": {
"num": "0:0:1 0:0:10 0:0:100 2:0:1 2:0:2 2:0:3"
- " 2:0:4 2:0:5",
- "action": "additive"
+ " 2:0:4 2:0:5",
+ "action": "additive",
}
- }
+ },
}
],
"RM_R4_OUT": [
"set": {
"large_community": {
"num": "0:0:1 0:0:10 0:0:10000 2:0:1 2:0:2",
- "action": "additive"
+ "action": "additive",
}
- }
+ },
}
- ]
+ ],
}
}
}
result = create_route_maps(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map and advertise networks")
input_dict_2 = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": "200.50.2.0/32"}
- ]
+ "advertise_networks": [{"network": "200.50.2.0/32"}]
}
},
"ipv6": {
- "unicast": {
- "advertise_networks": [
- {"network": "1::1/128"}
- ]
- }
- }
+ "unicast": {"advertise_networks": [{"network": "1::1/128"}]}
+ },
}
}
},
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
},
"r6": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R4_OUT",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
},
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
},
"r6": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R4_OUT",
+ "direction": "out",
+ }
+ ]
}
}
- }
+ },
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify large-community-list")
dut = "r6"
input_dict_4 = {
- "largeCommunity":
- "0:0:1 0:0:10 0:0:100 0:0:10000 2:0:1 2:0:2 2:0:3 2:0:4 2:0:5"
+ "largeCommunity": "0:0:1 0:0:10 0:0:100 0:0:10000 2:0:1 2:0:2 2:0:3 2:0:4 2:0:5"
}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_4)
+ result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"set": {
"large_community": {
"num": "0:0:1 0:0:10 0:0:100 2:0:1 2:0:2 2:0:3"
- " 2:0:4",
- "action": "additive"
+ " 2:0:4",
+ "action": "additive",
}
- }
+ },
}
]
}
{
"action": "permit",
"seq_id": "10",
- "set": {
- "large_community": {
- "num": "none"
- }
- }
+ "set": {"large_community": {"num": "none"}},
}
]
}
- }
+ },
}
result = create_route_maps(tgen, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map")
input_dict_2 = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": "200.50.2.0/32"}
- ]
+ "advertise_networks": [{"network": "200.50.2.0/32"}]
}
},
"ipv6": {
- "unicast": {
- "advertise_networks": [
- {"network": "1::1/128"}
- ]
- }
- }
+ "unicast": {"advertise_networks": [{"network": "1::1/128"}]}
+ },
}
}
},
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
}
}
- }
+ },
}
}
},
"r4": {
"dest_link": {
"r6": {
- "route_maps": [{
- "name": "RM_R6_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R6_IN", "direction": "in"}
+ ]
}
}
}
"r4": {
"dest_link": {
"r6": {
- "route_maps": [{
- "name": "RM_R6_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R6_IN", "direction": "in"}
+ ]
}
}
}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify Community-list")
dut = "r6"
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- expected=False)
+ result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], expected=False)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
input_dict = {
"r1": {
"static_routes": [
- {
- "network": "200.50.2.0/32",
- "next_hop": "10.0.0.6"
- },
- {
- "network": "1::1/128",
- "next_hop": "fd00:0:0:1::2"
- }
+ {"network": "200.50.2.0/32", "next_hop": "10.0.0.6"},
+ {"network": "1::1/128", "next_hop": "fd00:0:0:1::2"},
]
}
}
result = create_static_routes(tgen, input_dict)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("redistribute static routes")
input_dict_1 = {
- "r1":{
+ "r1": {
"bgp": {
"address_family": {
"ipv4": {
"redistribute": [
{
"redist_type": "static",
- "attribute": "route-map RM_R2_OUT"
+ "attribute": "route-map RM_R2_OUT",
},
{
"redist_type": "connected",
- "attribute": "route-map RM_R2_OUT"
- }
+ "attribute": "route-map RM_R2_OUT",
+ },
]
}
},
"redistribute": [
{
"redist_type": "static",
- "attribute": "route-map RM_R2_OUT"
+ "attribute": "route-map RM_R2_OUT",
},
{
"redist_type": "connected",
- "attribute": "route-map RM_R2_OUT"
- }
+ "attribute": "route-map RM_R2_OUT",
+ },
]
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_1)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create route map")
input_dict_3 = {
"r1": {
"route_maps": {
- "RM_R2_OUT": [{
- "action": "permit",
- "set": {
- "large_community": {"num":"55:55:55 555:555:555"}
- }
- }]
+ "RM_R2_OUT": [
+ {
+ "action": "permit",
+ "set": {"large_community": {"num": "55:55:55 555:555:555"}},
+ }
+ ]
}
- }
+ }
}
result = create_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
- step("Verify large-community-list for static and connected ipv4 route on"
- " r2")
+ step("Verify large-community-list for static and connected ipv4 route on" " r2")
- input_dict_5 = {
- "largeCommunity": "55:55:55 555:555:555"
- }
+ input_dict_5 = {"largeCommunity": "55:55:55 555:555:555"}
if "ipv4" in ADDR_TYPES:
dut = "r2"
networks = ["200.50.2.0/32", "1.0.1.17/32"]
- result = verify_bgp_community(tgen, "ipv4", dut, networks,
- input_dict_5)
+ result = verify_bgp_community(tgen, "ipv4", dut, networks, input_dict_5)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- step("Verify large-community-list for static and connected ipv4 route"
- " on r4")
+ step("Verify large-community-list for static and connected ipv4 route" " on r4")
dut = "r4"
networks = ["200.50.2.0/32", "1.0.1.17/32"]
- result = verify_bgp_community(tgen, "ipv4", dut, networks,
- input_dict_5)
+ result = verify_bgp_community(tgen, "ipv4", dut, networks, input_dict_5)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
if "ipv6" in ADDR_TYPES:
- step("Verify large-community-list for static and connected ipv6 route"
- " on r2")
+ step("Verify large-community-list for static and connected ipv6 route" " on r2")
dut = "r2"
networks = ["1::1/128", "2001:db8:f::1:17/128"]
- result = verify_bgp_community(tgen, "ipv6", dut, networks,
- input_dict_5)
+ result = verify_bgp_community(tgen, "ipv6", dut, networks, input_dict_5)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
- step("Verify large-community-list for static and connected ipv6 route"
- " on r4")
+ step("Verify large-community-list for static and connected ipv6 route" " on r4")
dut = "r4"
networks = ["1::1/128", "2001:db8:f::1:17/128"]
- result = verify_bgp_community(tgen, "ipv6", dut, networks,
- input_dict_5)
+ result = verify_bgp_community(tgen, "ipv6", dut, networks, input_dict_5)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"action": "permit",
"name": "Test",
"value": "1:2:1 1:1:10 1:3:100",
- "large": True
+ "large": True,
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create route map")
input_dict_3 = {
{
"action": "permit",
"seq_id": "10",
- "set": {
- "large_comm_list": {
- "id": "Test",
- "delete": True
- }
- }
+ "set": {"large_comm_list": {"id": "Test", "delete": True}},
}
]
}
"set": {
"large_community": {
"num": "1:2:1 1:1:10 1:3:100 2:1:1 2:2:2 2:3:3"
- " 2:4:4 2:5:5",
- "action": "additive"
+ " 2:4:4 2:5:5",
+ "action": "additive",
}
- }
+ },
}
]
}
- }
+ },
}
result = create_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map and advertise networks")
input_dict_4 = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": "200.50.2.0/32"}
- ]
+ "advertise_networks": [{"network": "200.50.2.0/32"}]
}
},
"ipv6": {
- "unicast": {
- "advertise_networks": [
- {"network": "1::1/128"}
- ]
- }
- }
+ "unicast": {"advertise_networks": [{"network": "1::1/128"}]}
+ },
}
}
},
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
}
}
- }
+ },
}
}
},
"r4": {
"dest_link": {
"r6": {
- "route_maps": [{
- "name": "RM_R6_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R6_IN", "direction": "in"}
+ ]
}
}
}
"r4": {
"dest_link": {
"r6": {
- "route_maps": [{
- "name": "RM_R6_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R6_IN", "direction": "in"}
+ ]
}
}
}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify large-community-list")
dut = "r6"
- input_dict_5 = {
- "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
- }
+ input_dict_5 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_5)
+ result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_5)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"action": "permit",
"seq_id": "10",
"set": {
- "large_community": {
- "num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
- }
- }
+ "large_community": {"num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"}
+ },
}
]
}
}
}
result = create_route_maps(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map and advertise networks")
input_dict_3 = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": "200.50.2.0/32"}
- ]
+ "advertise_networks": [{"network": "200.50.2.0/32"}]
}
},
"ipv6": {
- "unicast": {
- "advertise_networks": [
- {"network": "1::1/128"}
- ]
- }
- }
+ "unicast": {"advertise_networks": [{"network": "1::1/128"}]}
+ },
}
}
},
"r6": {
"dest_link": {
"r5": {
- "route_maps": [{
- "name": "RM_R6_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R6_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
"r6": {
"dest_link": {
"r5": {
- "route_maps": [{
- "name": "RM_R6_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R6_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify large-community-list")
dut = "r6"
- input_dict_4 = {
- "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
- }
+ input_dict_4 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_4)
+ result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
step("Configure neighbor for no-send-community")
input_dict_5 = {
"unicast": {
"neighbor": {
"r6": {
- "dest_link": {
- "r5": {
- "no_send_community": "large"
- }
- }
+ "dest_link": {"r5": {"no_send_community": "large"}}
}
}
}
"unicast": {
"neighbor": {
"r6": {
- "dest_link": {
- "r5": {
- "no_send_community": "large"
- }
- }
+ "dest_link": {"r5": {"no_send_community": "large"}}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_5)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify Community-list")
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_4, expected=False)
+ result = verify_bgp_community(
+ tgen, adt, dut, NETWORKS[adt], input_dict_4, expected=False
+ )
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"community_type": "standard",
"action": "permit",
"name": "Test1",
- "large": True
+ "large": True,
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict_1)
assert result is not True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"action": "permit",
"seq_id": "10",
"set": {
- "large_community": {
- "num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
- }
- }
+ "large_community": {"num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"}
+ },
}
]
}
}
}
result = create_route_maps(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map and advertise networks")
input_dict_3 = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": "200.50.2.0/32"}
- ]
+ "advertise_networks": [{"network": "200.50.2.0/32"}]
}
},
"ipv6": {
- "unicast": {
- "advertise_networks": [
- {"network": "1::1/128"}
- ]
- }
- }
+ "unicast": {"advertise_networks": [{"network": "1::1/128"}]}
+ },
}
}
},
"r4": {
"dest_link": {
"r2": {
- "route_maps": [{
- "name": "RM_R4_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R4_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
"r4": {
"dest_link": {
"r2": {
- "route_maps": [{
- "name": "RM_R4_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R4_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create standard large commumity-list")
input_dict_4 = {
"action": "permit",
"name": "EXACT",
"value": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5",
- "large": True
+ "large": True,
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify BGP large community is created")
result = verify_create_community_list(tgen, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create route map")
input_dict_5 = {
"seq_id": "10",
"match": {
"large-community-list": ["EXACT"],
- "match_exact": True
- }
+ "match_exact": True,
+ },
}
]
}
}
}
result = create_route_maps(tgen, input_dict_5)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map")
input_dict_6 = {
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_6)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify large-community-list")
dut = "r4"
- input_dict_4 = {
- "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
- }
+ input_dict_4 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_4)
+ result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
input_dict_2 = {
"r2": {
"route_maps": {
- "RM_R4_OUT": [{
- "action": "permit",
- "set": {
- "large_community": {
- "num": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
- }
+ "RM_R4_OUT": [
+ {
+ "action": "permit",
+ "set": {
+ "large_community": {
+ "num": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
+ }
+ },
}
- }]
+ ]
}
}
}
result = create_route_maps(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map")
input_dict_3 = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": "200.50.2.0/32"}
- ]
+ "advertise_networks": [{"network": "200.50.2.0/32"}]
}
},
"ipv6": {
- "unicast": {
- "advertise_networks": [
- {"network": "1::1/128"}
- ]
- }
- }
+ "unicast": {"advertise_networks": [{"network": "1::1/128"}]}
+ },
}
}
},
"r4": {
"dest_link": {
"r2": {
- "route_maps": [{
- "name": "RM_R4_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R4_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
"r4": {
"dest_link": {
"r2": {
- "route_maps": [{
- "name": "RM_R4_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R4_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create standard large commumity-list")
input_dict_4 = {
"action": "permit",
"name": "ALL",
"value": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5",
- "large": True
+ "large": True,
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify BGP large community is created")
result = verify_create_community_list(tgen, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create route map")
input_dict_5 = {
{
"action": "permit",
"seq_id": "10",
- "match": {
- "large-community-list": {
- "id": "ALL"
- }
- }
+ "match": {"large-community-list": {"id": "ALL"}},
}
]
}
}
}
result = create_route_maps(tgen, input_dict_5)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map")
input_dict_6 = {
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_6)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify large-community-list")
dut = "r4"
- input_dict_4 = {
- "largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
- }
+ input_dict_4 = {"largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_4)
+ result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"action": "permit",
"seq_id": "10",
"set": {
- "large_community": {
- "num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
- }
- }
+ "large_community": {"num": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"}
+ },
}
]
}
}
}
result = create_route_maps(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map")
input_dict_3 = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": "200.50.2.0/32"}
- ]
+ "advertise_networks": [{"network": "200.50.2.0/32"}]
}
},
"ipv6": {
- "unicast": {
- "advertise_networks": [
- {"network": "1::1/128"}
- ]
- }
- }
+ "unicast": {"advertise_networks": [{"network": "1::1/128"}]}
+ },
}
}
},
"r4": {
"dest_link": {
"r2": {
- "route_maps": [{
- "name": "RM_R4_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R4_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
"r4": {
"dest_link": {
"r2": {
- "route_maps": [{
- "name": "RM_R4_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R4_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
}
}
- }
+ },
}
}
- }
+ },
}
result = create_router_bgp(tgen, topo, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create standard large commumity-list")
input_dict_4 = {
"action": "permit",
"name": "ANY",
"value": "2:1:1",
- "large": True
+ "large": True,
},
{
"community_type": "standard",
"action": "permit",
"name": "ANY",
"value": "2:2:1",
- "large": True
+ "large": True,
},
{
"community_type": "standard",
"action": "permit",
"name": "ANY",
"value": "2:3:1",
- "large": True
+ "large": True,
},
{
"community_type": "standard",
"action": "permit",
"name": "ANY",
"value": "2:4:1",
- "large": True
- }
+ "large": True,
+ },
]
}
}
result = create_bgp_community_lists(tgen, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify BGP large community is created")
result = verify_create_community_list(tgen, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create route map")
input_dict_5 = {
{
"action": "permit",
"seq_id": "10",
- "match": {
- "large-community-list": {
- "id": "ANY"
- }
- }
+ "match": {"large-community-list": {"id": "ANY"}},
}
]
}
}
}
result = create_route_maps(tgen, input_dict_5)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map")
input_dict_6 = {
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_6)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify large-community-list")
dut = "r4"
- input_dict_7 = {
- "largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"
- }
+ input_dict_7 = {"largeCommunity": "2:1:1 2:2:2 2:3:3 2:4:4 2:5:5"}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_7)
+ result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_7)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
write_test_footer(tc_name)
"large_community": {
"num": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5",
},
- "community": {
- "num": "1:1 1:2 1:3 1:4 1:5"
- }
- }
+ "community": {"num": "1:1 1:2 1:3 1:4 1:5"},
+ },
}
]
}
}
}
result = create_route_maps(tgen, input_dict_2)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map")
input_dict_3 = {
"address_family": {
"ipv4": {
"unicast": {
- "advertise_networks": [
- {"network": "200.50.2.0/32"}
- ]
+ "advertise_networks": [{"network": "200.50.2.0/32"}]
}
},
"ipv6": {
- "unicast": {
- "advertise_networks": [
- {"network": "1::1/128"}
- ]
- }
- }
+ "unicast": {"advertise_networks": [{"network": "1::1/128"}]}
+ },
}
}
},
"r4": {
"dest_link": {
"r2": {
- "route_maps": [{
- "name": "RM_R4_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R4_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
"r4": {
"dest_link": {
"r2": {
- "route_maps": [{
- "name": "RM_R4_OUT",
- "direction": "out"
- }]
+ "route_maps": [
+ {
+ "name": "RM_R4_OUT",
+ "direction": "out",
+ }
+ ]
}
}
}
}
}
- }
+ },
}
}
- }
+ },
}
- result = create_router_bgp(tgen, topo,input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ result = create_router_bgp(tgen, topo, input_dict_3)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create standard large commumity-list")
input_dict_4 = {
"action": "permit",
"name": "ALL",
"value": "1:1:1 2:1:3 2:1:4 2:1:5",
- "large": True
+ "large": True,
},
{
"community_type": "expanded",
"action": "permit",
"name": "EXP_ALL",
"value": "1:1:1 2:1:[3-5]",
- "large": True
- }
+ "large": True,
+ },
]
}
}
result = create_bgp_community_lists(tgen, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify BGP large community is created")
result = verify_create_community_list(tgen, input_dict_4)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create route map")
input_dict_5 = {
{
"action": "permit",
"seq_id": "10",
- "match": {
- "large_community_list": {
- "id": "ALL",
- },
- },
+ "match": {"large_community_list": {"id": "ALL",},},
}
]
}
}
}
result = create_route_maps(tgen, input_dict_5)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map")
input_dict_6 = {
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
"r2": {
"dest_link": {
"r4": {
- "route_maps": [{
- "name": "RM_R4_IN",
- "direction": "in"
- }]
+ "route_maps": [
+ {"name": "RM_R4_IN", "direction": "in"}
+ ]
}
}
}
}
}
- }
+ },
}
}
}
}
result = create_router_bgp(tgen, topo, input_dict_6)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify large-community-list")
dut = "r4"
- input_dict_7 = {
- "largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5"
- }
+ input_dict_7 = {"largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5"}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_7)
+ result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt], input_dict_7)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ tc_name, result
+ )
step("Delete route map reference by community-list")
- input_dict_3 = {
- "r4": {
- "route_maps": ["RM_R4_IN"]
- }
- }
+ input_dict_3 = {"r4": {"route_maps": ["RM_R4_IN"]}}
result = delete_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
result = verify_route_maps(tgen, input_dict_3)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create route map")
input_dict_5 = {
{
"action": "permit",
"seq_id": "20",
- "match": {
- "large_community_list": {
- "id": "EXP_ALL",
- },
- },
+ "match": {"large_community_list": {"id": "EXP_ALL",},},
}
]
}
}
}
result = create_route_maps(tgen, input_dict_5)
- assert result is True, "Testcase {} : Failed \n Error: {}".format(
- tc_name, result)
+ assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("clear ip bgp")
- result = clear_bgp_and_verify(tgen, topo, 'r4')
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ result = clear_bgp_and_verify(tgen, topo, "r4")
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
step("Verify large-community-list")
dut = "r4"
- input_dict_7 = {
- "largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5"
- }
+ input_dict_7 = {"largeCommunity": "1:1:1 1:1:2 2:1:3 2:1:4 2:1:5"}
for adt in ADDR_TYPES:
- result = verify_bgp_community(tgen, adt, dut, NETWORKS[adt],
- input_dict_7, expected=False)
- assert result is not True, "Testcase {} : Failed \n Error: {}".\
- format(tc_name, result)
+ result = verify_bgp_community(
+ tgen, adt, dut, NETWORKS[adt], input_dict_7, expected=False
+ )
+ assert result is not True, "Testcase {} : Failed \n Error: {}".format(
+ tc_name, result
+ )
write_test_footer(tc_name)
--- /dev/null
+{
+ "prefix":"198.10.1.1\/32",
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"LB:65301:125000 (1.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.1.2"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "prefix":"198.10.1.1\/32",
+ "paths":[
+ {
+ "valid":true,
+ "multipath":true,
+ "extendedCommunity":{
+ "string":"LB:65303:125000 (1.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.1.6"
+ }
+ ]
+ },
+ {
+ "valid":true,
+ "multipath":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"LB:65201:375000 (3.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.1.2"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "prefix":"198.10.1.1\/32",
+ "paths":[
+ {
+ "valid":true,
+ "multipath":true,
+ "extendedCommunity":{
+ "string":"LB:65303:125000 (1.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.1.6"
+ }
+ ]
+ },
+ {
+ "valid":true,
+ "multipath":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"LB:65301:250000 (2.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.1.2"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "prefix":"198.10.1.11\/32",
+ "paths":[
+ {
+ "valid":true,
+ "multipath":true,
+ "extendedCommunity":{
+ "string":"LB:65303:125000 (1.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.1.6"
+ }
+ ]
+ },
+ {
+ "valid":true,
+ "multipath":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"LB:65201:250000 (2.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.1.2"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "prefix":"198.10.1.1\/32",
+ "paths":[
+ {
+ "valid":true,
+ "multipath":true,
+ "nexthops":[
+ {
+ "ip":"11.1.1.6"
+ }
+ ]
+ },
+ {
+ "valid":true,
+ "multipath":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"LB:65201:375000 (3.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.1.2"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+hostname r1
+!
+router bgp 65101
+ bgp router-id 11.1.1.1
+ bgp bestpath as-path multipath-relax
+ neighbor 11.1.1.2 remote-as external
+ neighbor 11.1.1.6 remote-as external
+!
--- /dev/null
+{
+ "198.10.1.1\/32":[
+ {
+ "prefix":"198.10.1.1\/32",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.1.6",
+ "weight":25
+ },
+ {
+ "fib":true,
+ "ip":"11.1.1.2",
+ "weight":75
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "198.10.1.1\/32":[
+ {
+ "prefix":"198.10.1.1\/32",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.1.6",
+ "weight":33
+ },
+ {
+ "fib":true,
+ "ip":"11.1.1.2",
+ "weight":66
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "198.10.1.11\/32":[
+ {
+ "prefix":"198.10.1.11\/32",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.1.6",
+ "weight":33
+ },
+ {
+ "fib":true,
+ "ip":"11.1.1.2",
+ "weight":66
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "198.10.1.1\/32":[
+ {
+ "prefix":"198.10.1.1\/32",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.1.2",
+ "weight":1
+ },
+ {
+ "fib":true,
+ "ip":"11.1.1.6",
+ "weight":1
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "198.10.1.11\/32":[
+ {
+ "prefix":"198.10.1.11\/32",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.1.2",
+ "weight":1
+ },
+ {
+ "fib":true,
+ "ip":"11.1.1.6",
+ "weight":1
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "198.10.1.1\/32":[
+ {
+ "prefix":"198.10.1.1\/32",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.1.2",
+ "weight":100
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "198.10.1.11\/32":[
+ {
+ "prefix":"198.10.1.11\/32",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.1.2",
+ "weight":100
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "198.10.1.1\/32":[
+ {
+ "prefix":"198.10.1.1\/32",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.1.6",
+ "weight":1
+ },
+ {
+ "fib":true,
+ "ip":"11.1.1.2",
+ "weight":100
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "198.10.1.11\/32":[
+ {
+ "prefix":"198.10.1.11\/32",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.1.6",
+ "weight":1
+ },
+ {
+ "fib":true,
+ "ip":"11.1.1.2",
+ "weight":100
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "10.0.1.1\/32":[
+ {
+ "prefix":"10.0.1.1\/32",
+ "protocol":"ospf",
+ "distance":110,
+ "metric":10,
+ "table":254,
+ "internalStatus":0,
+ "internalFlags":0,
+ "internalNextHopNum":1,
+ "internalNextHopActiveNum":1,
+ "nexthops":[
+ {
+ "flags":9,
+ "ip":"0.0.0.0",
+ "afi":"ipv4",
+ "interfaceIndex":2,
+ "interfaceName":"r1-eth0",
+ "active":true,
+ "onLink":true
+ }
+ ]
+ },
+ {
+ "prefix":"10.0.1.1\/32",
+ "protocol":"connected",
+ "selected":true,
+ "destSelected":true,
+ "distance":0,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "internalStatus":16,
+ "internalFlags":8,
+ "internalNextHopNum":1,
+ "internalNextHopActiveNum":1,
+ "nexthops":[
+ {
+ "flags":3,
+ "fib":true,
+ "directlyConnected":true,
+ "interfaceIndex":2,
+ "interfaceName":"r1-eth0",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.3.4\/32":[
+ {
+ "prefix":"10.0.3.4\/32",
+ "protocol":"connected",
+ "selected":true,
+ "destSelected":true,
+ "distance":0,
+ "metric":0,
+ "installed":true,
+ "table":254,
+ "internalStatus":16,
+ "internalFlags":8,
+ "internalNextHopNum":1,
+ "internalNextHopActiveNum":1,
+ "nexthops":[
+ {
+ "flags":3,
+ "fib":true,
+ "directlyConnected":true,
+ "interfaceIndex":3,
+ "interfaceName":"r1-eth1",
+ "active":true
+ }
+ ]
+ }
+ ],
+ "10.0.20.1\/32":[
+ {
+ "prefix":"10.0.20.1\/32",
+ "protocol":"ospf",
+ "selected":true,
+ "destSelected":true,
+ "distance":110,
+ "metric":20,
+ "installed":true,
+ "table":254,
+ "internalStatus":16,
+ "internalFlags":8,
+ "internalNextHopNum":1,
+ "internalNextHopActiveNum":1,
+ "nexthops":[
+ {
+ "flags":11,
+ "fib":true,
+ "ip":"10.0.3.2",
+ "afi":"ipv4",
+ "interfaceIndex":3,
+ "interfaceName":"r1-eth1",
+ "active":true,
+ "onLink":true
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+!
+interface r1-eth0
+ ip address 11.1.1.1/30
+!
+interface r1-eth1
+ ip address 11.1.1.5/30
+!
--- /dev/null
+hostname r10
+!
+ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32
+!
+route-map redist permit 10
+ match ip address prefix-list redist
+!
+router bgp 65354
+ bgp router-id 11.1.6.2
+ neighbor 11.1.6.1 remote-as external
+ !
+ address-family ipv4 unicast
+ redistribute connected route-map redist
+ !
+!
--- /dev/null
+interface r10-eth0
+ ip address 11.1.6.2/30
+!
+interface r10-eth1
+ ip address 50.1.1.10/32
+!
--- /dev/null
+{
+ "prefix":"198.10.1.1\/32",
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"LB:65301:125000 (1.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.2.2"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "prefix":"198.10.1.1\/32",
+ "paths":[
+ {
+ "valid":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"LB:65301:250000 (2.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.2.2"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "prefix":"198.10.1.1\/32",
+ "paths":[
+ {
+ "valid":true,
+ "multipath":true,
+ "extendedCommunity":{
+ "string":"LB:65302:125000 (1.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.2.6"
+ }
+ ]
+ },
+ {
+ "valid":true,
+ "multipath":true,
+ "bestpath":{
+ "overall":true
+ },
+ "extendedCommunity":{
+ "string":"LB:65301:250000 (2.000 Mbps)"
+ },
+ "nexthops":[
+ {
+ "ip":"11.1.2.2"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+hostname r2
+!
+router bgp 65201
+ bgp router-id 11.1.2.1
+ bgp bestpath as-path multipath-relax
+ neighbor 11.1.1.1 remote-as external
+ neighbor 11.1.2.2 remote-as external
+ neighbor 11.1.2.6 remote-as external
+!
--- /dev/null
+{
+ "198.10.1.1\/32":[
+ {
+ "prefix":"198.10.1.1\/32",
+ "protocol":"bgp",
+ "selected":true,
+ "installed":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.2.2",
+ "interfaceName":"r2-eth1",
+ "active":true,
+ "weight":1
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "198.10.1.1\/32":[
+ {
+ "prefix":"198.10.1.1\/32",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.2.6",
+ "weight":33
+ },
+ {
+ "fib":true,
+ "ip":"11.1.2.2",
+ "weight":66
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+{
+ "198.10.1.1\/32":[
+ {
+ "prefix":"198.10.1.1\/32",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.2.2",
+ "weight":1
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+!
+interface r2-eth0
+ ip address 11.1.1.2/30
+!
+interface r2-eth1
+ ip address 11.1.2.1/30
+!
+interface r2-eth2
+ ip address 11.1.2.5/30
+!
--- /dev/null
+hostname r3
+!
+router bgp 65202
+ bgp router-id 11.1.3.1
+ bgp bestpath as-path multipath-relax
+ neighbor 11.1.1.5 remote-as external
+ neighbor 11.1.3.2 remote-as external
+!
--- /dev/null
+!
+interface r3-eth0
+ ip address 11.1.1.6/30
+!
+interface r3-eth1
+ ip address 11.1.3.1/30
+!
--- /dev/null
+{
+ "prefix":"198.10.1.1\/32",
+ "paths":[
+ {
+ "valid":true,
+ "multipath":true,
+ "nexthops":[
+ {
+ "ip":"11.1.4.6"
+ }
+ ]
+ },
+ {
+ "valid":true,
+ "multipath":true,
+ "nexthops":[
+ {
+ "ip":"11.1.4.2"
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+!
+log file bgpd.log
+!
+debug bgp updates
+debug bgp zebra
+debug bgp bestpath 198.10.1.1/32
+!
+hostname r4
+!
+ip prefix-list anycast_ip seq 10 permit 198.10.1.0/24 le 32
+!
+route-map anycast_ip permit 10
+ match ip address prefix-list anycast_ip
+ set extcommunity bandwidth num-multipaths
+!
+route-map anycast_ip permit 20
+!
+router bgp 65301
+ bgp router-id 11.1.4.1
+ bgp bestpath as-path multipath-relax
+ neighbor 11.1.2.1 remote-as external
+ neighbor 11.1.4.2 remote-as external
+ neighbor 11.1.4.6 remote-as external
+ !
+ address-family ipv4 unicast
+ neighbor 11.1.2.1 route-map anycast_ip out
+ !
+!
--- /dev/null
+{
+ "198.10.1.1\/32":[
+ {
+ "prefix":"198.10.1.1\/32",
+ "protocol":"bgp",
+ "selected":true,
+ "nexthops":[
+ {
+ "fib":true,
+ "ip":"11.1.4.2",
+ "weight":1
+ },
+ {
+ "fib":true,
+ "ip":"11.1.4.6",
+ "weight":1
+ }
+ ]
+ }
+ ]
+}
--- /dev/null
+!
+interface r4-eth0
+ ip address 11.1.2.2/30
+!
+interface r4-eth1
+ ip address 11.1.4.1/30
+!
+interface r4-eth2
+ ip address 11.1.4.5/30
+!
--- /dev/null
+hostname r5
+!
+ip prefix-list anycast_ip seq 10 permit 198.10.1.0/24 le 32
+!
+route-map anycast_ip permit 10
+ match ip address prefix-list anycast_ip
+ set extcommunity bandwidth num-multipaths
+!
+route-map anycast_ip permit 20
+!
+router bgp 65302
+ bgp router-id 11.1.5.1
+ bgp bestpath as-path multipath-relax
+ neighbor 11.1.2.5 remote-as external
+ neighbor 11.1.5.2 remote-as external
+ !
+ address-family ipv4 unicast
+ neighbor 11.1.2.5 route-map anycast_ip out
+ !
+!
--- /dev/null
+!
+interface r5-eth0
+ ip address 11.1.2.6/30
+!
+interface r5-eth1
+ ip address 11.1.5.1/30
+!
--- /dev/null
+hostname r6
+!
+ip prefix-list anycast_ip seq 10 permit 198.10.1.0/24 le 32
+!
+route-map anycast_ip permit 10
+ match ip address prefix-list anycast_ip
+ set extcommunity bandwidth num-multipaths
+!
+route-map anycast_ip permit 20
+!
+router bgp 65303
+ bgp router-id 11.1.6.1
+ bgp bestpath as-path multipath-relax
+ neighbor 11.1.3.1 remote-as external
+ neighbor 11.1.6.2 remote-as external
+ !
+ address-family ipv4 unicast
+ neighbor 11.1.3.1 route-map anycast_ip out
+ !
+!
--- /dev/null
+!
+interface r6-eth0
+ ip address 11.1.3.2/30
+!
+interface r6-eth1
+ ip address 11.1.6.1/30
+!
--- /dev/null
+hostname r7
+!
+ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32
+!
+route-map redist permit 10
+ match ip address prefix-list redist
+!
+router bgp 65351
+ bgp router-id 11.1.4.2
+ neighbor 11.1.4.1 remote-as external
+ !
+ address-family ipv4 unicast
+ redistribute connected route-map redist
+ !
+!
--- /dev/null
+interface r7-eth0
+ ip address 11.1.4.2/30
+!
+interface r7-eth1
+ ip address 50.1.1.7/32
+!
--- /dev/null
+hostname r8
+!
+ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32
+!
+route-map redist permit 10
+ match ip address prefix-list redist
+!
+router bgp 65352
+ bgp router-id 11.1.4.6
+ neighbor 11.1.4.5 remote-as external
+ !
+ address-family ipv4 unicast
+ redistribute connected route-map redist
+ !
+!
--- /dev/null
+interface r8-eth0
+ ip address 11.1.4.6/30
+!
+interface r8-eth1
+ ip address 50.1.1.8/32
+!
--- /dev/null
+hostname r9
+!
+ip prefix-list redist seq 10 permit 0.0.0.0/0 ge 32
+!
+route-map redist permit 10
+ match ip address prefix-list redist
+!
+router bgp 65353
+ bgp router-id 11.1.5.2
+ neighbor 11.1.5.1 remote-as external
+ !
+ address-family ipv4 unicast
+ redistribute connected route-map redist
+ !
+!
--- /dev/null
+interface r9-eth0
+ ip address 11.1.5.2/30
+!
+interface r9-eth1
+ ip address 50.1.1.9/32
+!
--- /dev/null
+#!/usr/bin/env python
+
+#
+# test_bgp_linkbw_ip.py
+#
+# Copyright (c) 2020 by
+# Cumulus Networks, Inc
+# Vivek Venkatraman
+#
+# Permission to use, copy, modify, and/or distribute this software
+# for any purpose with or without fee is hereby granted, provided
+# that the above copyright notice and this permission notice appear
+# in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+#
+
+"""
+test_bgp_linkbw_ip.py: Test weighted ECMP using BGP link-bandwidth
+"""
+
+import os
+import re
+import sys
+from functools import partial
+import pytest
+import json
+
+# Save the Current Working Directory to find configuration files.
+CWD = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CWD, '../'))
+
+# pylint: disable=C0413
+# Import topogen and topotest helpers
+from lib import topotest
+from lib.topogen import Topogen, TopoRouter, get_topogen
+from lib.topolog import logger
+
+# Required to instantiate the topology builder class.
+from mininet.topo import Topo
+
+"""
+This topology is for validating one of the primary use cases for
+weighted ECMP (a.k.a. Unequal cost multipath) using BGP link-bandwidth:
+https://tools.ietf.org/html/draft-mohanty-bess-ebgp-dmz
+
+The topology consists of two PODs. Pod-1 consists of a spine switch
+and two leaf switches, with two servers attached to the first leaf and
+one to the second leaf. Pod-2 consists of one spine and one leaf, with
+one server connected to the leaf. The PODs are connected by a super-spine
+switch.
+
+Note that the use of the term "switch" above is in keeping with common
+data-center terminology. These devices are all regular routers; for
+this scenario, the servers are also routers as they have to announce
+anycast IP (VIP) addresses via BGP.
+"""
+
+class BgpLinkBwTopo(Topo):
+ "Test topology builder"
+ def build(self, *_args, **_opts):
+ "Build function"
+ tgen = get_topogen(self)
+
+ # Create 10 routers - 1 super-spine, 2 spines, 3 leafs
+ # and 4 servers
+ routers = {}
+ for i in range(1, 11):
+ routers[i] = tgen.add_router('r{}'.format(i))
+
+ # Create 13 "switches" - to interconnect the above routers
+ switches = {}
+ for i in range(1, 14):
+ switches[i] = tgen.add_switch('s{}'.format(i))
+
+ # Interconnect R1 (super-spine) to R2 and R3 (the two spines)
+ switches[1].add_link(tgen.gears['r1'])
+ switches[1].add_link(tgen.gears['r2'])
+ switches[2].add_link(tgen.gears['r1'])
+ switches[2].add_link(tgen.gears['r3'])
+
+ # Interconnect R2 (spine in pod-1) to R4 and R5 (the associated
+ # leaf switches)
+ switches[3].add_link(tgen.gears['r2'])
+ switches[3].add_link(tgen.gears['r4'])
+ switches[4].add_link(tgen.gears['r2'])
+ switches[4].add_link(tgen.gears['r5'])
+
+ # Interconnect R3 (spine in pod-2) to R6 (associated leaf)
+ switches[5].add_link(tgen.gears['r3'])
+ switches[5].add_link(tgen.gears['r6'])
+
+ # Interconnect leaf switches to servers
+ switches[6].add_link(tgen.gears['r4'])
+ switches[6].add_link(tgen.gears['r7'])
+ switches[7].add_link(tgen.gears['r4'])
+ switches[7].add_link(tgen.gears['r8'])
+ switches[8].add_link(tgen.gears['r5'])
+ switches[8].add_link(tgen.gears['r9'])
+ switches[9].add_link(tgen.gears['r6'])
+ switches[9].add_link(tgen.gears['r10'])
+
+ # Create empty networks for the servers
+ switches[10].add_link(tgen.gears['r7'])
+ switches[11].add_link(tgen.gears['r8'])
+ switches[12].add_link(tgen.gears['r9'])
+ switches[13].add_link(tgen.gears['r10'])
+
+def setup_module(mod):
+ "Sets up the pytest environment"
+ tgen = Topogen(BgpLinkBwTopo, mod.__name__)
+ tgen.start_topology()
+
+ router_list = tgen.routers()
+ for rname, router in router_list.items():
+ router.load_config(
+ TopoRouter.RD_ZEBRA,
+ os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP,
+ os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ )
+
+ # Initialize all routers.
+ tgen.start_router()
+
+ #tgen.mininet_cli()
+
+def teardown_module(mod):
+ "Teardown the pytest environment"
+ tgen = get_topogen()
+ tgen.stop_topology()
+
+def test_bgp_linkbw_adv():
+ "Test #1: Test BGP link-bandwidth advertisement based on number of multipaths"
+ logger.info('\nTest #1: Test BGP link-bandwidth advertisement based on number of multipaths')
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip('skipped because of router(s) failure')
+
+ r1 = tgen.gears['r1']
+ r2 = tgen.gears['r2']
+
+ # Configure anycast IP on server r7
+ logger.info('Configure anycast IP on server r7')
+
+ tgen.net['r7'].cmd('ip addr add 198.10.1.1/32 dev r7-eth1')
+
+ # Check on spine router r2 for link-bw advertisement by leaf router r4
+ logger.info('Check on spine router r2 for link-bw advertisement by leaf router r4')
+
+ json_file = '{}/r2/bgp-route-1.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on spine router r2'
+ assert result is None, assertmsg
+
+ # Check on spine router r2 that default weight is used as there is no multipath
+ logger.info('Check on spine router r2 that default weight is used as there is no multipath')
+
+ json_file = '{}/r2/ip-route-1.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r2, 'show ip route 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
+ assertmsg = 'JSON output mismatch on spine router r2'
+ assert result is None, assertmsg
+
+ # Check on super-spine router r1 that link-bw has been propagated by spine router r2
+ logger.info('Check on super-spine router r1 that link-bw has been propagated by spine router r2')
+
+ json_file = '{}/r1/bgp-route-1.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+def test_bgp_cumul_linkbw():
+ "Test #2: Test cumulative link-bandwidth propagation"
+ logger.info('\nTest #2: Test cumulative link-bandwidth propagation')
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip('skipped because of router(s) failure')
+
+ r1 = tgen.gears['r1']
+ r2 = tgen.gears['r2']
+ r4 = tgen.gears['r4']
+
+ # Configure anycast IP on additional server r8
+ logger.info('Configure anycast IP on server r8')
+
+ tgen.net['r8'].cmd('ip addr add 198.10.1.1/32 dev r8-eth1')
+
+ # Check multipath on leaf router r4
+ logger.info('Check multipath on leaf router r4')
+
+ json_file = '{}/r4/bgp-route-1.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r4, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on leaf router r4'
+ assert result is None, assertmsg
+
+ # Check regular ECMP is in effect on leaf router r4
+ logger.info('Check regular ECMP is in effect on leaf router r4')
+
+ json_file = '{}/r4/ip-route-1.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r4, 'show ip route 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
+ assertmsg = 'JSON output mismatch on leaf router r4'
+ assert result is None, assertmsg
+
+ # Check on spine router r2 that leaf has propagated the cumulative link-bw based on num-multipaths
+ logger.info('Check on spine router r2 that leaf has propagated the cumulative link-bw based on num-multipaths')
+
+ json_file = '{}/r2/bgp-route-2.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on spine router r2'
+ assert result is None, assertmsg
+
+def test_weighted_ecmp():
+ "Test #3: Test weighted ECMP - multipath with next hop weights"
+ logger.info('\nTest #3: Test weighted ECMP - multipath with next hop weights')
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip('skipped because of router(s) failure')
+
+ r1 = tgen.gears['r1']
+ r2 = tgen.gears['r2']
+
+ # Configure anycast IP on additional server r9
+ logger.info('Configure anycast IP on server r9')
+
+ tgen.net['r9'].cmd('ip addr add 198.10.1.1/32 dev r9-eth1')
+
+ # Check multipath on spine router r2
+ logger.info('Check multipath on spine router r2')
+ json_file = '{}/r2/bgp-route-3.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r2, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on spine router r2'
+ assert result is None, assertmsg
+
+ # Check weighted ECMP is in effect on the spine router r2
+ logger.info('Check weighted ECMP is in effect on the spine router r2')
+
+ json_file = '{}/r2/ip-route-2.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r2, 'show ip route 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
+ assertmsg = 'JSON output mismatch on spine router r2'
+ assert result is None, assertmsg
+
+ # Configure anycast IP on additional server r10
+ logger.info('Configure anycast IP on server r10')
+
+ tgen.net['r10'].cmd('ip addr add 198.10.1.1/32 dev r10-eth1')
+
+ # Check multipath on super-spine router r1
+ logger.info('Check multipath on super-spine router r1')
+ json_file = '{}/r1/bgp-route-2.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+ # Check weighted ECMP is in effect on the super-spine router r1
+ logger.info('Check weighted ECMP is in effect on the super-spine router r1')
+ json_file = '{}/r1/ip-route-1.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show ip route 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+def test_weighted_ecmp_link_flap():
+ "Test #4: Test weighted ECMP rebalancing upon change (link flap)"
+ logger.info('\nTest #4: Test weighted ECMP rebalancing upon change (link flap)')
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip('skipped because of router(s) failure')
+
+ r1 = tgen.gears['r1']
+ r2 = tgen.gears['r2']
+
+ # Bring down link on server r9
+ logger.info('Bring down link on server r9')
+
+ tgen.net['r9'].cmd('ip link set dev r9-eth1 down')
+
+ # Check spine router r2 has only one path
+ logger.info('Check spine router r2 has only one path')
+
+ json_file = '{}/r2/ip-route-3.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r2, 'show ip route 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on spine router r2'
+ assert result is None, assertmsg
+
+ # Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1
+ logger.info('Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1')
+
+ json_file = '{}/r1/bgp-route-3.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+ json_file = '{}/r1/ip-route-2.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show ip route 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+ # Bring up link on server r9
+ logger.info('Bring up link on server r9')
+
+ tgen.net['r9'].cmd('ip link set dev r9-eth1 up')
+
+ # Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1
+ logger.info('Check link-bandwidth change and weighted ECMP rebalance on super-spine router r1')
+
+ json_file = '{}/r1/bgp-route-2.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+ json_file = '{}/r1/ip-route-1.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show ip route 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+def test_weighted_ecmp_second_anycast_ip():
+ "Test #5: Test weighted ECMP for a second anycast IP"
+ logger.info('\nTest #5: Test weighted ECMP for a second anycast IP')
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip('skipped because of router(s) failure')
+
+ r1 = tgen.gears['r1']
+ r2 = tgen.gears['r2']
+
+ # Configure anycast IP on additional server r7, r9 and r10
+ logger.info('Configure anycast IP on server r7, r9 and r10')
+
+ tgen.net['r7'].cmd('ip addr add 198.10.1.11/32 dev r7-eth1')
+ tgen.net['r9'].cmd('ip addr add 198.10.1.11/32 dev r9-eth1')
+ tgen.net['r10'].cmd('ip addr add 198.10.1.11/32 dev r10-eth1')
+
+ # Check link-bandwidth and weighted ECMP on super-spine router r1
+ logger.info('Check link-bandwidth and weighted ECMP on super-spine router r1')
+
+ json_file = '{}/r1/bgp-route-4.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show bgp ipv4 uni 198.10.1.11/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+ json_file = '{}/r1/ip-route-3.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show ip route 198.10.1.11/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+def test_paths_with_and_without_linkbw():
+ "Test #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP"
+ logger.info('\nTest #6: Test paths with and without link-bandwidth - receiver should resort to regular ECMP')
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip('skipped because of router(s) failure')
+
+ r1 = tgen.gears['r1']
+
+ # Configure leaf router r6 to not advertise any link-bandwidth
+ logger.info('Configure leaf router r6 to not advertise any link-bandwidth')
+
+ tgen.net['r6'].cmd('vtysh -c \"conf t\" -c \"router bgp 65303\" -c \"address-family ipv4 unicast\" -c \"no neighbor 11.1.3.1 route-map anycast_ip out\"')
+
+ # Check link-bandwidth change on super-spine router r1
+ logger.info('Check link-bandwidth change on super-spine router r1')
+
+ json_file = '{}/r1/bgp-route-5.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show bgp ipv4 uni 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+ # Check super-spine router r1 resorts to regular ECMP
+ logger.info('Check super-spine router r1 resorts to regular ECMP')
+
+ json_file = '{}/r1/ip-route-4.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show ip route 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+ json_file = '{}/r1/ip-route-5.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show ip route 198.10.1.11/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=50, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+def test_linkbw_handling_options():
+ "Test #7: Test different options for processing link-bandwidth on the receiver"
+ logger.info('\nTest #7: Test different options for processing link-bandwidth on the receiver')
+
+ tgen = get_topogen()
+ if tgen.routers_have_failure():
+ pytest.skip('skipped because of router(s) failure')
+
+ r1 = tgen.gears['r1']
+
+ # Configure super-spine r1 to skip multipaths without link-bandwidth
+ logger.info('Configure super-spine r1 to skip multipaths without link-bandwidth')
+
+ tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65101\" -c \"bgp bestpath bandwidth skip-missing\"')
+
+ # Check super-spine router r1 resorts to only one path as other path is skipped
+ logger.info('Check super-spine router r1 resorts to only one path as other path is skipped')
+
+ json_file = '{}/r1/ip-route-6.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show ip route 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+ json_file = '{}/r1/ip-route-7.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show ip route 198.10.1.11/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+ # Configure super-spine r1 to use default-weight for multipaths without link-bandwidth
+ logger.info('Configure super-spine r1 to use default-weight for multipaths without link-bandwidth')
+
+ tgen.net['r1'].cmd('vtysh -c \"conf t\" -c \"router bgp 65101\" -c \"bgp bestpath bandwidth default-weight-for-missing\"')
+
+ # Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth
+ logger.info('Check super-spine router r1 uses ECMP with weight 1 for path without link-bandwidth')
+
+ json_file = '{}/r1/ip-route-8.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show ip route 198.10.1.1/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+ json_file = '{}/r1/ip-route-9.json'.format(CWD)
+ expected = json.loads(open(json_file).read())
+ test_func = partial(topotest.router_json_cmp,
+ r1, 'show ip route 198.10.1.11/32 json', expected)
+ _, result = topotest.run_and_expect(test_func, None, count=200, wait=0.5)
+ assertmsg = 'JSON output mismatch on super-spine router r1'
+ assert result is None, assertmsg
+
+if __name__ == '__main__':
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
import pytest
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 5):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r3'])
- switch.add_link(tgen.gears['r4'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_remove_private_as():
tgen = get_topogen()
def _bgp_converge(router):
while True:
- output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
- if output['192.168.255.1']['bgpState'] == 'Established':
+ output = json.loads(
+ tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")
+ )
+ if output["192.168.255.1"]["bgpState"] == "Established":
time.sleep(1)
return True
def _bgp_as_path(router):
- output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json"))
- if output['prefix'] == '172.16.255.254/32':
- return output['paths'][0]['aspath']['segments'][0]['list']
+ output = json.loads(
+ tgen.gears[router].vtysh_cmd("show ip bgp 172.16.255.254/32 json")
+ )
+ if output["prefix"] == "172.16.255.254/32":
+ return output["paths"][0]["aspath"]["segments"][0]["list"]
+
+ if _bgp_converge("r2"):
+ assert len(_bgp_as_path("r2")) == 1
+ assert 65000 not in _bgp_as_path("r2")
- if _bgp_converge('r2'):
- assert len(_bgp_as_path('r2')) == 1
- assert 65000 not in _bgp_as_path('r2')
+ if _bgp_converge("r4"):
+ assert len(_bgp_as_path("r4")) == 2
+ assert 3000 in _bgp_as_path("r4")
- if _bgp_converge('r4'):
- assert len(_bgp_as_path('r4')) == 2
- assert 3000 in _bgp_as_path('r4')
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import pytest
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 3):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_maximum_prefix_invalid():
tgen = get_topogen()
def _bgp_converge(router):
while True:
- output = json.loads(tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
- if output['192.168.255.1']['connectionsEstablished'] > 0:
+ output = json.loads(
+ tgen.gears[router].vtysh_cmd("show ip bgp neighbor 192.168.255.1 json")
+ )
+ if output["192.168.255.1"]["connectionsEstablished"] > 0:
return True
def _bgp_parsing_nlri(router):
- cmd_max_exceeded = 'grep "%MAXPFXEXCEED: No. of IPv4 Unicast prefix received" bgpd.log'
+ cmd_max_exceeded = (
+ 'grep "%MAXPFXEXCEED: No. of IPv4 Unicast prefix received" bgpd.log'
+ )
cmdt_error_parsing_nlri = 'grep "Error parsing NLRI" bgpd.log'
output_max_exceeded = tgen.gears[router].run(cmd_max_exceeded)
output_error_parsing_nlri = tgen.gears[router].run(cmdt_error_parsing_nlri)
return False
return True
+ if _bgp_converge("r2"):
+ assert _bgp_parsing_nlri("r2") == True
- if _bgp_converge('r2'):
- assert _bgp_parsing_nlri('r2') == True
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 3):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_maximum_prefix_out():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears['r2']
+ router = tgen.gears["r2"]
def _bgp_converge(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json"))
expected = {
- '192.168.255.2': {
- 'bgpState': 'Established',
- 'addressFamilyInfo': {
- 'ipv4Unicast': {
- 'acceptedPrefixCounter': 2
- }
- }
+ "192.168.255.2": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
}
}
return topotest.json_cmp(output, expected)
assert result is None, 'Failed bgp convergence in "{}"'.format(router)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# Announce numRoutes different routes per PE
for i in range(0, numRoutes):
- stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer))
+ stdout.write(
+ "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n"
+ % ((peer + 100), i, peer, peer)
+ )
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer)
+stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# Announce numRoutes different routes per PE
for i in range(0, numRoutes):
- stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer))
+ stdout.write(
+ "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n"
+ % ((peer + 100), i, peer, peer)
+ )
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer)
+stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# Announce numRoutes different routes per PE
for i in range(0, numRoutes):
- stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer))
+ stdout.write(
+ "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n"
+ % ((peer + 100), i, peer, peer)
+ )
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer)
+stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# Announce numRoutes different routes per PE
for i in range(0, numRoutes):
- stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer))
+ stdout.write(
+ "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n"
+ % ((peer + 100), i, peer, peer)
+ )
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer)
+stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# Announce numRoutes different routes per PE
for i in range(0, numRoutes):
- stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer))
+ stdout.write(
+ "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n"
+ % ((peer + 100), i, peer, peer)
+ )
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer)
+stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# Announce numRoutes different routes per PE
for i in range(0, numRoutes):
- stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer))
+ stdout.write(
+ "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n"
+ % ((peer + 100), i, peer, peer)
+ )
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer)
+stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# Announce numRoutes different routes per PE
for i in range(0, numRoutes):
- stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer))
+ stdout.write(
+ "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n"
+ % ((peer + 100), i, peer, peer)
+ )
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer)
+stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# Announce numRoutes different routes per PE
for i in range(0, numRoutes):
- stdout.write('announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n' % ((peer+100), i, peer, peer))
+ stdout.write(
+ "announce route 10.%s.%s.0/24 med 100 community %i:1 next-hop 172.16.1.%i\n"
+ % ((peer + 100), i, peer, peer)
+ )
stdout.flush()
# Announce 1 overlapping route per peer
-stdout.write('announce route 10.0.1.0/24 next-hop 172.16.1.%i\n' % peer)
+stdout.write("announce route 10.0.1.0/24 next-hop 172.16.1.%i\n" % peer)
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
~~ 172.20.0.1/28 ~~ attributes (using route-map)
~~ Stub Switch ~~
~~~~~~~~~~~~~
-"""
+"""
import os
import re
##
#####################################################
+
class NetworkTopo(Topo):
"BGP Multiview Topology 1"
def build(self, **_opts):
- exabgpPrivateDirs = ['/etc/exabgp',
- '/var/run/exabgp',
- '/var/log']
+ exabgpPrivateDirs = ["/etc/exabgp", "/var/run/exabgp", "/var/log"]
# Setup Routers
router = {}
for i in range(1, 2):
- router[i] = topotest.addRouter(self, 'r%s' % i)
+ router[i] = topotest.addRouter(self, "r%s" % i)
# Setup Provider BGP peers
peer = {}
for i in range(1, 9):
- peer[i] = self.addHost('peer%s' % i, ip='172.16.1.%s/24' % i,
- defaultRoute='via 172.16.1.254',
- privateDirs=exabgpPrivateDirs)
+ peer[i] = self.addHost(
+ "peer%s" % i,
+ ip="172.16.1.%s/24" % i,
+ defaultRoute="via 172.16.1.254",
+ privateDirs=exabgpPrivateDirs,
+ )
# Setup Switches
switch = {}
# First switch is for a dummy interface (for local network)
- switch[0] = self.addSwitch('sw0', cls=topotest.LegacySwitch)
- self.addLink(switch[0], router[1], intfName2='r1-stub')
+ switch[0] = self.addSwitch("sw0", cls=topotest.LegacySwitch)
+ self.addLink(switch[0], router[1], intfName2="r1-stub")
# Second switch is for connection to all peering routers
- switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch)
- self.addLink(switch[1], router[1], intfName2='r1-eth0')
+ switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch)
+ self.addLink(switch[1], router[1], intfName2="r1-eth0")
for j in range(1, 9):
- self.addLink(switch[1], peer[j], intfName2='peer%s-eth0' % j)
+ self.addLink(switch[1], peer[j], intfName2="peer%s-eth0" % j)
#####################################################
##
#####################################################
+
def setup_module(module):
global topo, net
print("******************************************\n")
print("Cleanup old Mininet runs")
- os.system('sudo mn -c > /dev/null 2>&1')
+ os.system("sudo mn -c > /dev/null 2>&1")
thisDir = os.path.dirname(os.path.realpath(__file__))
topo = NetworkTopo()
# Starting Routers
for i in range(1, 2):
- net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i))
- net['r%s' % i].loadConf('bgpd', '%s/r%s/bgpd.conf' % (thisDir, i))
- net['r%s' % i].startRouter()
+ net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i))
+ net["r%s" % i].loadConf("bgpd", "%s/r%s/bgpd.conf" % (thisDir, i))
+ net["r%s" % i].startRouter()
# Starting PE Hosts and init ExaBGP on each of them
- print('*** Starting BGP on all 8 Peers')
+ print("*** Starting BGP on all 8 Peers")
for i in range(1, 9):
- net['peer%s' % i].cmd('cp %s/exabgp.env /etc/exabgp/exabgp.env' % thisDir)
- net['peer%s' % i].cmd('cp %s/peer%s/* /etc/exabgp/' % (thisDir, i))
- net['peer%s' % i].cmd('chmod 644 /etc/exabgp/*')
- net['peer%s' % i].cmd('chmod 755 /etc/exabgp/*.py')
- net['peer%s' % i].cmd('chown -R exabgp:exabgp /etc/exabgp')
- net['peer%s' % i].cmd('exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg')
- print('peer%s' % i),
- print('')
+ net["peer%s" % i].cmd("cp %s/exabgp.env /etc/exabgp/exabgp.env" % thisDir)
+ net["peer%s" % i].cmd("cp %s/peer%s/* /etc/exabgp/" % (thisDir, i))
+ net["peer%s" % i].cmd("chmod 644 /etc/exabgp/*")
+ net["peer%s" % i].cmd("chmod 755 /etc/exabgp/*.py")
+ net["peer%s" % i].cmd("chown -R exabgp:exabgp /etc/exabgp")
+ net["peer%s" % i].cmd("exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg")
+ print("peer%s" % i),
+ print("")
# For debugging after starting Quagga/FRR daemons, uncomment the next line
# CLI(net)
+
def teardown_module(module):
global net
print("******************************************\n")
# Shutdown - clean up everything
- print('*** Killing BGP on Peer routers')
+ print("*** Killing BGP on Peer routers")
# Killing ExaBGP
for i in range(1, 9):
- net['peer%s' % i].cmd('kill `cat /var/run/exabgp/exabgp.pid`')
+ net["peer%s" % i].cmd("kill `cat /var/run/exabgp/exabgp.pid`")
# End - Shutdown network
net.stop()
+
def test_router_running():
global fatal_error
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
print("\n\n** Check if FRR/Quagga is running on each Router node")
# Starting Routers
for i in range(1, 2):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
# Wait for BGP to converge (All Neighbors in either Full or TwoWay State)
# Look for any node not yet converged
for i in range(1, 2):
for view in range(1, 4):
- notConverged = net['r%s' % i].cmd('vtysh -c "show ip bgp view %s summary" 2> /dev/null | grep ^[0-9] | grep -v " 11$"' % view)
+ notConverged = net["r%s" % i].cmd(
+ 'vtysh -c "show ip bgp view %s summary" 2> /dev/null | grep ^[0-9] | grep -v " 11$"'
+ % view
+ )
if notConverged:
- print('Waiting for r%s, view %s' % (i, view))
+ print("Waiting for r%s, view %s" % (i, view))
sys.stdout.flush()
break
if notConverged:
sleep(5)
timeout -= 5
else:
- print('Done')
+ print("Done")
break
else:
# Bail out with error if a router fails to converge
- bgpStatus = net['r%s' % i].cmd('vtysh -c "show ip bgp view %s summary"' % view)
+ bgpStatus = net["r%s" % i].cmd('vtysh -c "show ip bgp view %s summary"' % view)
assert False, "BGP did not converge:\n%s" % bgpStatus
# Wait for an extra 5s to announce all routes
- print('Waiting 5s for routes to be announced');
+ print("Waiting 5s for routes to be announced")
sleep(5)
-
+
print("BGP converged.")
# if timeout < 60:
# Make sure that all daemons are running
for i in range(1, 2):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting Quagga/FRR daemons, uncomment the next line
# CLI(net)
+
def test_bgp_routingTable():
global fatal_error
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
for view in range(1, 4):
success = 0
# This glob pattern should work as long as number of views < 10
- for refTableFile in (glob.glob(
- '%s/r%s/show_ip_bgp_view_%s*.ref' % (thisDir, i, view))):
+ for refTableFile in glob.glob(
+ "%s/r%s/show_ip_bgp_view_%s*.ref" % (thisDir, i, view)
+ ):
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show ip bgp view %s" 2> /dev/null' % view).rstrip()
-
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show ip bgp view %s" 2> /dev/null' % view)
+ .rstrip()
+ )
+
# Fix inconsitent spaces between 0.99.24 and newer versions of Quagga...
- actual = re.sub('0 0', '0 0', actual)
- actual = re.sub(r'([0-9]) 32768', r'\1 32768', actual)
+ actual = re.sub("0 0", "0 0", actual)
+ actual = re.sub(
+ r"([0-9]) 32768", r"\1 32768", actual
+ )
# Remove summary line (changed recently)
- actual = re.sub(r'Total number.*', '', actual)
- actual = re.sub(r'Displayed.*', '', actual)
+ actual = re.sub(r"Total number.*", "", actual)
+ actual = re.sub(r"Displayed.*", "", actual)
actual = actual.rstrip()
# Fix table version (ignore it)
- actual = re.sub(r'(BGP table version is )[0-9]+', r'\1XXX', actual)
+ actual = re.sub(r"(BGP table version is )[0-9]+", r"\1XXX", actual)
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual BGP routing table",
- title2="expected BGP routing table")
+ title2="expected BGP routing table",
+ )
if diff:
diffresult[refTableFile] = diff
else:
success = 1
print("template %s matched: r%s ok" % (refTableFile, i))
- break;
+ break
if not success:
- resultstr = 'No template matched.\n'
+ resultstr = "No template matched.\n"
for f in diffresult.iterkeys():
resultstr += (
- 'template %s: r%s failed Routing Table Check for view %s:\n%s\n'
- % (f, i, view, diffresult[f]))
+ "template %s: r%s failed Routing Table Check for view %s:\n%s\n"
+ % (f, i, view, diffresult[f])
+ )
raise AssertionError(
- "Routing Table verification failed for router r%s, view %s:\n%s" % (i, view, resultstr))
-
+ "Routing Table verification failed for router r%s, view %s:\n%s"
+ % (i, view, resultstr)
+ )
# Make sure that all daemons are running
for i in range(1, 2):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
- if os.environ.get('TOPOTESTS_CHECK_STDERR') is None:
- print("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n")
- pytest.skip('Skipping test for Stderr output')
+ if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
+ print(
+ "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n"
+ )
+ pytest.skip("Skipping test for Stderr output")
thisDir = os.path.dirname(os.path.realpath(__file__))
print("\n\n** Verifying unexpected STDERR output from daemons")
print("******************************************\n")
- net['r1'].stopRouter()
+ net["r1"].stopRouter()
- log = net['r1'].getStdErr('bgpd')
+ log = net["r1"].getStdErr("bgpd")
if log:
print("\nBGPd StdErr Log:\n" + log)
- log = net['r1'].getStdErr('zebra')
+ log = net["r1"].getStdErr("zebra")
if log:
print("\nZebra StdErr Log:\n" + log)
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
- if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None:
- print("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n")
- pytest.skip('Skipping test for memory leaks')
-
+ if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None:
+ print(
+ "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n"
+ )
+ pytest.skip("Skipping test for memory leaks")
+
thisDir = os.path.dirname(os.path.realpath(__file__))
- net['r1'].stopRouter()
- net['r1'].report_memory_leaks(os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), os.path.basename(__file__))
+ net["r1"].stopRouter()
+ net["r1"].report_memory_leaks(
+ os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__)
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
- setLogLevel('info')
+ setLogLevel("info")
# To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli
# retval = pytest.main(["-s", "--tb=no"])
retval = pytest.main(["-s"])
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
import pytest
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
class TemplateTopo(Topo):
def build(self, **_opts):
tgen = get_topogen(self)
- router = tgen.add_router('r1')
- switch = tgen.add_switch('s1')
+ router = tgen.add_router("r1")
+ switch = tgen.add_switch("s1")
switch.add_link(router)
- switch = tgen.gears['s1']
- peer1 = tgen.add_exabgp_peer('peer1', ip='10.0.0.101', defaultRoute='via 10.0.0.1')
- peer2 = tgen.add_exabgp_peer('peer2', ip='10.0.0.102', defaultRoute='via 10.0.0.1')
+ switch = tgen.gears["s1"]
+ peer1 = tgen.add_exabgp_peer(
+ "peer1", ip="10.0.0.101", defaultRoute="via 10.0.0.1"
+ )
+ peer2 = tgen.add_exabgp_peer(
+ "peer2", ip="10.0.0.102", defaultRoute="via 10.0.0.1"
+ )
switch.add_link(peer1)
switch.add_link(peer2)
tgen = Topogen(TemplateTopo, module.__name__)
tgen.start_topology()
- router = tgen.gears['r1']
- router.load_config(TopoRouter.RD_ZEBRA, os.path.join(CWD, '{}/zebra.conf'.format('r1')))
- router.load_config(TopoRouter.RD_BGP, os.path.join(CWD, '{}/bgpd.conf'.format('r1')))
+ router = tgen.gears["r1"]
+ router.load_config(
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format("r1"))
+ )
+ router.load_config(
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format("r1"))
+ )
router.start()
- logger.info('starting exaBGP on peer1')
+ logger.info("starting exaBGP on peer1")
peer_list = tgen.exabgp_peers()
for pname, peer in peer_list.iteritems():
peer_dir = os.path.join(CWD, pname)
- env_file = os.path.join(CWD, 'exabgp.env')
- logger.info('Running ExaBGP peer')
+ env_file = os.path.join(CWD, "exabgp.env")
+ logger.info("Running ExaBGP peer")
peer.start(peer_dir, env_file)
logger.info(pname)
def test_r1_receive_and_advertise_prefix_sid_type1():
tgen = get_topogen()
- router = tgen.gears['r1']
+ router = tgen.gears["r1"]
def _check_type1_r1(router, prefix, remoteLabel, labelIndex):
- output = router.vtysh_cmd('show bgp ipv4 labeled-unicast {} json'.format(prefix))
+ output = router.vtysh_cmd(
+ "show bgp ipv4 labeled-unicast {} json".format(prefix)
+ )
output = json.loads(output)
expected = {
- 'prefix': prefix,
- 'advertisedTo': { '10.0.0.101':{}, '10.0.0.102':{} },
- 'paths': [{
- 'valid':True,
- 'remoteLabel': remoteLabel,
- 'labelIndex': labelIndex,
- }]
+ "prefix": prefix,
+ "advertisedTo": {"10.0.0.101": {}, "10.0.0.102": {}},
+ "paths": [
+ {"valid": True, "remoteLabel": remoteLabel, "labelIndex": labelIndex,}
+ ],
}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(_check_type1_r1, router, '3.0.0.1/32', 800001, 1)
+ test_func = functools.partial(_check_type1_r1, router, "3.0.0.1/32", 800001, 1)
success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
assert result is None, 'Failed _check_type1_r1 in "{}"'.format(router)
- test_func = functools.partial(_check_type1_r1, router, '3.0.0.2/32', 800002, 2)
+ test_func = functools.partial(_check_type1_r1, router, "3.0.0.2/32", 800002, 2)
success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
assert result is None, 'Failed _check_type1_r1 in "{}"'.format(router)
def exabgp_get_update_prefix(filename, afi, nexthop, prefix):
- with open('/tmp/peer2-received.log') as f:
+ with open("/tmp/peer2-received.log") as f:
for line in f.readlines():
output = json.loads(line)
- ret = output.get('neighbor')
+ ret = output.get("neighbor")
if ret is None:
continue
- ret = ret.get('message')
+ ret = ret.get("message")
if ret is None:
continue
- ret = ret.get('update')
+ ret = ret.get("update")
if ret is None:
continue
- ret = ret.get('announce')
+ ret = ret.get("announce")
if ret is None:
continue
ret = ret.get(afi)
def test_peer2_receive_prefix_sid_type1():
tgen = get_topogen()
- peer2 = tgen.gears['peer2']
+ peer2 = tgen.gears["peer2"]
def _check_type1_peer2(prefix, labelindex):
- output = exabgp_get_update_prefix('/tmp/peer2-received.log', 'ipv4 nlri-mpls', '10.0.0.101', prefix)
+ output = exabgp_get_update_prefix(
+ "/tmp/peer2-received.log", "ipv4 nlri-mpls", "10.0.0.101", prefix
+ )
expected = {
- 'type': 'update',
- 'neighbor': {
- 'ip': '10.0.0.1',
- 'message': {
- 'update': {
- 'attribute': {
- 'attribute-0x28-0xE0': '0x010007000000{:08x}'.format(labelindex)
+ "type": "update",
+ "neighbor": {
+ "ip": "10.0.0.1",
+ "message": {
+ "update": {
+ "attribute": {
+ "attribute-0x28-0xE0": "0x010007000000{:08x}".format(
+ labelindex
+ )
},
- 'announce': { 'ipv4 nlri-mpls': { '10.0.0.101': {} } }
+ "announce": {"ipv4 nlri-mpls": {"10.0.0.101": {}}},
}
- }
- }
+ },
+ },
}
return topotest.json_cmp(output, expected)
- test_func = functools.partial(_check_type1_peer2, '3.0.0.1/32', labelindex=1)
+ test_func = functools.partial(_check_type1_peer2, "3.0.0.1/32", labelindex=1)
success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
- assert result is None, 'Failed _check_type1_peer2 in "{}"'.format('peer2')
+ assert result is None, 'Failed _check_type1_peer2 in "{}"'.format("peer2")
- test_func = functools.partial(_check_type1_peer2, '3.0.0.2/32', labelindex=2)
+ test_func = functools.partial(_check_type1_peer2, "3.0.0.2/32", labelindex=2)
success, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
- assert result is None, 'Failed _check_type1_peer2 in "{}"'.format('peer2')
+ assert result is None, 'Failed _check_type1_peer2 in "{}"'.format("peer2")
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
ret = pytest.main(args)
sys.exit(ret)
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 4):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_reject_as_sets():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears['r2']
+ router = tgen.gears["r2"]
def _bgp_converge(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json"))
expected = {
- '192.168.255.2': {
- 'bgpState': 'Established',
- 'addressFamilyInfo': {
- 'ipv4Unicast': {
- 'acceptedPrefixCounter': 2
- }
- }
+ "192.168.255.2": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
}
}
return topotest.json_cmp(output, expected)
def _bgp_has_aggregated_route_with_stripped_as_set(router):
output = json.loads(router.vtysh_cmd("show ip bgp 172.16.0.0/16 json"))
expected = {
- 'paths': [
- {
- 'aspath': {
- 'string': 'Local',
- 'segments': [
- ],
- 'length': 0
- }
- }
- ]
+ "paths": [{"aspath": {"string": "Local", "segments": [], "length": 0}}]
}
return topotest.json_cmp(output, expected)
def _bgp_announce_route_without_as_sets(router):
- output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.254.2 advertised-routes json"))
+ output = json.loads(
+ router.vtysh_cmd(
+ "show ip bgp neighbor 192.168.254.2 advertised-routes json"
+ )
+ )
expected = {
- 'advertisedRoutes': {
- '172.16.0.0/16': {
- 'path': ''
- },
- '192.168.254.0/30': {
- 'path': '65003'
- },
- '192.168.255.0/30': {
- 'path': '65001'
- }
+ "advertisedRoutes": {
+ "172.16.0.0/16": {"path": ""},
+ "192.168.254.0/30": {"path": "65003"},
+ "192.168.255.0/30": {"path": "65001"},
},
- 'totalPrefixCounter': 3
+ "totalPrefixCounter": 3,
}
return topotest.json_cmp(output, expected)
assert result is None, 'Failed bgp convergence in "{}"'.format(router)
- test_func = functools.partial(_bgp_has_aggregated_route_with_stripped_as_set, router)
+ test_func = functools.partial(
+ _bgp_has_aggregated_route_with_stripped_as_set, router
+ )
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, 'Failed to see an aggregated route in "{}"'.format(router)
test_func = functools.partial(_bgp_announce_route_without_as_sets, router)
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
- assert result is None, 'Route 172.16.0.0/16 should be sent without AS_SET to r3 "{}"'.format(router)
+ assert (
+ result is None
+ ), 'Route 172.16.0.0/16 should be sent without AS_SET to r3 "{}"'.format(router)
+
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
from mininet.topo import Topo
import shutil
+
CWD = os.path.dirname(os.path.realpath(__file__))
# test name based on directory
TEST = os.path.basename(CWD)
+
class ThisTestTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# between routers, switches and hosts.
#
# Create P/PE routers
- tgen.add_router('r1')
+ tgen.add_router("r1")
for routern in range(2, 5):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
# Create a switch with just one router connected to it to simulate a
# empty network.
switch = {}
- switch[0] = tgen.add_switch('sw0')
- switch[0].add_link(tgen.gears['r1'], nodeif='r1-eth0')
- switch[0].add_link(tgen.gears['r2'], nodeif='r2-eth0')
+ switch[0] = tgen.add_switch("sw0")
+ switch[0].add_link(tgen.gears["r1"], nodeif="r1-eth0")
+ switch[0].add_link(tgen.gears["r2"], nodeif="r2-eth0")
- switch[1] = tgen.add_switch('sw1')
- switch[1].add_link(tgen.gears['r2'], nodeif='r2-eth1')
- switch[1].add_link(tgen.gears['r3'], nodeif='r3-eth0')
- switch[1].add_link(tgen.gears['r4'], nodeif='r4-eth0')
+ switch[1] = tgen.add_switch("sw1")
+ switch[1].add_link(tgen.gears["r2"], nodeif="r2-eth1")
+ switch[1].add_link(tgen.gears["r3"], nodeif="r3-eth0")
+ switch[1].add_link(tgen.gears["r4"], nodeif="r4-eth0")
+
+ switch[2] = tgen.add_switch("sw2")
+ switch[2].add_link(tgen.gears["r2"], nodeif="r2-eth2")
+ switch[2].add_link(tgen.gears["r3"], nodeif="r3-eth1")
- switch[2] = tgen.add_switch('sw2')
- switch[2].add_link(tgen.gears['r2'], nodeif='r2-eth2')
- switch[2].add_link(tgen.gears['r3'], nodeif='r3-eth1')
def ltemplatePreRouterStartHook():
cc = ltemplateRtrCmd()
tgen = get_topogen()
- logger.info('pre router-start hook')
- #check for normal init
+ logger.info("pre router-start hook")
+ # check for normal init
if len(tgen.net) == 1:
- logger.info('Topology not configured, skipping setup')
+ logger.info("Topology not configured, skipping setup")
return False
return True
+
def ltemplatePostRouterStartHook():
- logger.info('post router-start hook')
+ logger.info("post router-start hook")
return True
-
from lutil import luCommand
-holddownFactorSet = luCommand('r1','vtysh -c "show running"','rfp holddown-factor','none','Holddown factor set')
+
+holddownFactorSet = luCommand(
+ "r1",
+ 'vtysh -c "show running"',
+ "rfp holddown-factor",
+ "none",
+ "Holddown factor set",
+)
if not holddownFactorSet:
to = "-1"
cost = ""
else:
to = "6"
cost = "cost 50"
-luCommand('r1','vtysh -c "debug rfapi-dev open vn 10.0.0.1 un 1.1.1.1"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI')
-luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"','rc=2', 'pass', 'Clean query')
-luCommand('r1','vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered')
-luCommand('r1','vtysh -c "show vnc registrations local"','1 out of 1','wait','Local registration')
-luCommand('r1','vtysh -c "debug rfapi-dev response-omit-self off"','.','none')
-luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"','11.11.11.0/24', 'pass', 'Query self')
+luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev open vn 10.0.0.1 un 1.1.1.1"',
+ "rfapi_set_response_cb: status 0",
+ "pass",
+ "Opened RFAPI",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"',
+ "rc=2",
+ "pass",
+ "Clean query",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24 lifetime {}"'.format(
+ to
+ ),
+ "",
+ "none",
+ "Prefix registered",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations local"',
+ "1 out of 1",
+ "wait",
+ "Local registration",
+)
+luCommand("r1", 'vtysh -c "debug rfapi-dev response-omit-self off"', ".", "none")
+luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 11.11.11.11"',
+ "11.11.11.0/24",
+ "pass",
+ "Query self",
+)
-luCommand('r3','vtysh -c "debug rfapi-dev open vn 10.0.0.2 un 2.2.2.2"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI')
-luCommand('r3','vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered')
-luCommand('r3','vtysh -c "show vnc registrations local"','1 out of 1','wait','Local registration')
-luCommand('r3','vtysh -c "debug rfapi-dev response-omit-self on"','.','none')
-luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 22.22.22.22"','rc=2', 'pass', 'Self excluded')
-luCommand('r3','vtysh -c "debug rfapi-dev open vn 10.0.1.2 un 2.1.1.2"','rfapi_set_response_cb: status 0', 'pass', 'Opened query only RFAPI')
-luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.1.2 un 2.1.1.2 target 22.22.22.22"','22.22.22.0/24', 'pass', 'See local')
+luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev open vn 10.0.0.2 un 2.2.2.2"',
+ "rfapi_set_response_cb: status 0",
+ "pass",
+ "Opened RFAPI",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24 lifetime {}"'.format(
+ to
+ ),
+ "",
+ "none",
+ "Prefix registered",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations local"',
+ "1 out of 1",
+ "wait",
+ "Local registration",
+)
+luCommand("r3", 'vtysh -c "debug rfapi-dev response-omit-self on"', ".", "none")
+luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 22.22.22.22"',
+ "rc=2",
+ "pass",
+ "Self excluded",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev open vn 10.0.1.2 un 2.1.1.2"',
+ "rfapi_set_response_cb: status 0",
+ "pass",
+ "Opened query only RFAPI",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.1.2 un 2.1.1.2 target 22.22.22.22"',
+ "22.22.22.0/24",
+ "pass",
+ "See local",
+)
-luCommand('r4','vtysh -c "debug rfapi-dev open vn 10.0.0.3 un 3.3.3.3"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI')
-luCommand('r4','vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered')
-luCommand('r4','vtysh -c "show vnc registrations local"','1 out of 1','wait','Local registration')
-luCommand('r4','vtysh -c "debug rfapi-dev response-omit-self off"','.','none')
-luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 33.33.33.33"','33.33.33.0/24', 'pass', 'Query self')
+luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev open vn 10.0.0.3 un 3.3.3.3"',
+ "rfapi_set_response_cb: status 0",
+ "pass",
+ "Opened RFAPI",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24 lifetime {}"'.format(
+ to
+ ),
+ "",
+ "none",
+ "Prefix registered",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations local"',
+ "1 out of 1",
+ "wait",
+ "Local registration",
+)
+luCommand("r4", 'vtysh -c "debug rfapi-dev response-omit-self off"', ".", "none")
+luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 33.33.33.33"',
+ "33.33.33.0/24",
+ "pass",
+ "Query self",
+)
-luCommand('r4','vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24 lifetime {} {}"'.format(to, cost),'', 'none', 'MP Prefix registered')
-luCommand('r4','vtysh -c "show vnc registrations local"','2 out of 2','wait','Local registration')
-luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"','11.11.11.0/24', 'pass', 'Query self MP')
+luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24 lifetime {} {}"'.format(
+ to, cost
+ ),
+ "",
+ "none",
+ "MP Prefix registered",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations local"',
+ "2 out of 2",
+ "wait",
+ "Local registration",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"',
+ "11.11.11.0/24",
+ "pass",
+ "Query self MP",
+)
-luCommand('r1','vtysh -c "show vnc registrations"','.','none')
-luCommand('r3','vtysh -c "show vnc registrations"','.','none')
-luCommand('r4','vtysh -c "show vnc registrations"','.','none')
+luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none")
+luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none")
+luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none")
-luCommand('r1','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r3','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r4','ping 2.2.2.2 -c 1',' 0. packet loss','wait','PE->P2 (loopback) ping',60)
-luCommand('r2','vtysh -c "show bgp summary"',' 00:0.* 00:0.* 00:0','wait','Core adjacencies up',180)
-luCommand('r1','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
-luCommand('r3','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
-luCommand('r4','vtysh -c "show bgp vrf all summary"',' 00:0','wait','All adjacencies up',180)
-luCommand('r1','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
-luCommand('r1','ping 4.4.4.4 -c 1',' 0. packet loss','wait','PE->PE4 (loopback) ping')
-#luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
+luCommand(
+ "r1", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60
+)
+luCommand(
+ "r3", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60
+)
+luCommand(
+ "r4", "ping 2.2.2.2 -c 1", " 0. packet loss", "wait", "PE->P2 (loopback) ping", 60
+)
+luCommand(
+ "r2",
+ 'vtysh -c "show bgp summary"',
+ " 00:0.* 00:0.* 00:0",
+ "wait",
+ "Core adjacencies up",
+ 180,
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show bgp vrf all summary"',
+ " 00:0",
+ "wait",
+ "All adjacencies up",
+ 180,
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show bgp vrf all summary"',
+ " 00:0",
+ "wait",
+ "All adjacencies up",
+ 180,
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show bgp vrf all summary"',
+ " 00:0",
+ "wait",
+ "All adjacencies up",
+ 180,
+)
+luCommand(
+ "r1", "ping 3.3.3.3 -c 1", " 0. packet loss", "wait", "PE->PE3 (loopback) ping"
+)
+luCommand(
+ "r1", "ping 4.4.4.4 -c 1", " 0. packet loss", "wait", "PE->PE4 (loopback) ping"
+)
+# luCommand('r4','ping 3.3.3.3 -c 1',' 0. packet loss','wait','PE->PE3 (loopback) ping')
from lutil import luCommand
-holddownFactorSet = luCommand('r1','vtysh -c "show running"','rfp holddown-factor','none','Holddown factor set')
+
+holddownFactorSet = luCommand(
+ "r1",
+ 'vtysh -c "show running"',
+ "rfp holddown-factor",
+ "none",
+ "Holddown factor set",
+)
if not holddownFactorSet:
to = "-1"
else:
to = "1"
-luCommand('r1','vtysh -c "debug rfapi-dev open vn 20.0.0.1 un 1.1.1.21"','rfapi_set_response_cb: status 0', 'pass', 'Opened RFAPI')
-luCommand('r1','vtysh -c "debug rfapi-dev register vn 20.0.0.1 un 1.1.1.21 prefix 111.111.111.0/24 lifetime {}"'.format(to),'', 'none', 'Prefix registered')
-luCommand('r1','vtysh -c "show vnc registrations local"','111.111.111.0/24','wait','Local registration',1)
-luCommand('r1','vtysh -c "show vnc registrations"','.','none')
-luCommand('r3','vtysh -c "show vnc registrations"','111.111.111.0/24','wait','See registration')
-luCommand('r4','vtysh -c "show vnc registrations"','111.111.111.0/24','wait','See registration')
-luCommand('r1','vtysh -c "debug rfapi-dev close vn 20.0.0.1 un 1.1.1.21"','status 0', 'pass', 'Closed RFAPI')
-luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See cleanup')
-luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See cleanup')
-luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2','wait','See cleanup')
-luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown',20)
-luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown')
-luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown')
+luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev open vn 20.0.0.1 un 1.1.1.21"',
+ "rfapi_set_response_cb: status 0",
+ "pass",
+ "Opened RFAPI",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev register vn 20.0.0.1 un 1.1.1.21 prefix 111.111.111.0/24 lifetime {}"'.format(
+ to
+ ),
+ "",
+ "none",
+ "Prefix registered",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations local"',
+ "111.111.111.0/24",
+ "wait",
+ "Local registration",
+ 1,
+)
+luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none")
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "111.111.111.0/24",
+ "wait",
+ "See registration",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "111.111.111.0/24",
+ "wait",
+ "See registration",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev close vn 20.0.0.1 un 1.1.1.21"',
+ "status 0",
+ "pass",
+ "Closed RFAPI",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 1 .* Remotely: *Active: 3",
+ "wait",
+ "See cleanup",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 1 .* Remotely: *Active: 3",
+ "wait",
+ "See cleanup",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 2 .* Remotely: *Active: 2",
+ "wait",
+ "See cleanup",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 0",
+ "wait",
+ "Out of holddown",
+ 20,
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 0",
+ "wait",
+ "Out of holddown",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 0",
+ "wait",
+ "Out of holddown",
+)
from lutil import luCommand
-luCommand('r1','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI')
-luCommand('r2','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI')
-luCommand('r3','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI')
-luCommand('r4','vtysh -c "show bgp ipv4 vpn"','','none','VPN SAFI')
-luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See all registrations')
-luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3','wait','See all registrations')
-luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2','wait','See all registrations')
-num = '4 routes and 4'
-luCommand('r1','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI okay')
-luCommand('r2','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI okay')
-luCommand('r3','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI okay')
-luCommand('r4','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI okay')
-luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 22.22.22.22"','pfx=', 'pass', 'Query R2s info')
-luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 33.33.33.33"','pfx=', 'pass', 'Query R4s info')
-luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 11.11.11.11"','11.11.11.0/24.*11.11.11.0/24.*', 'pass', 'Query R1s+R4s info')
-luCommand('r3','vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 33.33.33.33"','pfx=', 'pass', 'Query R4s info')
-luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"','11.11.11.0/24.*11.11.11.0/24.*', 'pass', 'Query R1s+R4s info')
-luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 22.22.22.22"','pfx=', 'pass', 'Query R2s info')
+
+luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI")
+luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI")
+luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI")
+luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', "", "none", "VPN SAFI")
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 1 .* Remotely: *Active: 3",
+ "wait",
+ "See all registrations",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 1 .* Remotely: *Active: 3",
+ "wait",
+ "See all registrations",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 2 .* Remotely: *Active: 2",
+ "wait",
+ "See all registrations",
+)
+num = "4 routes and 4"
+luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay")
+luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay")
+luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay")
+luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI okay")
+luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 22.22.22.22"',
+ "pfx=",
+ "pass",
+ "Query R2s info",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 33.33.33.33"',
+ "pfx=",
+ "pass",
+ "Query R4s info",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 11.11.11.11"',
+ "11.11.11.0/24.*11.11.11.0/24.*",
+ "pass",
+ "Query R1s+R4s info",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.2 un 2.2.2.2 target 33.33.33.33"',
+ "pfx=",
+ "pass",
+ "Query R4s info",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 11.11.11.11"',
+ "11.11.11.0/24.*11.11.11.0/24.*",
+ "pass",
+ "Query R1s+R4s info",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 22.22.22.22"',
+ "pfx=",
+ "pass",
+ "Query R2s info",
+)
from lutil import luCommand
-holddownFactorSet = luCommand('r1','vtysh -c "show running"','rfp holddown-factor','none','Holddown factor set')
-luCommand('r1','vtysh -c "show vnc registrations"','.','none')
-luCommand('r3','vtysh -c "show vnc registrations"','.','none')
-luCommand('r4','vtysh -c "show vnc registrations"','.','none')
+
+holddownFactorSet = luCommand(
+ "r1",
+ 'vtysh -c "show running"',
+ "rfp holddown-factor",
+ "none",
+ "Holddown factor set",
+)
+luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none")
+luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none")
+luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none")
if not holddownFactorSet:
- luCommand('r1','vtysh -c "show vnc summary"','.','pass','Holddown factor not set -- skipping test')
+ luCommand(
+ "r1",
+ 'vtysh -c "show vnc summary"',
+ ".",
+ "pass",
+ "Holddown factor not set -- skipping test",
+ )
else:
- #holddown time test
- luCommand('r1','vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"','', 'none', 'Prefix registered')
- luCommand('r1','vtysh -c "show vnc registrations local"','1.111.0.0/16','wait','Local registration')
+ # holddown time test
+ luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"',
+ "",
+ "none",
+ "Prefix registered",
+ )
+ luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations local"',
+ "1.111.0.0/16",
+ "wait",
+ "Local registration",
+ )
- luCommand('r3','vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"','', 'none', 'Prefix registered')
- luCommand('r3','vtysh -c "show vnc registrations local"','1.222.0.0/16','wait','Local registration')
+ luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"',
+ "",
+ "none",
+ "Prefix registered",
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations local"',
+ "1.222.0.0/16",
+ "wait",
+ "Local registration",
+ )
- luCommand('r4','vtysh -c "show vnc registrations"','Remotely: *Active: 4 ','wait', 'See registrations, L=10')
+ luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "Remotely: *Active: 4 ",
+ "wait",
+ "See registrations, L=10",
+ )
- luCommand('r4','vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 1.222.0.0/16 lifetime 5 cost 50"','', 'none', 'MP Prefix registered')
- luCommand('r4','vtysh -c "show vnc registrations local"','1.222.0.0/16','wait','Local registration (MP prefix)')
+ luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev register vn 10.0.0.3 un 3.3.3.3 prefix 1.222.0.0/16 lifetime 5 cost 50"',
+ "",
+ "none",
+ "MP Prefix registered",
+ )
+ luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations local"',
+ "1.222.0.0/16",
+ "wait",
+ "Local registration (MP prefix)",
+ )
- luCommand('r1','vtysh -c "show vnc registrations"','.','none')
- luCommand('r3','vtysh -c "show vnc registrations"','.','none')
+ luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none")
+ luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none")
- luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.111.111.111"','pfx=', 'pass', 'Query R1s info')
- luCommand('r4','vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.222.222.222"','1.222.0.0/16.*1.222.0.0/16', 'pass', 'Query R3s+R4s info')
+ luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.111.111.111"',
+ "pfx=",
+ "pass",
+ "Query R1s info",
+ )
+ luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.3 un 3.3.3.3 target 1.222.222.222"',
+ "1.222.0.0/16.*1.222.0.0/16",
+ "pass",
+ "Query R3s+R4s info",
+ )
- luCommand('r4','vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 1.222.0.0/16"','', 'none', 'MP Prefix removed')
- luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 1 ','wait', 'MP prefix in holddown')
- luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 1 ','wait', 'MP prefix in holddown')
- luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 1 ','wait', 'MP prefix in holddown')
- luCommand('r1','vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 1.222.222.222"','1.222.0.0/16', 'pass', 'Query R3s info')
- luCommand('r1','vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16"','', 'none', 'Prefix timeout')
- luCommand('r1','vtysh -c "show vnc registrations holddown"','1.111.0.0/16','wait','Local holddown',1)
- luCommand('r3','vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16"','', 'none', 'Prefix timeout')
- luCommand('r3','vtysh -c "show vnc registrations holddown"','1.222.0.0/16','wait','Local holddown',1)
- luCommand('r4','vtysh -c "show vnc registrations"','.','none')
- luCommand('r4','vtysh -c "show vnc registrations"','.','none')
+ luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 1.222.0.0/16"',
+ "",
+ "none",
+ "MP Prefix removed",
+ )
+ luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 1 ",
+ "wait",
+ "MP prefix in holddown",
+ )
+ luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 1 ",
+ "wait",
+ "MP prefix in holddown",
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 1 ",
+ "wait",
+ "MP prefix in holddown",
+ )
+ luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev query vn 10.0.0.1 un 1.1.1.1 target 1.222.222.222"',
+ "1.222.0.0/16",
+ "pass",
+ "Query R3s info",
+ )
+ luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16"',
+ "",
+ "none",
+ "Prefix timeout",
+ )
+ luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations holddown"',
+ "1.111.0.0/16",
+ "wait",
+ "Local holddown",
+ 1,
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16"',
+ "",
+ "none",
+ "Prefix timeout",
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations holddown"',
+ "1.222.0.0/16",
+ "wait",
+ "Local holddown",
+ 1,
+ )
+ luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none")
+ luCommand("r4", 'vtysh -c "show vnc registrations"', ".", "none")
- luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 2 ','wait', 'In holddown')
- luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 2 ','wait', 'In holddown')
- luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 2 ','wait', 'In holddown')
+ luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 2 ",
+ "wait",
+ "In holddown",
+ )
+ luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 2 ",
+ "wait",
+ "In holddown",
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 2 ",
+ "wait",
+ "In holddown",
+ )
- luCommand('r1','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown',20)
- luCommand('r3','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown')
- luCommand('r4','vtysh -c "show vnc registrations"','In Holddown: *Active: 0','wait','Out of holddown')
+ luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 0",
+ "wait",
+ "Out of holddown",
+ 20,
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 0",
+ "wait",
+ "Out of holddown",
+ )
+ luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "In Holddown: *Active: 0",
+ "wait",
+ "Out of holddown",
+ )
- #kill test
- luCommand('r1','vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"','', 'none', 'Prefix registered')
- luCommand('r1','vtysh -c "show vnc registrations local"','1.111.0.0/16','wait','Local registration')
+ # kill test
+ luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev register vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 lifetime 10"',
+ "",
+ "none",
+ "Prefix registered",
+ )
+ luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations local"',
+ "1.111.0.0/16",
+ "wait",
+ "Local registration",
+ )
- luCommand('r3','vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"','', 'none', 'Prefix registered')
- luCommand('r3','vtysh -c "show vnc registrations local"','1.222.0.0/16','wait','Local registration')
+ luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev register vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 lifetime 10"',
+ "",
+ "none",
+ "Prefix registered",
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations local"',
+ "1.222.0.0/16",
+ "wait",
+ "Local registration",
+ )
- luCommand('r4','vtysh -c "show vnc registrations"','Remotely: *Active: 4 ','wait', 'See registrations L=10 (pre-kill)',5)
- luCommand('r1','vtysh -c "show vnc registrations"','.','none')
- luCommand('r3','vtysh -c "show vnc registrations"','.','none')
- luCommand('r1','vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 kill"','', 'none', 'Prefix kill')
- luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 4 .*In Holddown: *Active: 0','wait','Registration killed',1)
- luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1','wait','Remote in holddown',5)
- luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1','wait','Remote in holddown',5)
+ luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "Remotely: *Active: 4 ",
+ "wait",
+ "See registrations L=10 (pre-kill)",
+ 5,
+ )
+ luCommand("r1", 'vtysh -c "show vnc registrations"', ".", "none")
+ luCommand("r3", 'vtysh -c "show vnc registrations"', ".", "none")
+ luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 1.111.0.0/16 kill"',
+ "",
+ "none",
+ "Prefix kill",
+ )
+ luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 1 .* Remotely: *Active: 4 .*In Holddown: *Active: 0",
+ "wait",
+ "Registration killed",
+ 1,
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1",
+ "wait",
+ "Remote in holddown",
+ 5,
+ )
+ luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 2 .* Remotely: *Active: 3 .*In Holddown: *Active: 1",
+ "wait",
+ "Remote in holddown",
+ 5,
+ )
- luCommand('r3','vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 kill"','', 'none', 'Prefix kill')
- luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 1','wait','Registration killed',1)
- luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 2','wait','Remote in holddown',5)
+ luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 1.222.0.0/16 kill"',
+ "",
+ "none",
+ "Prefix kill",
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 1",
+ "wait",
+ "Registration killed",
+ 1,
+ )
+ luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 2",
+ "wait",
+ "Remote in holddown",
+ 5,
+ )
- luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0','wait','Out of holddown',20)
- luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0','wait','Out of holddown')
- luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 0','wait','Out of holddown')
+ luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0",
+ "wait",
+ "Out of holddown",
+ 20,
+ )
+ luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 1 .* Remotely: *Active: 3 .*In Holddown: *Active: 0",
+ "wait",
+ "Out of holddown",
+ )
+ luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 2 .* Remotely: *Active: 2 .*In Holddown: *Active: 0",
+ "wait",
+ "Out of holddown",
+ )
from lutil import luCommand
-luCommand('r1','vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24"','', 'none', 'Prefix removed')
-luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 0 ','wait','Local registration removed')
-luCommand('r1','vtysh -c "debug rfapi-dev close vn 10.0.0.1 un 1.1.1.1"','status 0', 'pass', 'Closed RFAPI')
-luCommand('r3','vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24"','', 'none', 'Prefix removed')
-luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 0 ','wait','Local registration removed')
-luCommand('r3','vtysh -c "debug rfapi-dev close vn 10.0.0.2 un 2.2.2.2"','status 0', 'pass', 'Closed RFAPI')
+luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.1 un 1.1.1.1 prefix 11.11.11.0/24"',
+ "",
+ "none",
+ "Prefix removed",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 0 ",
+ "wait",
+ "Local registration removed",
+)
+luCommand(
+ "r1",
+ 'vtysh -c "debug rfapi-dev close vn 10.0.0.1 un 1.1.1.1"',
+ "status 0",
+ "pass",
+ "Closed RFAPI",
+)
-luCommand('r4','vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24"','', 'none', 'Prefix removed')
-luCommand('r4','vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24"','', 'none', 'MP prefix removed')
-luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 0 ','wait','Local registration removed')
-#luCommand('r4','vtysh -c "debug rfapi-dev close vn 10.0.0.3 un 3.3.3.3"','status 0', 'pass', 'Closed RFAPI')
-luCommand('r4','vtysh -c "clear vnc nve *"','.', 'pass', 'Cleared NVEs')
+luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.2 un 2.2.2.2 prefix 22.22.22.0/24"',
+ "",
+ "none",
+ "Prefix removed",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 0 ",
+ "wait",
+ "Local registration removed",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "debug rfapi-dev close vn 10.0.0.2 un 2.2.2.2"',
+ "status 0",
+ "pass",
+ "Closed RFAPI",
+)
-luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0','wait','All registrations cleared')
-luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0','wait','All registrations cleared')
-luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0','wait','All registrations cleared')
+luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 33.33.33.0/24"',
+ "",
+ "none",
+ "Prefix removed",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "debug rfapi-dev unregister vn 10.0.0.3 un 3.3.3.3 prefix 11.11.11.0/24"',
+ "",
+ "none",
+ "MP prefix removed",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 0 ",
+ "wait",
+ "Local registration removed",
+)
+# luCommand('r4','vtysh -c "debug rfapi-dev close vn 10.0.0.3 un 3.3.3.3"','status 0', 'pass', 'Closed RFAPI')
+luCommand("r4", 'vtysh -c "clear vnc nve *"', ".", "pass", "Cleared NVEs")
-num = '0 exist'
-luCommand('r1','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear')
-luCommand('r2','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear')
-luCommand('r3','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear')
-luCommand('r4','vtysh -c "show bgp ipv4 vpn"',num,'pass','VPN SAFI clear')
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 0 .* Remotely: *Active: 0",
+ "wait",
+ "All registrations cleared",
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 0 .* Remotely: *Active: 0",
+ "wait",
+ "All registrations cleared",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 0 .* Remotely: *Active: 0",
+ "wait",
+ "All registrations cleared",
+)
-luCommand('r1','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0','wait','No holddowns',20)
-luCommand('r3','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0','wait','No holddowns')
-luCommand('r4','vtysh -c "show vnc registrations"','Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0','wait','No holddowns')
+num = "0 exist"
+luCommand("r1", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear")
+luCommand("r2", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear")
+luCommand("r3", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear")
+luCommand("r4", 'vtysh -c "show bgp ipv4 vpn"', num, "pass", "VPN SAFI clear")
-luCommand('r1','vtysh -c "show vnc summary"','.','none')
-luCommand('r3','vtysh -c "show vnc summary"','.','none')
-luCommand('r4','vtysh -c "show vnc summary"','.','none')
+luCommand(
+ "r1",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0",
+ "wait",
+ "No holddowns",
+ 20,
+)
+luCommand(
+ "r3",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0",
+ "wait",
+ "No holddowns",
+)
+luCommand(
+ "r4",
+ 'vtysh -c "show vnc registrations"',
+ "Locally: *Active: 0 .* Remotely: *Active: 0 .*In Holddown: *Active: 0",
+ "wait",
+ "No holddowns",
+)
+luCommand("r1", 'vtysh -c "show vnc summary"', ".", "none")
+luCommand("r3", 'vtysh -c "show vnc summary"', ".", "none")
+luCommand("r4", 'vtysh -c "show vnc summary"', ".", "none")
import sys
import pytest
-sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
+sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from lib.ltemplate import *
+
def test_add_routes():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'3.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
- ltemplateTest('scripts/add_routes.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('3.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
+ ltemplateTest("scripts/add_routes.py", False, CliOnFail, CheckFunc)
+
def test_adjacencies():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'3.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
- ltemplateTest('scripts/adjacencies.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('3.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
+ ltemplateTest("scripts/adjacencies.py", False, CliOnFail, CheckFunc)
+
def test_check_routes():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'3.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
- ltemplateTest('scripts/check_routes.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('3.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
+ ltemplateTest("scripts/check_routes.py", False, CliOnFail, CheckFunc)
+
def test_check_close():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'3.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
- ltemplateTest('scripts/check_close.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('3.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
+ ltemplateTest("scripts/check_close.py", False, CliOnFail, CheckFunc)
+
def test_check_timeout():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'3.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
- ltemplateTest('scripts/check_timeout.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('3.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
+ ltemplateTest("scripts/check_timeout.py", False, CliOnFail, CheckFunc)
+
def test_cleanup_all():
CliOnFail = None
# For debugging, uncomment the next line
- #CliOnFail = 'tgen.mininet_cli'
- CheckFunc = 'ltemplateVersionCheck(\'3.1\')'
- #uncomment next line to start cli *before* script is run
- #CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
- ltemplateTest('scripts/cleanup_all.py', False, CliOnFail, CheckFunc)
+ # CliOnFail = 'tgen.mininet_cli'
+ CheckFunc = "ltemplateVersionCheck('3.1')"
+ # uncomment next line to start cli *before* script is run
+ # CheckFunc = 'ltemplateVersionCheck(\'3.1\', cli=True)'
+ ltemplateTest("scripts/cleanup_all.py", False, CliOnFail, CheckFunc)
+
-if __name__ == '__main__':
+if __name__ == "__main__":
retval = pytest.main(["-s"])
sys.exit(retval)
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
##
#####################################################
+
class NetworkTopo(Topo):
"BGP_RR_IBGP Topology 1"
tgen = get_topogen(self)
- tgen.add_router('tor1')
- tgen.add_router('tor2')
- tgen.add_router('spine1')
+ tgen.add_router("tor1")
+ tgen.add_router("tor2")
+ tgen.add_router("spine1")
# First switch is for a dummy interface (for local network)
# on tor1
- # 192.168.1.0/24
- switch = tgen.add_switch('sw1')
- switch.add_link(tgen.gears['tor1'])
+ # 192.168.1.0/24
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["tor1"])
- # 192.168.2.0/24 - tor1 <-> spine1 connection
- switch = tgen.add_switch('sw2')
- switch.add_link(tgen.gears['tor1'])
- switch.add_link(tgen.gears['spine1'])
+ # 192.168.2.0/24 - tor1 <-> spine1 connection
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["tor1"])
+ switch.add_link(tgen.gears["spine1"])
# 3rd switch is for a dummy interface (for local netwokr)
- # 192.168.3.0/24 - tor2
- switch = tgen.add_switch('sw3')
- switch.add_link(tgen.gears['tor2'])
+ # 192.168.3.0/24 - tor2
+ switch = tgen.add_switch("sw3")
+ switch.add_link(tgen.gears["tor2"])
- # 192.168.4.0/24 - tor2 <-> spine1 connection
- switch = tgen.add_switch('sw4')
- switch.add_link(tgen.gears['tor2'])
- switch.add_link(tgen.gears['spine1'])
+ # 192.168.4.0/24 - tor2 <-> spine1 connection
+ switch = tgen.add_switch("sw4")
+ switch.add_link(tgen.gears["tor2"])
+ switch.add_link(tgen.gears["spine1"])
#####################################################
##
#####################################################
+
def setup_module(module):
"Setup topology"
tgen = Topogen(NetworkTopo, module.__name__)
router_list = tgen.routers()
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- topotest.sleep(5, 'Waiting for BGP_RR_IBGP convergence')
+ topotest.sleep(5, "Waiting for BGP_RR_IBGP convergence")
def test_bgp_rr_ibgp_routes():
# Verify BGP_RR_IBGP Status
logger.info("Verifying BGP_RR_IBGP routes")
+
def test_zebra_ipv4_routingTable():
"Test 'show ip route'"
failures = 0
router_list = tgen.routers().values()
for router in router_list:
- output = router.vtysh_cmd('show ip route json', isjson=True)
- refTableFile = '{}/{}/show_ip_route.json_ref'.format(CWD, router.name)
+ output = router.vtysh_cmd("show ip route json", isjson=True)
+ refTableFile = "{}/{}/show_ip_route.json_ref".format(CWD, router.name)
expected = json.loads(open(refTableFile).read())
- assertmsg = 'Zebra IPv4 Routing Table verification failed for router {}'.format(router.name)
+ assertmsg = "Zebra IPv4 Routing Table verification failed for router {}".format(
+ router.name
+ )
assert topotest.json_cmp(output, expected) is None, assertmsg
+
def test_shutdown_check_stderr():
- if os.environ.get('TOPOTESTS_CHECK_STDERR') is None:
- pytest.skip('Skipping test for Stderr output and memory leaks')
+ if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
+ pytest.skip("Skipping test for Stderr output and memory leaks")
tgen = get_topogen()
# Don't run this test if we have any failure.
for router in router_list:
router.stop()
- log = tgen.net[router.name].getStdErr('bgpd')
+ log = tgen.net[router.name].getStdErr("bgpd")
if log:
- logger.error('BGPd StdErr Log:' + log)
- log = tgen.net[router.name].getStdErr('zebra')
+ logger.error("BGPd StdErr Log:" + log)
+ log = tgen.net[router.name].getStdErr("zebra")
if log:
- logger.error('Zebra StdErr Log:' + log)
+ logger.error("Zebra StdErr Log:" + log)
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 4):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_sender_as_path_loop_detection():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears['r2']
+ router = tgen.gears["r2"]
def _bgp_converge(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.2 json"))
expected = {
- '192.168.255.2': {
- 'bgpState': 'Established',
- 'addressFamilyInfo': {
- 'ipv4Unicast': {
- 'acceptedPrefixCounter': 2
- }
- }
+ "192.168.255.2": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
}
}
return topotest.json_cmp(output, expected)
def _bgp_has_route_from_r1(router):
output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.254/32 json"))
expected = {
- 'paths': [
+ "paths": [
{
- 'aspath': {
- 'segments': [
- {
- 'type': 'as-sequence',
- 'list': [
- 65001,
- 65003
- ]
- }
- ],
- 'length': 2
+ "aspath": {
+ "segments": [{"type": "as-sequence", "list": [65001, 65003]}],
+ "length": 2,
}
}
]
return topotest.json_cmp(output, expected)
def _bgp_suppress_route_to_r3(router):
- output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.254.2 advertised-routes json"))
- expected = {
- 'totalPrefixCounter': 0
- }
+ output = json.loads(
+ router.vtysh_cmd(
+ "show ip bgp neighbor 192.168.254.2 advertised-routes json"
+ )
+ )
+ expected = {"totalPrefixCounter": 0}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_converge, router)
test_func = functools.partial(_bgp_suppress_route_to_r3, router)
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
- assert result is None, 'Route 172.16.255.254/32 should not be sent to r3 "{}"'.format(router)
+ assert (
+ result is None
+ ), 'Route 172.16.255.254/32 should not be sent to r3 "{}"'.format(router)
+
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 4):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_set_local_preference():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears['r1']
+ router = tgen.gears["r1"]
def _bgp_converge(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor json"))
expected = {
- '192.168.255.2': {
- 'bgpState': 'Established',
- 'addressFamilyInfo': {
- 'ipv4Unicast': {
- 'acceptedPrefixCounter': 2
- }
- }
+ "192.168.255.2": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
+ },
+ "192.168.255.3": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
},
- '192.168.255.3': {
- 'bgpState': 'Established',
- 'addressFamilyInfo': {
- 'ipv4Unicast': {
- 'acceptedPrefixCounter': 2
- }
- }
- }
}
return topotest.json_cmp(output, expected)
def _bgp_check_local_preference(router):
output = json.loads(router.vtysh_cmd("show ip bgp 172.16.255.254/32 json"))
expected = {
- 'paths': [
- {
- 'locPrf': 50,
- 'nexthops': [
- {
- 'ip': '192.168.255.3'
- }
- ]
- },
- {
- 'locPrf': 150,
- 'nexthops': [
- {
- 'ip': '192.168.255.2'
- }
- ]
- }
+ "paths": [
+ {"locPrf": 50, "nexthops": [{"ip": "192.168.255.3"}]},
+ {"locPrf": 150, "nexthops": [{"ip": "192.168.255.2"}]},
]
}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_check_local_preference, router)
success, result = topotest.run_and_expect(test_func, None, count=15, wait=0.5)
- assert result is None, 'Failed to see applied BGP local-preference in "{}"'.format(router)
+ assert result is None, 'Failed to see applied BGP local-preference in "{}"'.format(
+ router
+ )
+
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import functools
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topolog import logger
from mininet.topo import Topo
+
class TemplateTopo(Topo):
def build(self, *_args, **_opts):
tgen = get_topogen(self)
for routern in range(1, 3):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
def setup_module(mod):
tgen = Topogen(TemplateTopo, mod.__name__)
for i, (rname, router) in enumerate(router_list.iteritems(), 1):
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
+
def test_bgp_show_ip_bgp_hostname():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- router = tgen.gears['r2']
+ router = tgen.gears["r2"]
def _bgp_converge(router):
output = json.loads(router.vtysh_cmd("show ip bgp neighbor 192.168.255.1 json"))
expected = {
- '192.168.255.1': {
- 'bgpState': 'Established',
- 'addressFamilyInfo': {
- 'ipv4Unicast': {
- 'acceptedPrefixCounter': 2
- }
- }
+ "192.168.255.1": {
+ "bgpState": "Established",
+ "addressFamilyInfo": {"ipv4Unicast": {"acceptedPrefixCounter": 2}},
}
}
return topotest.json_cmp(output, expected)
def _bgp_show_nexthop_hostname_and_ip(router):
output = json.loads(router.vtysh_cmd("show ip bgp json"))
- for nh in output['routes']['172.16.255.253/32'][0]['nexthops']:
- if 'hostname' in nh and 'ip' in nh:
+ for nh in output["routes"]["172.16.255.253/32"][0]["nexthops"]:
+ if "hostname" in nh and "ip" in nh:
return True
return False
assert result is None, 'Failed bgp convergence in "{}"'.format(router)
assert _bgp_show_nexthop_hostname_and_ip(router) == True
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
class BGPIPV6RTADVVRFTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# Create 2 routers.
- tgen.add_router('r1')
- tgen.add_router('r2')
+ tgen.add_router("r1")
+ tgen.add_router("r2")
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
def setup_module(mod):
"Sets up the pytest environment"
router_list = tgen.routers()
- logger.info('Testing with VRF Lite support')
+ logger.info("Testing with VRF Lite support")
krel = platform.release()
# May need to adjust handling of vrf traffic depending on kernel version
l3mdev_accept = 0
- if topotest.version_cmp(krel, '4.15') >= 0 and \
- topotest.version_cmp(krel, '4.18') <= 0:
+ if (
+ topotest.version_cmp(krel, "4.15") >= 0
+ and topotest.version_cmp(krel, "4.18") <= 0
+ ):
l3mdev_accept = 1
- if topotest.version_cmp(krel, '5.0') >= 0:
+ if topotest.version_cmp(krel, "5.0") >= 0:
l3mdev_accept = 1
- logger.info('krel \'{0}\' setting net.ipv4.tcp_l3mdev_accept={1}'.format(
- krel, l3mdev_accept))
+ logger.info(
+ "krel '{0}' setting net.ipv4.tcp_l3mdev_accept={1}".format(krel, l3mdev_accept)
+ )
- cmds = ['ip link add {0}-cust1 type vrf table 1001',
- 'ip link add loop1 type dummy',
- 'ip link set loop1 master {0}-cust1',
- 'ip link set {0}-eth0 master {0}-cust1']
+ cmds = [
+ "ip link add {0}-cust1 type vrf table 1001",
+ "ip link add loop1 type dummy",
+ "ip link set loop1 master {0}-cust1",
+ "ip link set {0}-eth0 master {0}-cust1",
+ ]
for rname, router in router_list.iteritems():
for cmd in cmds:
output = tgen.net[rname].cmd(cmd.format(rname))
- output = tgen.net[rname].cmd('sysctl -n net.ipv4.tcp_l3mdev_accept')
+ output = tgen.net[rname].cmd("sysctl -n net.ipv4.tcp_l3mdev_accept")
logger.info(
- 'router {0}: existing tcp_l3mdev_accept was {1}'.format(
- rname, output))
+ "router {0}: existing tcp_l3mdev_accept was {1}".format(rname, output)
+ )
if l3mdev_accept:
output = tgen.net[rname].cmd(
- 'sysctl -w net.ipv4.tcp_l3mdev_accept={}'.format(l3mdev_accept))
+ "sysctl -w net.ipv4.tcp_l3mdev_accept={}".format(l3mdev_accept)
+ )
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
# Initialize all routers.
tgen.start_router()
+
def teardown_module(_mod):
"Teardown the pytest environment"
tgen = get_topogen()
logger.info("Checking IPv4 routes for convergence")
for router in tgen.routers().values():
- json_file = '{}/{}/ipv4_routes.json'.format(CWD, router.name)
+ json_file = "{}/{}/ipv4_routes.json".format(CWD, router.name)
if not os.path.isfile(json_file):
- logger.info('skipping file {}'.format(json_file))
+ logger.info("skipping file {}".format(json_file))
continue
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ip route vrf {}-cust1 json'.format(router.name), expected)
- _, result = topotest.run_and_expect(test_func, None, count=160,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show ip route vrf {}-cust1 json".format(router.name),
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
# Check IPv6 routing tables.
logger.info("Checking IPv6 routes for convergence")
for router in tgen.routers().values():
- json_file = '{}/{}/ipv6_routes.json'.format(CWD, router.name)
+ json_file = "{}/{}/ipv6_routes.json".format(CWD, router.name)
if not os.path.isfile(json_file):
- logger.info('skipping file {}'.format(json_file))
+ logger.info("skipping file {}".format(json_file))
continue
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- router, 'show ipv6 route vrf {}-cust1 json'.format(router.name), expected)
- _, result = topotest.run_and_expect(test_func, None, count=160,
- wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show ipv6 route vrf {}-cust1 json".format(router.name),
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=160, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(router.name)
assert result is None, assertmsg
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
exa-receive.py: Save received routes form ExaBGP into file
"""
-from sys import stdin,argv
+from sys import stdin, argv
from datetime import datetime
# 1st arg is peer number
# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 0
-routesavefile = open('/tmp/peer%s-received.log' % peer, 'w')
+routesavefile = open("/tmp/peer%s-received.log" % peer, "w")
while True:
try:
line = stdin.readline()
- timestamp = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
+ timestamp = datetime.now().strftime("%Y%m%d_%H:%M:%S - ")
routesavefile.write(timestamp + line)
routesavefile.flush()
exa-send.py: Send a few testroutes with ExaBGP
"""
-from sys import stdout,argv
+from sys import stdout, argv
from time import sleep
sleep(5)
# Announce numRoutes equal routes per PE - different neighbor AS
for i in range(0, numRoutes):
- stdout.write('announce route 10.201.%s.0/24 med 100 community %i:1 next-hop 10.0.%i.%i\n' % (i, i, (((peer-1) / 5) + 1), peer+100))
+ stdout.write(
+ "announce route 10.201.%s.0/24 med 100 community %i:1 next-hop 10.0.%i.%i\n"
+ % (i, i, (((peer - 1) / 5) + 1), peer + 100)
+ )
stdout.flush()
-#Loop endlessly to allow ExaBGP to continue running
+# Loop endlessly to allow ExaBGP to continue running
while True:
sleep(1)
-
"10.0.1.101":{
"outq":0,
"inq":0,
- "prefixReceivedCount":10,
+ "pfxRcd":10,
"state":"Established"
}
},
"10.0.1.101":{
"outq":0,
"inq":0,
- "prefixReceivedCount":10,
+ "pfxRcd":10,
"state":"Established"
}
},
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
##
#####################################################
+
class BGPVRFNETNSTopo1(Topo):
"BGP EBGP VRF NETNS Topology 1"
tgen = get_topogen(self)
# Setup Routers
- tgen.add_router('r1')
+ tgen.add_router("r1")
# Setup Switches
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
# Add eBGP ExaBGP neighbors
- peer_ip = '10.0.1.101'
- peer_route = 'via 10.0.1.1'
- peer = tgen.add_exabgp_peer('peer1',
- ip=peer_ip, defaultRoute=peer_route)
- switch = tgen.gears['s1']
+ peer_ip = "10.0.1.101"
+ peer_route = "via 10.0.1.1"
+ peer = tgen.add_exabgp_peer("peer1", ip=peer_ip, defaultRoute=peer_route)
+ switch = tgen.gears["s1"]
switch.add_link(peer)
##
#####################################################
+
def setup_module(module):
tgen = Topogen(BGPVRFNETNSTopo1, module.__name__)
tgen.start_topology()
# Get r1 reference
- router = tgen.gears['r1']
+ router = tgen.gears["r1"]
# check for zebra capability
if CustomizeVrfWithNetns == True:
- if router.check_capability(
- TopoRouter.RD_ZEBRA,
- '--vrfwnetns'
- ) == False:
- return pytest.skip('Skipping BGP VRF NETNS Test. VRF NETNS backend not available on FRR')
- if os.system('ip netns list') != 0:
- return pytest.skip('Skipping BGP VRF NETNS Test. NETNS not available on System')
+ if router.check_capability(TopoRouter.RD_ZEBRA, "--vrfwnetns") == False:
+ return pytest.skip(
+ "Skipping BGP VRF NETNS Test. VRF NETNS backend not available on FRR"
+ )
+ if os.system("ip netns list") != 0:
+ return pytest.skip(
+ "Skipping BGP VRF NETNS Test. NETNS not available on System"
+ )
# retrieve VRF backend kind
if CustomizeVrfWithNetns == True:
- logger.info('Testing with VRF Namespace support')
+ logger.info("Testing with VRF Namespace support")
# create VRF r1-cust1
# move r1-eth0 to VRF r1-cust1
- cmds = ['if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi',
- 'ip netns add {0}-cust1',
- 'ip link set dev {0}-eth0 netns {0}-cust1',
- 'ip netns exec {0}-cust1 ifconfig {0}-eth0 up']
+ cmds = [
+ "if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi",
+ "ip netns add {0}-cust1",
+ "ip link set dev {0}-eth0 netns {0}-cust1",
+ "ip netns exec {0}-cust1 ifconfig {0}-eth0 up",
+ ]
for cmd in cmds:
- cmd = cmd.format('r1')
- logger.info('cmd: '+cmd);
- output = router.run(cmd.format('r1'))
+ cmd = cmd.format("r1")
+ logger.info("cmd: " + cmd)
+ output = router.run(cmd.format("r1"))
if output != None and len(output) > 0:
- logger.info('Aborting due to unexpected output: cmd="{}" output=\n{}'.format(cmd, output))
- return pytest.skip('Skipping BGP VRF NETNS Test. Unexpected output to command: '+cmd)
- #run daemons
+ logger.info(
+ 'Aborting due to unexpected output: cmd="{}" output=\n{}'.format(
+ cmd, output
+ )
+ )
+ return pytest.skip(
+ "Skipping BGP VRF NETNS Test. Unexpected output to command: " + cmd
+ )
+ # run daemons
router.load_config(
TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format('r1')),
- '--vrfwnetns'
+ os.path.join(CWD, "{}/zebra.conf".format("r1")),
+ "--vrfwnetns",
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format('r1'))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format("r1"))
)
- logger.info('Launching BGP and ZEBRA')
+ logger.info("Launching BGP and ZEBRA")
# BGP and ZEBRA start without underlying VRF
router.start()
# Starting Hosts and init ExaBGP on each of them
- logger.info('starting exaBGP on peer1')
+ logger.info("starting exaBGP on peer1")
peer_list = tgen.exabgp_peers()
for pname, peer in peer_list.iteritems():
peer_dir = os.path.join(CWD, pname)
- env_file = os.path.join(CWD, 'exabgp.env')
- logger.info('Running ExaBGP peer')
+ env_file = os.path.join(CWD, "exabgp.env")
+ logger.info("Running ExaBGP peer")
peer.start(peer_dir, env_file)
logger.info(pname)
+
def teardown_module(module):
tgen = get_topogen()
# move back r1-eth0 to default VRF
# delete VRF r1-cust1
- cmds = ['ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1',
- 'ip netns delete {0}-cust1']
+ cmds = [
+ "ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1",
+ "ip netns delete {0}-cust1",
+ ]
for cmd in cmds:
- tgen.net['r1'].cmd(cmd.format('r1'))
+ tgen.net["r1"].cmd(cmd.format("r1"))
tgen.stop_topology()
+
def test_bgp_vrf_learn():
"Test daemon learnt VRF context"
tgen = get_topogen()
pytest.skip(tgen.errors)
# Expected result
- output = tgen.gears['r1'].vtysh_cmd("show vrf", isjson=False)
- logger.info('output is: {}'.format(output))
+ output = tgen.gears["r1"].vtysh_cmd("show vrf", isjson=False)
+ logger.info("output is: {}".format(output))
- output = tgen.gears['r1'].vtysh_cmd("show bgp vrfs", isjson=False)
- logger.info('output is: {}'.format(output))
+ output = tgen.gears["r1"].vtysh_cmd("show bgp vrfs", isjson=False)
+ logger.info("output is: {}".format(output))
def test_bgp_convergence():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('waiting for bgp convergence')
+ logger.info("waiting for bgp convergence")
# Expected result
- router = tgen.gears['r1']
- if router.has_version('<', '3.0'):
- reffile = os.path.join(CWD, 'r1/summary20.txt')
+ router = tgen.gears["r1"]
+ if router.has_version("<", "3.0"):
+ reffile = os.path.join(CWD, "r1/summary20.txt")
else:
- reffile = os.path.join(CWD, 'r1/summary.txt')
+ reffile = os.path.join(CWD, "r1/summary.txt")
expected = json.loads(open(reffile).read())
- test_func = functools.partial(topotest.router_json_cmp,
- router, 'show bgp vrf r1-cust1 summary json', expected)
+ test_func = functools.partial(
+ topotest.router_json_cmp, router, "show bgp vrf r1-cust1 summary json", expected
+ )
_, res = topotest.run_and_expect(test_func, None, count=90, wait=0.5)
- assertmsg = 'BGP router network did not converge'
+ assertmsg = "BGP router network did not converge"
assert res is None, assertmsg
+
def test_bgp_vrf_netns():
tgen = get_topogen()
pytest.skip(tgen.errors)
expect = {
- 'routerId': '10.0.1.1',
- 'routes': {
- },
+ "routerId": "10.0.1.1",
+ "routes": {},
}
for subnet in range(0, 10):
- netkey = '10.201.{}.0/24'.format(subnet)
- expect['routes'][netkey] = []
- peer = {'valid': True}
- expect['routes'][netkey].append(peer)
-
- test_func = functools.partial(topotest.router_json_cmp,
- tgen.gears['r1'], 'show ip bgp vrf r1-cust1 ipv4 json', expect)
+ netkey = "10.201.{}.0/24".format(subnet)
+ expect["routes"][netkey] = []
+ peer = {"valid": True}
+ expect["routes"][netkey].append(peer)
+
+ test_func = functools.partial(
+ topotest.router_json_cmp,
+ tgen.gears["r1"],
+ "show ip bgp vrf r1-cust1 ipv4 json",
+ expect,
+ )
_, res = topotest.run_and_expect(test_func, None, count=12, wait=0.5)
assertmsg = 'expected routes in "show ip bgp vrf r1-cust1 ipv4" output'
assert res is None, assertmsg
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
ret = pytest.main(args)
topology_only = False
+
def pytest_addoption(parser):
"""
Add topology-only option to the topology tester. This option makes pytest
only run the setup_module() to setup the topology without running any tests.
"""
- parser.addoption('--topology-only', action='store_true',
- help='Only set up this topology, don\'t run tests')
+ parser.addoption(
+ "--topology-only",
+ action="store_true",
+ help="Only set up this topology, don't run tests",
+ )
+
def pytest_runtest_call():
"""
# Allow user to play with the setup.
tgen.mininet_cli()
- pytest.exit('the topology executed successfully')
+ pytest.exit("the topology executed successfully")
+
def pytest_assertrepr_compare(op, left, right):
"""
return json_result.errors
+
def pytest_configure(config):
"Assert that the environment is correctly configured."
global topology_only
if not diagnose_env():
- pytest.exit('enviroment has errors, please read the logs')
+ pytest.exit("environment has errors, please read the logs")
- if config.getoption('--topology-only'):
+ if config.getoption("--topology-only"):
topology_only = True
+
def pytest_runtest_makereport(item, call):
"Log all assert messages to default logger with error level"
# Nothing happened
modname = parent.module.__name__
# Treat skips as non errors
- if call.excinfo.typename != 'AssertionError':
- logger.info('assert skipped at "{}/{}": {}'.format(
- modname, item.name, call.excinfo.value))
+ if call.excinfo.typename != "AssertionError":
+ logger.info(
+ 'assert skipped at "{}/{}": {}'.format(
+ modname, item.name, call.excinfo.value
+ )
+ )
return
# Handle assert failures
parent._previousfailed = item
- logger.error('assert failed at "{}/{}": {}'.format(
- modname, item.name, call.excinfo.value))
+ logger.error(
+ 'assert failed at "{}/{}": {}'.format(modname, item.name, call.excinfo.value)
+ )
# (topogen) Set topology error to avoid advancing in the test.
tgen = get_topogen()
if tgen is not None:
# This will cause topogen to report error on `routers_have_failure`.
- tgen.set_error('{}/{}'.format(modname, item.name))
+ tgen.set_error("{}/{}".format(modname, item.name))
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
##
#####################################################
+
class NetworkTopo(Topo):
"EIGRP Topology 1"
tgen = get_topogen(self)
for routern in range(1, 4):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
# On main router
# First switch is for a dummy interface (for local network)
- switch = tgen.add_switch('sw1')
- switch.add_link(tgen.gears['r1'])
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["r1"])
# Switches for EIGRP
# switch 2 switch is for connection to EIGRP router
- switch = tgen.add_switch('sw2')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
# switch 4 is stub on remote EIGRP router
- switch = tgen.add_switch('sw4')
- switch.add_link(tgen.gears['r3'])
+ switch = tgen.add_switch("sw4")
+ switch.add_link(tgen.gears["r3"])
# switch 3 is between EIGRP routers
- switch = tgen.add_switch('sw3')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
+ switch = tgen.add_switch("sw3")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
#####################################################
##
#####################################################
+
def setup_module(module):
"Setup topology"
tgen = Topogen(NetworkTopo, module.__name__)
router_list = tgen.routers()
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_EIGRP,
- os.path.join(CWD, '{}/eigrpd.conf'.format(rname))
+ TopoRouter.RD_EIGRP, os.path.join(CWD, "{}/eigrpd.conf".format(rname))
)
tgen.start_router()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- topotest.sleep(5, 'Waiting for EIGRP convergence')
+ topotest.sleep(5, "Waiting for EIGRP convergence")
def test_eigrp_routes():
router_list = tgen.routers().values()
for router in router_list:
- refTableFile = '{}/{}/show_ip_eigrp.json'.format(CWD, router.name)
+ refTableFile = "{}/{}/show_ip_eigrp.json".format(CWD, router.name)
# Read expected result from file
expected = json.loads(open(refTableFile).read())
assertmsg = '"show ip eigrp topo" mismatches on {}'.format(router.name)
assert topotest.json_cmp(actual, expected) is None, assertmsg
+
def test_zebra_ipv4_routingTable():
"Test 'show ip route'"
failures = 0
router_list = tgen.routers().values()
for router in router_list:
- output = router.vtysh_cmd('show ip route json', isjson=True)
- refTableFile = '{}/{}/show_ip_route.json_ref'.format(CWD, router.name)
+ output = router.vtysh_cmd("show ip route json", isjson=True)
+ refTableFile = "{}/{}/show_ip_route.json_ref".format(CWD, router.name)
expected = json.loads(open(refTableFile).read())
- assertmsg = 'Zebra IPv4 Routing Table verification failed for router {}'.format(router.name)
+ assertmsg = "Zebra IPv4 Routing Table verification failed for router {}".format(
+ router.name
+ )
assert topotest.json_cmp(output, expected) is None, assertmsg
+
def test_shut_interface_and_recover():
"Test shutdown of an interface and recovery of the interface"
tgen = get_topogen()
- router = tgen.gears['r1']
- router.run('ip link set r1-eth1 down')
- topotest.sleep(5, 'Waiting for EIGRP convergence')
- router.run('ip link set r1-eth1 up')
-
+ router = tgen.gears["r1"]
+ router.run("ip link set r1-eth1 down")
+ topotest.sleep(5, "Waiting for EIGRP convergence")
+ router.run("ip link set r1-eth1 up")
def test_shutdown_check_stderr():
- if os.environ.get('TOPOTESTS_CHECK_STDERR') is None:
- pytest.skip('Skipping test for Stderr output and memory leaks')
+ if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
+ pytest.skip("Skipping test for Stderr output and memory leaks")
tgen = get_topogen()
# Don't run this test if we have any failure.
for router in router_list:
router.stop()
- log = tgen.net[router.name].getStdErr('eigrpd')
+ log = tgen.net[router.name].getStdErr("eigrpd")
if log:
- logger.error('EIGRPd StdErr Log:' + log)
- log = tgen.net[router.name].getStdErr('zebra')
+ logger.error("EIGRPd StdErr Log:" + log)
+ log = tgen.net[router.name].getStdErr("zebra")
if log:
- logger.error('Zebra StdErr Log:' + log)
+ logger.error("Zebra StdErr Log:" + log)
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
}
}
"""
- output = topotest.normalize_text(node.vtysh_cmd('show ip eigrp topo')).splitlines()
+ output = topotest.normalize_text(node.vtysh_cmd("show ip eigrp topo")).splitlines()
result = {}
for idx, line in enumerate(output):
- columns = line.split(' ', 1)
+ columns = line.split(" ", 1)
# Parse the following format into python dicts
# code A.B.C.D/E, X successors, FD is Y, serno: Z
# via FOO, interface-name
code = columns[0]
- if code not in ['P', 'A', 'U', 'Q', 'R', 'r', 's']:
+ if code not in ["P", "A", "U", "Q", "R", "r", "s"]:
continue
if not result.has_key(code):
result[code] = {}
# Split network from the rest
- columns = columns[1].split(',')
+ columns = columns[1].split(",")
# Parse first line data
network = columns[0]
if column == columns[0]:
continue
- match = re.search(r'(\d+) successors', column)
+ match = re.search(r"(\d+) successors", column)
if match is not None:
- result[code][network]['successors'] = match.group(1)
+ result[code][network]["successors"] = match.group(1)
continue
- match = re.search(r'FD is (\d+)', column)
+ match = re.search(r"FD is (\d+)", column)
if match is not None:
- result[code][network]['fd'] = match.group(1)
+ result[code][network]["fd"] = match.group(1)
continue
- match = re.search(r'serno: (\d+)', column)
+ match = re.search(r"serno: (\d+)", column)
if match is not None:
- result[code][network]['serno'] = match.group(1)
+ result[code][network]["serno"] = match.group(1)
continue
# Parse second line data
nextline = output[idx + 1]
- columns = topotest.normalize_text(nextline).split(',')
+ columns = topotest.normalize_text(nextline).split(",")
for column in columns:
- match = re.search(r'via (.+)', column)
+ match = re.search(r"via (.+)", column)
if match is not None:
- result[code][network]['via'] = match.group(1)
+ result[code][network]["via"] = match.group(1)
continue
- match = re.search(r'(.+)', column)
+ match = re.search(r"(.+)", column)
if match is not None:
- result[code][network]['interface'] = match.group(1)
+ result[code][network]["interface"] = match.group(1)
continue
return result
"tableVersion":0,
"outq":0,
"inq":0,
- "prefixReceivedCount":3,
"pfxRcd":3,
"pfxSnt":7,
"state":"Established",
"tableVersion":0,
"outq":0,
"inq":0,
- "prefixReceivedCount":3,
"pfxRcd":3,
"pfxSnt":7,
"state":"Established",
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
##
#####################################################
+
class NetworkTopo(Topo):
"evpn-pim Topology 1"
tgen = get_topogen(self)
- tgen.add_router('spine')
- tgen.add_router('leaf1')
- tgen.add_router('leaf2')
- tgen.add_router('host1')
- tgen.add_router('host2')
+ tgen.add_router("spine")
+ tgen.add_router("leaf1")
+ tgen.add_router("leaf2")
+ tgen.add_router("host1")
+ tgen.add_router("host2")
# On main router
# First switch is for a dummy interface (for local network)
# spine-eth0 is connected to leaf1-eth0
- switch = tgen.add_switch('sw1')
- switch.add_link(tgen.gears['spine'])
- switch.add_link(tgen.gears['leaf1'])
+ switch = tgen.add_switch("sw1")
+ switch.add_link(tgen.gears["spine"])
+ switch.add_link(tgen.gears["leaf1"])
# spine-eth1 is connected to leaf2-eth0
- switch = tgen.add_switch('sw2')
- switch.add_link(tgen.gears['spine'])
- switch.add_link(tgen.gears['leaf2'])
+ switch = tgen.add_switch("sw2")
+ switch.add_link(tgen.gears["spine"])
+ switch.add_link(tgen.gears["leaf2"])
# leaf1-eth1 is connected to host1-eth0
- switch = tgen.add_switch('sw3')
- switch.add_link(tgen.gears['leaf1'])
- switch.add_link(tgen.gears['host1'])
+ switch = tgen.add_switch("sw3")
+ switch.add_link(tgen.gears["leaf1"])
+ switch.add_link(tgen.gears["host1"])
# leaf2-eth1 is connected to host2-eth0
- switch = tgen.add_switch('sw4')
- switch.add_link(tgen.gears['leaf2'])
- switch.add_link(tgen.gears['host2'])
-
+ switch = tgen.add_switch("sw4")
+ switch.add_link(tgen.gears["leaf2"])
+ switch.add_link(tgen.gears["host2"])
#####################################################
##
#####################################################
+
def setup_module(module):
"Setup topology"
tgen = Topogen(NetworkTopo, module.__name__)
tgen.start_topology()
- leaf1 = tgen.gears['leaf1']
- leaf2 = tgen.gears['leaf2']
-
- leaf1.run('brctl addbr brleaf1')
- leaf2.run('brctl addbr brleaf2')
- leaf1.run('ip link set dev brleaf1 up')
- leaf2.run('ip link set dev brleaf2 up')
- leaf1.run('ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf1-eth1 dstport 4789')
- leaf2.run('ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf2-eth1 dstport 4789')
- leaf1.run('brctl addif brleaf1 vxlan0')
- leaf2.run('brctl addif brleaf2 vxlan0')
- leaf1.run('ip link set up dev vxlan0')
- leaf2.run('ip link set up dev vxlan0')
- #tgen.mininet_cli()
+ leaf1 = tgen.gears["leaf1"]
+ leaf2 = tgen.gears["leaf2"]
+
+ leaf1.run("brctl addbr brleaf1")
+ leaf2.run("brctl addbr brleaf2")
+ leaf1.run("ip link set dev brleaf1 up")
+ leaf2.run("ip link set dev brleaf2 up")
+ leaf1.run(
+ "ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf1-eth1 dstport 4789"
+ )
+ leaf2.run(
+ "ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev leaf2-eth1 dstport 4789"
+ )
+ leaf1.run("brctl addif brleaf1 vxlan0")
+ leaf2.run("brctl addif brleaf2 vxlan0")
+ leaf1.run("ip link set up dev vxlan0")
+ leaf2.run("ip link set up dev vxlan0")
+ # tgen.mininet_cli()
# This is a sample of configuration loading.
router_list = tgen.routers()
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_PIM,
- os.path.join(CWD, '{}/pimd.conf'.format(rname))
+ TopoRouter.RD_PIM, os.path.join(CWD, "{}/pimd.conf".format(rname))
)
tgen.start_router()
- #tgen.mininet_cli()
+ # tgen.mininet_cli()
+
def teardown_module(_mod):
"Teardown the pytest environment"
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- spine = tgen.gears['spine']
- json_file = '{}/{}/bgp.summ.json'.format(CWD, spine.name)
+ spine = tgen.gears["spine"]
+ json_file = "{}/{}/bgp.summ.json".format(CWD, spine.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- spine, 'show bgp ipv4 uni summ json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, spine, "show bgp ipv4 uni summ json", expected
+ )
_, result = topotest.run_and_expect(test_func, None, count=125, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(spine.name)
assert result is None, assertmsg
- #tgen.mininet_cli()
+ # tgen.mininet_cli()
+
def test_multicast_groups_on_rp():
"Ensure the multicast groups show up on the spine"
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- spine = tgen.gears['spine']
- json_file = '{}/{}/join-info.json'.format(CWD, spine.name)
+ spine = tgen.gears["spine"]
+ json_file = "{}/{}/join-info.json".format(CWD, spine.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- spine, 'show ip pim join json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, spine, "show ip pim join json", expected
+ )
_, result = topotest.run_and_expect(test_func, None, count=30, wait=1)
assertmsg = '"{}" JSON output mismatches'.format(spine.name)
assert result is None, assertmsg
- #tgen.mininet_cli()
+ # tgen.mininet_cli()
+
def test_shutdown_check_stderr():
- if os.environ.get('TOPOTESTS_CHECK_STDERR') is None:
- pytest.skip('Skipping test for Stderr output and memory leaks')
+ if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
+ pytest.skip("Skipping test for Stderr output and memory leaks")
tgen = get_topogen()
# Don't run this test if we have any failure.
for router in router_list:
router.stop()
- log = tgen.net[router.name].getStdErr('pimd')
+ log = tgen.net[router.name].getStdErr("pimd")
if log:
- logger.error('PIMd StdErr Log:' + log)
- log = tgen.net[router.name].getStdErr('bgpd')
+ logger.error("PIMd StdErr Log:" + log)
+ log = tgen.net[router.name].getStdErr("bgpd")
if log:
- logger.error('BGPd StdErr Log:' + log)
- log = tgen.net[router.name].getStdErr('zebra')
+ logger.error("BGPd StdErr Log:" + log)
+ log = tgen.net[router.name].getStdErr("zebra")
if log:
- logger.error('Zebra StdErr Log:' + log)
+ logger.error("Zebra StdErr Log:" + log)
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
-
fatal_error = ""
+
def setup_module(module):
- print ("setup_module module:%s" % module.__name__)
+ print("setup_module module:%s" % module.__name__)
+
def teardown_module(module):
- print ("teardown_module module:%s" % module.__name__)
+ print("teardown_module module:%s" % module.__name__)
+
def setup_function(function):
- print ("setup_function function:%s" % function.__name__)
+ print("setup_function function:%s" % function.__name__)
+
def teardown_function(function):
- print ("teardown_function function:%s" % function.__name__)
+ print("teardown_function function:%s" % function.__name__)
+
def test_numbers_compare():
a = 12
- print ("Dummy Output")
- assert( a == 12 )
+ print("Dummy Output")
+ assert a == 12
+
def test_fail_example():
assert True, "Some Text with explaination in case of failure"
+
def test_ls_exits_zero():
"Tests for ls command on invalid file"
global fatal_error
proc = subprocess.Popen(
- ["ls", "/some/nonexistant/file"],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
+ ["ls", "/some/nonexistant/file"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
)
stdout, stderr = proc.communicate()
- if (proc.returncode != 0):
+ if proc.returncode != 0:
# Mark this as a fatal error which skips some other tests on failure
fatal_error = "test_fail_example failed"
assert proc.returncode == 0, "Return Code is non-Zero:\n%s" % stderr
+
def test_skipped_on_fatalerror():
global fatal_error
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
assert True, "Some Text with explaination in case of failure"
-if __name__ == '__main__':
+
+if __name__ == "__main__":
retval = pytest.main(["-s"])
sys.exit(retval)
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+
class TemplateTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
#
# Create 2 routers
for routern in range(1, 3):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
# Create a switch with just one router connected to it to simulate a
# empty network.
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
# Create a connection between r1 and r2
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
def setup_module(mod):
"Sets up the pytest environment"
router.load_config(
TopoRouter.RD_ZEBRA,
# Uncomment next line to load configuration from ./router/zebra.conf
- #os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ # os.path.join(CWD, '{}/zebra.conf'.format(rname))
)
# After loading the configurations, this function loads configured daemons.
tgen.start_router()
+
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
# This function tears down the whole topology.
tgen.stop_topology()
+
def test_call_mininet_cli():
"Dummy test that just calls mininet CLI so we can interact with the build."
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('calling mininet CLI')
+ logger.info("calling mininet CLI")
tgen.mininet_cli()
+
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
-sys.path.append(os.path.join(CWD, '../../'))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
# pylint: disable=C0413
from lib.topogen import Topogen, get_topogen
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
- start_topology, write_test_header,
- write_test_footer, verify_rib
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ verify_rib,
)
from lib.topolog import logger
-from lib.bgp import (
- verify_bgp_convergence
-)
+from lib.bgp import verify_bgp_convergence
from lib.topojson import build_topo_from_json, build_config_from_json
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/example_topojson_multiple_links.json".format(CWD)
try:
- with open(jsonFile, 'r') as topoJson:
+ with open(jsonFile, "r") as topoJson:
topo = json.load(topoJson)
except IOError:
assert False, "Could not read file {}".format(jsonFile)
# Api call verify whether BGP is converged
bgp_convergence = verify_bgp_convergence(tgen, topo)
- assert bgp_convergence is True, "test_bgp_convergence failed.. \n" \
- " Error: {}".format(bgp_convergence)
+ assert (
+ bgp_convergence is True
+ ), "test_bgp_convergence failed.. \n" " Error: {}".format(bgp_convergence)
logger.info("BGP is converged successfully \n")
write_test_footer(tc_name)
tgen = get_topogen()
if bgp_convergence is not True:
- pytest.skip('skipped because of BGP Convergence failure')
+ pytest.skip("skipped because of BGP Convergence failure")
# test case name
tc_name = request.node.name
# Static routes are created as part of initial configuration,
# verifying RIB
- dut = 'r3'
- protocol = 'bgp'
- next_hop = '10.0.0.1'
+ dut = "r3"
+ protocol = "bgp"
+ next_hop = "10.0.0.1"
input_dict = {"r1": topo["routers"]["r1"]}
# Uncomment below to debug
# tgen.mininet_cli()
- result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, next_hop=next_hop)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
-sys.path.append(os.path.join(CWD, '../../'))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
# pylint: disable=C0413
-from lib.topogen import Topogen, get_topogen
+from lib.topogen import Topogen, get_topogen
# Required to instantiate the topology builder class.
from mininet.topo import Topo
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
- start_topology, write_test_header,
- write_test_footer, verify_rib
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ verify_rib,
)
from lib.topolog import logger
-from lib.bgp import (
- verify_bgp_convergence
-)
+from lib.bgp import verify_bgp_convergence
from lib.topojson import build_topo_from_json, build_config_from_json
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/example_topojson.json".format(CWD)
try:
- with open(jsonFile, 'r') as topoJson:
+ with open(jsonFile, "r") as topoJson:
topo = json.load(topoJson)
except IOError:
assert False, "Could not read file {}".format(jsonFile)
bgp_convergence = False
input_dict = {}
+
class TemplateTopo(Topo):
"""
Test topology builder
# Building topology from json file
build_topo_from_json(tgen, topo)
+
def setup_module(mod):
"""
Sets up the pytest environment
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
- logger.info("="*40)
+ logger.info("=" * 40)
logger.info("Running setup_module to create topology")
logger.info("Running setup_module() done")
+
def teardown_module(mod):
"""
Teardown the pytest environment
# Api call verify whether BGP is converged
bgp_convergence = verify_bgp_convergence(tgen, topo)
- assert bgp_convergence is True, "test_bgp_convergence failed.. \n"\
- " Error: {}".format(bgp_convergence)
+ assert (
+ bgp_convergence is True
+ ), "test_bgp_convergence failed.. \n" " Error: {}".format(bgp_convergence)
logger.info("BGP is converged successfully \n")
write_test_footer(tc_name)
tgen = get_topogen()
if bgp_convergence is not True:
- pytest.skip('skipped because of BGP Convergence failure')
+ pytest.skip("skipped because of BGP Convergence failure")
# test case name
tc_name = request.node.name
# Static routes are created as part of initial configuration,
# verifying RIB
- dut = 'r3'
- next_hop = '10.0.0.1'
+ dut = "r3"
+ next_hop = "10.0.0.1"
input_dict = {"r1": topo["routers"]["r1"]}
# Uncomment below to debug
# tgen.mininet_cli()
- result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, next_hop=next_hop)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
-sys.path.append(os.path.join(CWD, '../../'))
+sys.path.append(os.path.join(CWD, "../"))
+sys.path.append(os.path.join(CWD, "../../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
-from lib.topogen import Topogen, get_topogen
+from lib.topogen import Topogen, get_topogen
# Required to instantiate the topology builder class.
from mininet.topo import Topo
# Import topoJson from lib, to create topology and initial configuration
from lib.common_config import (
- start_topology, write_test_header,
- write_test_footer, verify_rib
+ start_topology,
+ write_test_header,
+ write_test_footer,
+ verify_rib,
)
from lib.topolog import logger
-from lib.bgp import (
- verify_bgp_convergence
-)
+from lib.bgp import verify_bgp_convergence
from lib.topojson import build_topo_from_json, build_config_from_json
# Reading the data from JSON File for topology and configuration creation
jsonFile = "{}/example_topojson.json".format(CWD)
try:
- with open(jsonFile, 'r') as topoJson:
+ with open(jsonFile, "r") as topoJson:
topo = json.load(topoJson)
except IOError:
assert False, "Could not read file {}".format(jsonFile)
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
- logger.info("="*40)
+ logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# Api call verify whether BGP is converged
bgp_convergence = verify_bgp_convergence(tgen, topo)
- assert bgp_convergence is True, "test_bgp_convergence failed.. \n"\
- " Error: {}".format(bgp_convergence)
+ assert (
+ bgp_convergence is True
+ ), "test_bgp_convergence failed.. \n" " Error: {}".format(bgp_convergence)
logger.info("BGP is converged successfully \n")
write_test_footer(tc_name)
tgen = get_topogen()
if bgp_convergence is not True:
- pytest.skip('skipped because of BGP Convergence failure')
+ pytest.skip("skipped because of BGP Convergence failure")
# test case name
tc_name = request.node.name
# Static routes are created as part of initial configuration,
# verifying RIB
- dut = 'r3'
- next_hop = ['10.0.0.1', '10.0.0.5']
+ dut = "r3"
+ next_hop = ["10.0.0.1", "10.0.0.5"]
input_dict = {
"r1": {
"static_routes": [
"network": "100.0.20.1/32",
"no_of_ip": 9,
"admin_distance": 100,
- "next_hop": "10.0.0.1"
+ "next_hop": "10.0.0.1",
}
]
}
}
# Uncomment below to debug
# tgen.mininet_cli()
- result = verify_rib(tgen, 'ipv4', dut, input_dict, next_hop=next_hop)
- assert result is True, "Testcase {} :Failed \n Error: {}". \
- format(tc_name, result)
+ result = verify_rib(tgen, "ipv4", dut, input_dict, next_hop=next_hop)
+ assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
import time
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
class ISISTopo1(Topo):
"Simple two layer ISIS topology"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# \ /
# r5
for routern in range(1, 6):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
# r1 <- sw1 -> r3
- sw = tgen.add_switch('sw1')
- sw.add_link(tgen.gears['r1'])
- sw.add_link(tgen.gears['r3'])
+ sw = tgen.add_switch("sw1")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["r3"])
# r2 <- sw2 -> r4
- sw = tgen.add_switch('sw2')
- sw.add_link(tgen.gears['r2'])
- sw.add_link(tgen.gears['r4'])
+ sw = tgen.add_switch("sw2")
+ sw.add_link(tgen.gears["r2"])
+ sw.add_link(tgen.gears["r4"])
# r3 <- sw3 -> r5
- sw = tgen.add_switch('sw3')
- sw.add_link(tgen.gears['r3'])
- sw.add_link(tgen.gears['r5'])
+ sw = tgen.add_switch("sw3")
+ sw.add_link(tgen.gears["r3"])
+ sw.add_link(tgen.gears["r5"])
# r4 <- sw4 -> r5
- sw = tgen.add_switch('sw4')
- sw.add_link(tgen.gears['r4'])
- sw.add_link(tgen.gears['r5'])
+ sw = tgen.add_switch("sw4")
+ sw.add_link(tgen.gears["r4"])
+ sw.add_link(tgen.gears["r5"])
def setup_module(mod):
# For all registered routers, load the zebra configuration file
for rname, router in tgen.routers().iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_ISIS,
- os.path.join(CWD, '{}/isisd.conf'.format(rname))
+ TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname))
)
# After loading the configurations, this function loads configured daemons.
has_version_20 = False
for router in tgen.routers().values():
- if router.has_version('<', '3'):
+ if router.has_version("<", "3"):
has_version_20 = True
if has_version_20:
- logger.info('Skipping ISIS tests for FRR 2.0')
- tgen.set_error('ISIS has convergence problems with IPv6')
+ logger.info("Skipping ISIS tests for FRR 2.0")
+ tgen.set_error("ISIS has convergence problems with IPv6")
def teardown_module(mod):
# )
for rname, router in tgen.routers().iteritems():
- filename = '{0}/{1}/{1}_topology.json'.format(CWD, rname)
+ filename = "{0}/{1}/{1}_topology.json".format(CWD, rname)
expected = json.loads(open(filename).read())
def compare_isis_topology(router, expected):
return topotest.json_cmp(actual, expected)
test_func = functools.partial(compare_isis_topology, router, expected)
- (result, diff) = topotest.run_and_expect(test_func, None,
- wait=0.5, count=120)
- assert result, 'ISIS did not converge on {}:\n{}'.format(rname, diff)
+ (result, diff) = topotest.run_and_expect(test_func, None, wait=0.5, count=120)
+ assert result, "ISIS did not converge on {}:\n{}".format(rname, diff)
def test_isis_route_installation():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('Checking routers for installed ISIS routes')
+ logger.info("Checking routers for installed ISIS routes")
# Check for routes in 'show ip route json'
for rname, router in tgen.routers().iteritems():
- filename = '{0}/{1}/{1}_route.json'.format(CWD, rname)
- expected = json.loads(open(filename, 'r').read())
- actual = router.vtysh_cmd('show ip route json', isjson=True)
+ filename = "{0}/{1}/{1}_route.json".format(CWD, rname)
+ expected = json.loads(open(filename, "r").read())
+ actual = router.vtysh_cmd("show ip route json", isjson=True)
# Older FRR versions don't list interfaces in some ISIS routes
- if router.has_version('<', '3.1'):
+ if router.has_version("<", "3.1"):
for network, routes in expected.iteritems():
for route in routes:
- if route['protocol'] != 'isis':
+ if route["protocol"] != "isis":
continue
- for nexthop in route['nexthops']:
- nexthop.pop('interfaceIndex', None)
- nexthop.pop('interfaceName', None)
+ for nexthop in route["nexthops"]:
+ nexthop.pop("interfaceIndex", None)
+ nexthop.pop("interfaceName", None)
assertmsg = "Router '{}' routes mismatch".format(rname)
assert topotest.json_cmp(actual, expected) is None, assertmsg
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('Checking routers for installed ISIS routes in OS')
+ logger.info("Checking routers for installed ISIS routes in OS")
# Check for routes in `ip route`
for rname, router in tgen.routers().iteritems():
- filename = '{0}/{1}/{1}_route_linux.json'.format(CWD, rname)
- expected = json.loads(open(filename, 'r').read())
+ filename = "{0}/{1}/{1}_route_linux.json".format(CWD, rname)
+ expected = json.loads(open(filename, "r").read())
actual = topotest.ip4_route(router)
# Older FRR versions install routes using different proto
- if router.has_version('<', '3.1'):
+ if router.has_version("<", "3.1"):
for network, netoptions in expected.iteritems():
- if 'proto' in netoptions and netoptions['proto'] == '187':
- netoptions['proto'] = 'zebra'
+ if "proto" in netoptions and netoptions["proto"] == "187":
+ netoptions["proto"] = "zebra"
assertmsg = "Router '{}' OS routes mismatch".format(rname)
assert topotest.json_cmp(actual, expected) is None, assertmsg
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('Checking routers for installed ISIS IPv6 routes')
+ logger.info("Checking routers for installed ISIS IPv6 routes")
# Check for routes in 'show ip route json'
for rname, router in tgen.routers().iteritems():
- filename = '{0}/{1}/{1}_route6.json'.format(CWD, rname)
- expected = json.loads(open(filename, 'r').read())
- actual = router.vtysh_cmd('show ipv6 route json', isjson=True)
+ filename = "{0}/{1}/{1}_route6.json".format(CWD, rname)
+ expected = json.loads(open(filename, "r").read())
+ actual = router.vtysh_cmd("show ipv6 route json", isjson=True)
# Older FRR versions don't list interfaces in some ISIS routes
- if router.has_version('<', '3.1'):
+ if router.has_version("<", "3.1"):
for network, routes in expected.iteritems():
for route in routes:
# Older versions display different metrics for IPv6 routes
- route.pop('metric', None)
+ route.pop("metric", None)
- if route['protocol'] != 'isis':
+ if route["protocol"] != "isis":
continue
- for nexthop in route['nexthops']:
- nexthop.pop('interfaceIndex', None)
- nexthop.pop('interfaceName', None)
+ for nexthop in route["nexthops"]:
+ nexthop.pop("interfaceIndex", None)
+ nexthop.pop("interfaceName", None)
assertmsg = "Router '{}' routes mismatch".format(rname)
assert topotest.json_cmp(actual, expected) is None, assertmsg
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('Checking routers for installed ISIS IPv6 routes in OS')
+ logger.info("Checking routers for installed ISIS IPv6 routes in OS")
# Check for routes in `ip route`
for rname, router in tgen.routers().iteritems():
- filename = '{0}/{1}/{1}_route6_linux.json'.format(CWD, rname)
- expected = json.loads(open(filename, 'r').read())
+ filename = "{0}/{1}/{1}_route6_linux.json".format(CWD, rname)
+ expected = json.loads(open(filename, "r").read())
actual = topotest.ip6_route(router)
# Older FRR versions install routes using different proto
- if router.has_version('<', '3.1'):
+ if router.has_version("<", "3.1"):
for network, netoptions in expected.iteritems():
- if 'proto' in netoptions and netoptions['proto'] == '187':
- netoptions['proto'] = 'zebra'
+ if "proto" in netoptions and netoptions["proto"] == "187":
+ netoptions["proto"] = "zebra"
assertmsg = "Router '{}' OS routes mismatch".format(rname)
assert topotest.json_cmp(actual, expected) is None, assertmsg
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
https://gist.github.com/angstwad/bf22d1822c38a92ec0a9
"""
for k, v in merge_dct.iteritems():
- if (k in dct and isinstance(dct[k], dict)
- and isinstance(merge_dct[k], collections.Mapping)):
+ if (
+ k in dct
+ and isinstance(dct[k], dict)
+ and isinstance(merge_dct[k], collections.Mapping)
+ ):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
if area_match:
area = area_match.group(1)
if area not in areas:
- areas[area] = {
- level: {
- 'ipv4': [],
- 'ipv6': []
- }
- }
+ areas[area] = {level: {"ipv4": [], "ipv6": []}}
ipv = None
continue
elif area is None:
continue
if re.match(r"IS\-IS paths to level-. routers that speak IPv6", line):
- ipv = 'ipv6'
+ ipv = "ipv6"
continue
if re.match(r"IS\-IS paths to level-. routers that speak IP", line):
- ipv = 'ipv4'
+ ipv = "ipv4"
continue
- item_match = re.match(
- r"([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+)", line)
+ item_match = re.match(r"([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+)", line)
if item_match is not None:
# Skip header
- if (item_match.group(1) == 'Vertex' and
- item_match.group(2) == 'Type' and
- item_match.group(3) == 'Metric' and
- item_match.group(4) == 'Next-Hop' and
- item_match.group(5) == 'Interface' and
- item_match.group(6) == 'Parent'):
+ if (
+ item_match.group(1) == "Vertex"
+ and item_match.group(2) == "Type"
+ and item_match.group(3) == "Metric"
+ and item_match.group(4) == "Next-Hop"
+ and item_match.group(5) == "Interface"
+ and item_match.group(6) == "Parent"
+ ):
continue
- areas[area][level][ipv].append({
- 'vertex': item_match.group(1),
- 'type': item_match.group(2),
- 'metric': item_match.group(3),
- 'next-hop': item_match.group(4),
- 'interface': item_match.group(5),
- 'parent': item_match.group(6),
- })
+ areas[area][level][ipv].append(
+ {
+ "vertex": item_match.group(1),
+ "type": item_match.group(2),
+ "metric": item_match.group(3),
+ "next-hop": item_match.group(4),
+ "interface": item_match.group(5),
+ "parent": item_match.group(6),
+ }
+ )
continue
item_match = re.match(r"([^ ]+) ([^ ]+) ([^ ]+) ([^ ]+)", line)
if item_match is not None:
- areas[area][level][ipv].append({
- 'vertex': item_match.group(1),
- 'type': item_match.group(2),
- 'metric': item_match.group(3),
- 'parent': item_match.group(4),
- })
+ areas[area][level][ipv].append(
+ {
+ "vertex": item_match.group(1),
+ "type": item_match.group(2),
+ "metric": item_match.group(3),
+ "parent": item_match.group(4),
+ }
+ )
continue
item_match = re.match(r"([^ ]+)", line)
if item_match is not None:
- areas[area][level][ipv].append({'vertex': item_match.group(1)})
+ areas[area][level][ipv].append({"vertex": item_match.group(1)})
continue
return areas
}
"""
l1out = topotest.normalize_text(
- router.vtysh_cmd('show isis topology level-1')
+ router.vtysh_cmd("show isis topology level-1")
).splitlines()
l2out = topotest.normalize_text(
- router.vtysh_cmd('show isis topology level-2')
+ router.vtysh_cmd("show isis topology level-2")
).splitlines()
- l1 = parse_topology(l1out, 'level-1')
- l2 = parse_topology(l2out, 'level-2')
+ l1 = parse_topology(l1out, "level-1")
+ l2 = parse_topology(l2out, "level-2")
dict_merge(l1, l2)
return l1
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+
class TemplateTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
#
# Define FRR Routers
#
- for router in ['r1', 'r2', 'r3', 'r4']:
+ for router in ["r1", "r2", "r3", "r4"]:
tgen.add_router(router)
#
# Define connections
#
- switch = tgen.add_switch('s0')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s0")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
- switch.add_link(tgen.gears['r4'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
def setup_module(mod):
"Sets up the pytest environment"
# For all registered routers, load the zebra configuration file
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
# Don't start ospfd and ldpd in the CE nodes
- if router.name[0] == 'r':
+ if router.name[0] == "r":
router.load_config(
- TopoRouter.RD_OSPF,
- os.path.join(CWD, '{}/ospfd.conf'.format(rname))
+ TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_LDP,
- os.path.join(CWD, '{}/ldpd.conf'.format(rname))
+ TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
logger.info('Comparing router "%s" "%s" output', rname, command)
tgen = get_topogen()
- filename = '{}/{}/{}'.format(CWD, rname, reference)
+ filename = "{}/{}/{}".format(CWD, rname, reference)
expected = json.loads(open(filename).read())
# Run test function until we get an result. Wait at most 80 seconds.
- test_func = partial(topotest.router_json_cmp,
- tgen.gears[rname], command, expected)
+ test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
_, diff = topotest.run_and_expect(test_func, None, count=160, wait=0.5)
assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
assert diff is None, assertmsg
+
def test_ospf_convergence():
logger.info("Test: check OSPF adjacencies")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3', 'r4']:
- router_compare_json_output(rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json")
+ for rname in ["r1", "r2", "r3", "r4"]:
+ router_compare_json_output(
+ rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json"
+ )
+
def test_rib():
logger.info("Test: verify RIB")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3', 'r4']:
+ for rname in ["r1", "r2", "r3", "r4"]:
router_compare_json_output(rname, "show ip route json", "show_ip_route.ref")
+
def test_ldp_adjacencies():
logger.info("Test: verify LDP adjacencies")
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3', 'r4']:
- router_compare_json_output(rname, "show mpls ldp discovery json", "show_ldp_discovery.ref")
+ for rname in ["r1", "r2", "r3", "r4"]:
+ router_compare_json_output(
+ rname, "show mpls ldp discovery json", "show_ldp_discovery.ref"
+ )
+
def test_ldp_neighbors():
logger.info("Test: verify LDP neighbors")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3', 'r4']:
- router_compare_json_output(rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref")
+ for rname in ["r1", "r2", "r3", "r4"]:
+ router_compare_json_output(
+ rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref"
+ )
+
def test_ldp_bindings():
logger.info("Test: verify LDP bindings")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3', 'r4']:
- router_compare_json_output(rname, "show mpls ldp binding json", "show_ldp_binding.ref")
+ for rname in ["r1", "r2", "r3", "r4"]:
+ router_compare_json_output(
+ rname, "show mpls ldp binding json", "show_ldp_binding.ref"
+ )
+
def test_ldp_bindings_all_routes():
logger.info("Test: verify LDP bindings after host filter removed")
pytest.skip(tgen.errors)
# remove ACL that blocks advertising everything but host routes */
- cmd = 'vtysh -c \"configure terminal\" -c \"mpls ldp\" -c \"address-family ipv4\" -c \"no label local allocate host-routes\"'
- tgen.net['r1'].cmd(cmd)
+ cmd = 'vtysh -c "configure terminal" -c "mpls ldp" -c "address-family ipv4" -c "no label local allocate host-routes"'
+ tgen.net["r1"].cmd(cmd)
sleep(2)
- for rname in ['r1', 'r2', 'r3', 'r4']:
- router_compare_json_output(rname, "show mpls ldp binding json", "show_ldp_all_binding.ref")
+ for rname in ["r1", "r2", "r3", "r4"]:
+ router_compare_json_output(
+ rname, "show mpls ldp binding json", "show_ldp_all_binding.ref"
+ )
+
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+
class TemplateTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
#
# Define FRR Routers
#
- for router in ['r1', 'r2', 'r3', 'r4']:
+ for router in ["r1", "r2", "r3", "r4"]:
tgen.add_router(router)
#
# Define connections
#
- switch = tgen.add_switch('s0')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s0")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
- switch.add_link(tgen.gears['r4'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
def setup_module(mod):
"Sets up the pytest environment"
# For all registered routers, load the zebra configuration file
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
# Don't start ospfd and ldpd in the CE nodes
- if router.name[0] == 'r':
+ if router.name[0] == "r":
router.load_config(
- TopoRouter.RD_OSPF,
- os.path.join(CWD, '{}/ospfd.conf'.format(rname))
+ TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_LDP,
- os.path.join(CWD, '{}/ldpd.conf'.format(rname))
+ TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
logger.info('Comparing router "%s" "%s" output', rname, command)
tgen = get_topogen()
- filename = '{}/{}/{}'.format(CWD, rname, reference)
+ filename = "{}/{}/{}".format(CWD, rname, reference)
expected = json.loads(open(filename).read())
# Run test function until we get an result. Wait at most 80 seconds.
- test_func = partial(topotest.router_json_cmp,
- tgen.gears[rname], command, expected)
+ test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
_, diff = topotest.run_and_expect(test_func, None, count=160, wait=0.5)
assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
assert diff is None, assertmsg
+
def test_ospf_convergence():
logger.info("Test: check OSPF adjacencies")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3', 'r4']:
- router_compare_json_output(rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json")
+ for rname in ["r1", "r2", "r3", "r4"]:
+ router_compare_json_output(
+ rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json"
+ )
+
def test_rib():
logger.info("Test: verify RIB")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3', 'r4']:
+ for rname in ["r1", "r2", "r3", "r4"]:
router_compare_json_output(rname, "show ip route json", "show_ip_route.ref")
+
def test_ldp_adjacencies():
logger.info("Test: verify LDP adjacencies")
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3', 'r4']:
- router_compare_json_output(rname, "show mpls ldp discovery json", "show_ldp_discovery.ref")
+ for rname in ["r1", "r2", "r3", "r4"]:
+ router_compare_json_output(
+ rname, "show mpls ldp discovery json", "show_ldp_discovery.ref"
+ )
+
def test_ldp_neighbors():
logger.info("Test: verify LDP neighbors")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3', 'r4']:
- router_compare_json_output(rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref")
+ for rname in ["r1", "r2", "r3", "r4"]:
+ router_compare_json_output(
+ rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref"
+ )
+
def test_ldp_bindings():
logger.info("Test: verify LDP bindings")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3', 'r4']:
- router_compare_json_output(rname, "show mpls ldp binding json", "show_ldp_binding.ref")
+ for rname in ["r1", "r2", "r3", "r4"]:
+ router_compare_json_output(
+ rname, "show mpls ldp binding json", "show_ldp_binding.ref"
+ )
+
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
-O 1.1.1.1/32 [110/0] is directly connected, lo
-O>* 2.2.2.2/32 [110/10] via 10.0.1.2, r1-eth0, label implicit-null
-O>* 3.3.3.3/32 [110/20] via 10.0.1.2, r1-eth0, label xxx
-O>* 4.4.4.4/32 [110/20] via 10.0.1.2, r1-eth0, label xxx
-O 10.0.1.0/24 [110/10] is directly connected, r1-eth0
-O>* 10.0.2.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null
-O>* 10.0.3.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null
+O 1.1.1.1/32 [110/0] is directly connected, lo, weight 1
+O>* 2.2.2.2/32 [110/10] via 10.0.1.2, r1-eth0, label implicit-null, weight 1
+O>* 3.3.3.3/32 [110/20] via 10.0.1.2, r1-eth0, label xxx, weight 1
+O>* 4.4.4.4/32 [110/20] via 10.0.1.2, r1-eth0, label xxx, weight 1
+O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1
+O>* 10.0.2.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null, weight 1
+O>* 10.0.3.0/24 [110/20] via 10.0.1.2, r1-eth0, label implicit-null, weight 1
-O>* 1.1.1.1/32 [110/10] via 10.0.1.1, r2-eth0, label implicit-null
-O 2.2.2.2/32 [110/0] is directly connected, lo
-O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r2-eth1, label implicit-null
-O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r2-eth1, label implicit-null
-O 10.0.1.0/24 [110/10] is directly connected, r2-eth0
-O 10.0.2.0/24 [110/10] is directly connected, r2-eth1
-O 10.0.3.0/24 [110/10] is directly connected, r2-eth2
+O>* 1.1.1.1/32 [110/10] via 10.0.1.1, r2-eth0, label implicit-null, weight 1
+O 2.2.2.2/32 [110/0] is directly connected, lo, weight 1
+O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r2-eth1, label implicit-null, weight 1
+O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r2-eth1, label implicit-null, weight 1
+O 10.0.1.0/24 [110/10] is directly connected, r2-eth0, weight 1
+O 10.0.2.0/24 [110/10] is directly connected, r2-eth1, weight 1
+O 10.0.3.0/24 [110/10] is directly connected, r2-eth2, weight 1
-O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r3-eth0, label xxx
-O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r3-eth0, label implicit-null
-O 3.3.3.3/32 [110/0] is directly connected, lo
-O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r3-eth0, label implicit-null
-O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r3-eth0, label implicit-null
-O 10.0.2.0/24 [110/10] is directly connected, r3-eth0
-O 10.0.3.0/24 [110/10] is directly connected, r3-eth1
+O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r3-eth0, label xxx, weight 1
+O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r3-eth0, label implicit-null, weight 1
+O 3.3.3.3/32 [110/0] is directly connected, lo, weight 1
+O>* 4.4.4.4/32 [110/10] via 10.0.2.4, r3-eth0, label implicit-null, weight 1
+O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r3-eth0, label implicit-null, weight 1
+O 10.0.2.0/24 [110/10] is directly connected, r3-eth0, weight 1
+O 10.0.3.0/24 [110/10] is directly connected, r3-eth1, weight 1
-O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r4-eth0, label xxx
-O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r4-eth0, label implicit-null
-O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r4-eth0, label implicit-null
-O 4.4.4.4/32 [110/0] is directly connected, lo
-O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null
-O 10.0.2.0/24 [110/10] is directly connected, r4-eth0
-O>* 10.0.3.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null
+O>* 1.1.1.1/32 [110/20] via 10.0.2.2, r4-eth0, label xxx, weight 1
+O>* 2.2.2.2/32 [110/10] via 10.0.2.2, r4-eth0, label implicit-null, weight 1
+O>* 3.3.3.3/32 [110/10] via 10.0.2.3, r4-eth0, label implicit-null, weight 1
+O 4.4.4.4/32 [110/0] is directly connected, lo, weight 1
+O>* 10.0.1.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null, weight 1
+O 10.0.2.0/24 [110/10] is directly connected, r4-eth0, weight 1
+O>* 10.0.3.0/24 [110/20] via 10.0.2.2, r4-eth0, label implicit-null, weight 1
| r3 | | r4 |
| 3.3.3.3 | | 4.4.4.4 |
+-----------+ +---------+
-"""
+"""
import os
import re
##
#####################################################
+
class NetworkTopo(Topo):
"LDP Test Topology 1"
# Setup Routers
router = {}
for i in range(1, 5):
- router[i] = topotest.addRouter(self, 'r%s' % i)
+ router[i] = topotest.addRouter(self, "r%s" % i)
# Setup Switches, add Interfaces and Connections
switch = {}
# First switch
- switch[0] = self.addSwitch('sw0', cls=topotest.LegacySwitch)
- self.addLink(switch[0], router[1], intfName2='r1-eth0', addr1='80:AA:00:00:00:00', addr2='00:11:00:01:00:00')
- self.addLink(switch[0], router[2], intfName2='r2-eth0', addr1='80:AA:00:00:00:01', addr2='00:11:00:02:00:00')
+ switch[0] = self.addSwitch("sw0", cls=topotest.LegacySwitch)
+ self.addLink(
+ switch[0],
+ router[1],
+ intfName2="r1-eth0",
+ addr1="80:AA:00:00:00:00",
+ addr2="00:11:00:01:00:00",
+ )
+ self.addLink(
+ switch[0],
+ router[2],
+ intfName2="r2-eth0",
+ addr1="80:AA:00:00:00:01",
+ addr2="00:11:00:02:00:00",
+ )
# Second switch
- switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch)
- self.addLink(switch[1], router[2], intfName2='r2-eth1', addr1='80:AA:00:01:00:00', addr2='00:11:00:02:00:01')
- self.addLink(switch[1], router[3], intfName2='r3-eth0', addr1='80:AA:00:01:00:01', addr2='00:11:00:03:00:00')
- self.addLink(switch[1], router[4], intfName2='r4-eth0', addr1='80:AA:00:01:00:02', addr2='00:11:00:04:00:00')
+ switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch)
+ self.addLink(
+ switch[1],
+ router[2],
+ intfName2="r2-eth1",
+ addr1="80:AA:00:01:00:00",
+ addr2="00:11:00:02:00:01",
+ )
+ self.addLink(
+ switch[1],
+ router[3],
+ intfName2="r3-eth0",
+ addr1="80:AA:00:01:00:01",
+ addr2="00:11:00:03:00:00",
+ )
+ self.addLink(
+ switch[1],
+ router[4],
+ intfName2="r4-eth0",
+ addr1="80:AA:00:01:00:02",
+ addr2="00:11:00:04:00:00",
+ )
# Third switch
- switch[2] = self.addSwitch('sw2', cls=topotest.LegacySwitch)
- self.addLink(switch[2], router[2], intfName2='r2-eth2', addr1='80:AA:00:02:00:00', addr2='00:11:00:02:00:02')
- self.addLink(switch[2], router[3], intfName2='r3-eth1', addr1='80:AA:00:02:00:01', addr2='00:11:00:03:00:01')
+ switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch)
+ self.addLink(
+ switch[2],
+ router[2],
+ intfName2="r2-eth2",
+ addr1="80:AA:00:02:00:00",
+ addr2="00:11:00:02:00:02",
+ )
+ self.addLink(
+ switch[2],
+ router[3],
+ intfName2="r3-eth1",
+ addr1="80:AA:00:02:00:01",
+ addr2="00:11:00:03:00:01",
+ )
#####################################################
##
#####################################################
+
def setup_module(module):
global topo, net
global fatal_error
print("******************************************\n")
print("Cleanup old Mininet runs")
- os.system('sudo mn -c > /dev/null 2>&1')
+ os.system("sudo mn -c > /dev/null 2>&1")
thisDir = os.path.dirname(os.path.realpath(__file__))
topo = NetworkTopo()
# Starting Routers
for i in range(1, 5):
- net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i))
- net['r%s' % i].loadConf('ospfd', '%s/r%s/ospfd.conf' % (thisDir, i))
- net['r%s' % i].loadConf('ldpd', '%s/r%s/ldpd.conf' % (thisDir, i))
- fatal_error = net['r%s' % i].startRouter()
+ net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i))
+ net["r%s" % i].loadConf("ospfd", "%s/r%s/ospfd.conf" % (thisDir, i))
+ net["r%s" % i].loadConf("ldpd", "%s/r%s/ldpd.conf" % (thisDir, i))
+ fatal_error = net["r%s" % i].startRouter()
if fatal_error != "":
break
# For debugging after starting FRR/Quagga daemons, uncomment the next line
# CLI(net)
+
def teardown_module(module):
global net
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
print("\n\n** Check if FRR/Quagga is running on each Router node")
# Starting Routers
for i in range(1, 5):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
# CLI(net)
+
def test_mpls_interfaces():
global fatal_error
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/show_mpls_ldp_interface.ref'
+ refTableFile = "%s/r%s/show_mpls_ldp_interface.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp interface" 2> /dev/null').rstrip()
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show mpls ldp interface" 2> /dev/null')
+ .rstrip()
+ )
# Mask out Timer in Uptime
actual = re.sub(r" [0-9][0-9]:[0-9][0-9]:[0-9][0-9] ", " xx:xx:xx ", actual)
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual MPLS LDP interface status",
- title2="expected MPLS LDP interface status")
+ title2="expected MPLS LDP interface status",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed MPLS LDP Interface status Check:\n%s\n' % (i, diff))
+ sys.stderr.write(
+ "r%s failed MPLS LDP Interface status Check:\n%s\n" % (i, diff)
+ )
failures += 1
else:
print("r%s ok" % i)
- if failures>0:
+ if failures > 0:
fatal_error = "MPLS LDP Interface status failed"
- assert failures == 0, "MPLS LDP Interface status failed for router r%s:\n%s" % (i, diff)
+ assert (
+ failures == 0
+ ), "MPLS LDP Interface status failed for router r%s:\n%s" % (i, diff)
# Make sure that all daemons are running
for i in range(1, 5):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
# Wait for MPLS LDP neighbors to establish.
sys.stdout.flush()
# Look for any node not yet converged
for i in range(1, 5):
- established = net['r%s' % i].cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null').rstrip()
+ established = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null')
+ .rstrip()
+ )
# On current version, we need to make sure they all turn to OPERATIONAL on all lines
#
- lines = ('\n'.join(established.splitlines()) + '\n').splitlines(1)
+ lines = ("\n".join(established.splitlines()) + "\n").splitlines(1)
# Check all lines to be either table header (starting with ^AF or show OPERATIONAL)
- header = r'^AF.*'
- operational = r'^ip.*OPERATIONAL.*'
+ header = r"^AF.*"
+ operational = r"^ip.*OPERATIONAL.*"
found_operational = 0
for j in range(1, len(lines)):
- if (not re.search(header, lines[j])) and (not re.search(operational, lines[j])):
+ if (not re.search(header, lines[j])) and (
+ not re.search(operational, lines[j])
+ ):
established = "" # Empty string shows NOT established
if re.search(operational, lines[j]):
found_operational += 1
# Need at least one operational neighbor
established = "" # Empty string shows NOT established
if not established:
- print('Waiting for r%s' %i)
+ print("Waiting for r%s" % i)
sys.stdout.flush()
break
if not established:
sleep(5)
timeout -= 5
else:
- print('Done')
+ print("Done")
break
else:
# Bail out with error if a router fails to converge
# Only wait if we actually went through a convergence
print("\nwaiting 15s for LDP sessions to establish")
sleep(15)
-
+
# Make sure that all daemons are running
for i in range(1, 5):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/show_mpls_ldp_discovery.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/show_mpls_ldp_discovery.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null').rstrip()
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null')
+ .rstrip()
+ )
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null').rstrip()
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show mpls ldp discovery" 2> /dev/null')
+ .rstrip()
+ )
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual MPLS LDP discovery output",
- title2="expected MPLS LDP discovery output")
+ title2="expected MPLS LDP discovery output",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed MPLS LDP discovery output Check:\n%s\n' % (i, diff))
+ sys.stderr.write(
+ "r%s failed MPLS LDP discovery output Check:\n%s\n" % (i, diff)
+ )
failures += 1
else:
print("r%s ok" % i)
- assert failures == 0, "MPLS LDP Interface discovery output for router r%s:\n%s" % (i, diff)
+ assert (
+ failures == 0
+ ), "MPLS LDP Interface discovery output for router r%s:\n%s" % (i, diff)
# Make sure that all daemons are running
for i in range(1, 5):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/show_mpls_ldp_neighbor.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/show_mpls_ldp_neighbor.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null').rstrip()
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show mpls ldp neighbor" 2> /dev/null')
+ .rstrip()
+ )
# Mask out changing parts in output
# Mask out Timer in Uptime
- actual = re.sub(r"(ipv4 [0-9\.]+ +OPERATIONAL [0-9\.]+ +)[0-9][0-9]:[0-9][0-9]:[0-9][0-9]", r"\1xx:xx:xx", actual)
+ actual = re.sub(
+ r"(ipv4 [0-9\.]+ +OPERATIONAL [0-9\.]+ +)[0-9][0-9]:[0-9][0-9]:[0-9][0-9]",
+ r"\1xx:xx:xx",
+ actual,
+ )
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual MPLS LDP neighbor output",
- title2="expected MPLS LDP neighbor output")
+ title2="expected MPLS LDP neighbor output",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed MPLS LDP neighbor output Check:\n%s\n' % (i, diff))
+ sys.stderr.write(
+ "r%s failed MPLS LDP neighbor output Check:\n%s\n" % (i, diff)
+ )
failures += 1
else:
print("r%s ok" % i)
- assert failures == 0, "MPLS LDP Interface neighbor output for router r%s:\n%s" % (i, diff)
+ assert (
+ failures == 0
+ ), "MPLS LDP Interface neighbor output for router r%s:\n%s" % (i, diff)
# Make sure that all daemons are running
for i in range(1, 5):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
- #CLI(net)
+ # CLI(net)
def test_mpls_ldp_binding():
# pytest.skip("Skipping test_mpls_ldp_binding")
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/show_mpls_ldp_binding.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/show_mpls_ldp_binding.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show mpls ldp binding" 2> /dev/null').rstrip()
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show mpls ldp binding" 2> /dev/null')
+ .rstrip()
+ )
# Mask out changing parts in output
# Mask out label
- actual = re.sub(r"(ipv4 [0-9\./]+ +[0-9\.]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual)
- actual = re.sub(r"(ipv4 [0-9\./]+ +[0-9\.]+ +[a-z\-]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual)
+ actual = re.sub(
+ r"(ipv4 [0-9\./]+ +[0-9\.]+ +)[0-9][0-9] (.*)", r"\1xxx\2", actual
+ )
+ actual = re.sub(
+ r"(ipv4 [0-9\./]+ +[0-9\.]+ +[a-z\-]+ +)[0-9][0-9] (.*)",
+ r"\1xxx\2",
+ actual,
+ )
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Sort lines which start with "xx via inet "
- pattern = r'^\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+'
+ pattern = r"^\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+"
swapped = True
while swapped:
swapped = False
for j in range(1, len(actual)):
- if re.search(pattern, actual[j]) and re.search(pattern, actual[j-1]):
- if actual[j-1] > actual[j]:
- temp = actual[j-1]
- actual[j-1] = actual[j]
+ if re.search(pattern, actual[j]) and re.search(
+ pattern, actual[j - 1]
+ ):
+ if actual[j - 1] > actual[j]:
+ temp = actual[j - 1]
+ actual[j - 1] = actual[j]
actual[j] = temp
swapped = True
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual MPLS LDP binding output",
- title2="expected MPLS LDP binding output")
+ title2="expected MPLS LDP binding output",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed MPLS LDP binding output Check:\n%s\n' % (i, diff))
+ sys.stderr.write(
+ "r%s failed MPLS LDP binding output Check:\n%s\n" % (i, diff)
+ )
failures += 1
else:
print("r%s ok" % i)
- assert failures == 0, "MPLS LDP Interface binding output for router r%s:\n%s" % (i, diff)
+ assert (
+ failures == 0
+ ), "MPLS LDP Interface binding output for router r%s:\n%s" % (i, diff)
# Make sure that all daemons are running
for i in range(1, 5):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
- #CLI(net)
+ # CLI(net)
def test_zebra_ipv4_routingTable():
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/show_ipv4_route.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/show_ipv4_route.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show ip route" 2> /dev/null | grep "^O"').rstrip()
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show ip route" 2> /dev/null | grep "^O"')
+ .rstrip()
+ )
# Drop timers on end of line (older Quagga Versions)
actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual)
# and translating remaining implicit (single-digit) labels to label implicit-null
actual = re.sub(r" label [0-9]+", " label implicit-null", actual)
# Check if we have implicit labels - if not, then remove them from reference
- if (not re.search(r" label implicit-null", actual)):
+ if not re.search(r" label implicit-null", actual):
expected = re.sub(r", label implicit-null", "", expected)
# now fix newlines of expected (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual IPv4 zebra routing table",
- title2="expected IPv4 zebra routing table")
+ title2="expected IPv4 zebra routing table",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed IPv4 Zebra Routing Table Check:\n%s\n' % (i, diff))
+ sys.stderr.write(
+ "r%s failed IPv4 Zebra Routing Table Check:\n%s\n" % (i, diff)
+ )
failures += 1
else:
print("r%s ok" % i)
- assert failures == 0, "IPv4 Zebra Routing Table verification failed for router r%s:\n%s" % (i, diff)
+ assert failures == 0, (
+ "IPv4 Zebra Routing Table verification failed for router r%s:\n%s"
+ % (i, diff)
+ )
# Make sure that all daemons are running
for i in range(1, 5):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/show_mpls_table.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/show_mpls_table.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show mpls table" 2> /dev/null')
+ actual = net["r%s" % i].cmd('vtysh -c "show mpls table" 2> /dev/null')
# Fix inconsistent Label numbers at beginning of line
actual = re.sub(r"(\s+)[0-9]+(\s+LDP)", r"\1XX\2", actual)
# Fix inconsistent Label numbers at end of line
- actual = re.sub(r"(\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+)[0-9][0-9]", r"\1XX", actual)
+ actual = re.sub(
+ r"(\s+[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+)[0-9][0-9]", r"\1XX", actual
+ )
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Sort lines which start with " XX LDP"
- pattern = r'^\s+[0-9X]+\s+LDP'
+ pattern = r"^\s+[0-9X]+\s+LDP"
swapped = True
while swapped:
swapped = False
for j in range(1, len(actual)):
- if re.search(pattern, actual[j]) and re.search(pattern, actual[j-1]):
- if actual[j-1] > actual[j]:
- temp = actual[j-1]
- actual[j-1] = actual[j]
+ if re.search(pattern, actual[j]) and re.search(
+ pattern, actual[j - 1]
+ ):
+ if actual[j - 1] > actual[j]:
+ temp = actual[j - 1]
+ actual[j - 1] = actual[j]
actual[j] = temp
swapped = True
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual MPLS table output",
- title2="expected MPLS table output")
+ title2="expected MPLS table output",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed MPLS table output Check:\n%s\n' % (i, diff))
+ sys.stderr.write(
+ "r%s failed MPLS table output Check:\n%s\n" % (i, diff)
+ )
failures += 1
else:
print("r%s ok" % i)
# Make sure that all daemons are running
for i in range(1, 5):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global fatal_error
global net
- # Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ # Skip if previous fatal error condition is raised
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 5):
- refTableFile = '%s/r%s/ip_mpls_route.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/ip_mpls_route.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('ip -o -family mpls route 2> /dev/null').rstrip()
+ actual = (
+ net["r%s" % i].cmd("ip -o -family mpls route 2> /dev/null").rstrip()
+ )
# Mask out label and protocol
actual = re.sub(r"[0-9][0-9] via inet ", "xx via inet ", actual)
# Sort nexthops
nexthop_sorted = []
for line in actual.splitlines():
- tokens = re.split(r'\\\t', line.strip())
- nexthop_sorted.append('{} {}'.format(
- tokens[0].strip(),
- ' '.join([ token.strip() for token in sorted(tokens[1:]) ])
- ).strip())
+ tokens = re.split(r"\\\t", line.strip())
+ nexthop_sorted.append(
+ "{} {}".format(
+ tokens[0].strip(),
+ " ".join([token.strip() for token in sorted(tokens[1:])]),
+ ).strip()
+ )
# Sort lines and fixup differences between old and new iproute
- actual = '\n'.join(sorted(nexthop_sorted))
+ actual = "\n".join(sorted(nexthop_sorted))
actual = re.sub(r"nexthop via", "nexthopvia", actual)
actual = re.sub(r" nexthop as to xx via inet ", " nexthopvia inet ", actual)
actual = re.sub(r" weight 1", "", actual)
actual = re.sub(r" [ ]+", " ", actual)
# put \n back at line ends
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual Linux Kernel MPLS route",
- title2="expected Linux Kernel MPLS route")
+ title2="expected Linux Kernel MPLS route",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed Linux Kernel MPLS route output Check:\n%s\n' % (i, diff))
+ sys.stderr.write(
+ "r%s failed Linux Kernel MPLS route output Check:\n%s\n" % (i, diff)
+ )
failures += 1
else:
print("r%s ok" % i)
- assert failures == 0, "Linux Kernel MPLS route output for router r%s:\n%s" % (i, diff)
+ assert (
+ failures == 0
+ ), "Linux Kernel MPLS route output for router r%s:\n%s" % (i, diff)
# Make sure that all daemons are running
for i in range(1, 5):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
- if os.environ.get('TOPOTESTS_CHECK_STDERR') is None:
- print("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n")
- pytest.skip('Skipping test for Stderr output')
+ if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
+ print(
+ "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n"
+ )
+ pytest.skip("Skipping test for Stderr output")
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
for i in range(1, 5):
- net['r%s' % i].stopRouter()
- log = net['r%s' % i].getStdErr('ldpd')
+ net["r%s" % i].stopRouter()
+ log = net["r%s" % i].getStdErr("ldpd")
if log:
print("\nRouter r%s LDPd StdErr Log:\n%s" % (i, log))
- log = net['r%s' % i].getStdErr('ospfd')
+ log = net["r%s" % i].getStdErr("ospfd")
if log:
print("\nRouter r%s OSPFd StdErr Log:\n%s" % (i, log))
- log = net['r%s' % i].getStdErr('zebra')
+ log = net["r%s" % i].getStdErr("zebra")
if log:
print("\nRouter r%s Zebra StdErr Log:\n%s" % (i, log))
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
- if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None:
- print("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n")
- pytest.skip('Skipping test for memory leaks')
-
+ if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None:
+ print(
+ "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n"
+ )
+ pytest.skip("Skipping test for memory leaks")
+
thisDir = os.path.dirname(os.path.realpath(__file__))
for i in range(1, 5):
- net['r%s' % i].stopRouter()
- net['r%s' % i].report_memory_leaks(os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), os.path.basename(__file__))
+ net["r%s" % i].stopRouter()
+ net["r%s" % i].report_memory_leaks(
+ os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__)
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
- setLogLevel('info')
+ setLogLevel("info")
# To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli
# retval = pytest.main(["-s", "--tb=no"])
retval = pytest.main(["-s"])
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+
class TemplateTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
#
# Define FRR Routers
#
- for router in ['ce1', 'ce2', 'ce3', 'r1', 'r2', 'r3']:
+ for router in ["ce1", "ce2", "ce3", "r1", "r2", "r3"]:
tgen.add_router(router)
#
# Define connections
#
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['ce1'])
- switch.add_link(tgen.gears['r1'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["ce1"])
+ switch.add_link(tgen.gears["r1"])
+
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["ce2"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['ce2'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["ce3"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['ce3'])
- switch.add_link(tgen.gears['r3'])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
- switch = tgen.add_switch('s4')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s5')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r3'])
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
- switch = tgen.add_switch('s6')
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
def setup_module(mod):
"Sets up the pytest environment"
# For all registered routers, load the zebra configuration file
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
# Don't start ospfd and ldpd in the CE nodes
- if router.name[0] == 'r':
+ if router.name[0] == "r":
router.load_config(
- TopoRouter.RD_OSPF,
- os.path.join(CWD, '{}/ospfd.conf'.format(rname))
+ TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_LDP,
- os.path.join(CWD, '{}/ldpd.conf'.format(rname))
+ TopoRouter.RD_LDP, os.path.join(CWD, "{}/ldpd.conf".format(rname))
)
tgen.start_router()
+
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
logger.info('Comparing router "%s" "%s" output', rname, command)
tgen = get_topogen()
- filename = '{}/{}/{}'.format(CWD, rname, reference)
+ filename = "{}/{}/{}".format(CWD, rname, reference)
expected = json.loads(open(filename).read())
# Run test function until we get an result. Wait at most 80 seconds.
- test_func = partial(topotest.router_json_cmp,
- tgen.gears[rname], command, expected)
+ test_func = partial(topotest.router_json_cmp, tgen.gears[rname], command, expected)
_, diff = topotest.run_and_expect(test_func, None, count=160, wait=0.5)
assertmsg = '"{}" JSON output mismatches the expected result'.format(rname)
assert diff is None, assertmsg
+
def test_ospf_convergence():
logger.info("Test: check OSPF adjacencies")
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3']:
- router_compare_json_output(rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json")
+ for rname in ["r1", "r2", "r3"]:
+ router_compare_json_output(
+ rname, "show ip ospf neighbor json", "show_ip_ospf_neighbor.json"
+ )
+
def test_rib():
logger.info("Test: verify RIB")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3']:
+ for rname in ["r1", "r2", "r3"]:
router_compare_json_output(rname, "show ip route json", "show_ip_route.ref")
+
def test_ldp_adjacencies():
logger.info("Test: verify LDP adjacencies")
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3']:
- router_compare_json_output(rname, "show mpls ldp discovery json", "show_ldp_discovery.ref")
+ for rname in ["r1", "r2", "r3"]:
+ router_compare_json_output(
+ rname, "show mpls ldp discovery json", "show_ldp_discovery.ref"
+ )
+
def test_ldp_neighbors():
logger.info("Test: verify LDP neighbors")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3']:
- router_compare_json_output(rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref")
+ for rname in ["r1", "r2", "r3"]:
+ router_compare_json_output(
+ rname, "show mpls ldp neighbor json", "show_ldp_neighbor.ref"
+ )
+
def test_ldp_bindings():
logger.info("Test: verify LDP bindings")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3']:
- router_compare_json_output(rname, "show mpls ldp binding json", "show_ldp_binding.ref")
+ for rname in ["r1", "r2", "r3"]:
+ router_compare_json_output(
+ rname, "show mpls ldp binding json", "show_ldp_binding.ref"
+ )
+
def test_ldp_pwid_bindings():
logger.info("Test: verify LDP PW-ID bindings")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3']:
- router_compare_json_output(rname, "show l2vpn atom binding json", "show_l2vpn_binding.ref")
+ for rname in ["r1", "r2", "r3"]:
+ router_compare_json_output(
+ rname, "show l2vpn atom binding json", "show_l2vpn_binding.ref"
+ )
+
def test_ldp_pseudowires():
logger.info("Test: verify LDP pseudowires")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- for rname in ['r1', 'r2', 'r3']:
- router_compare_json_output(rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref")
+ for rname in ["r1", "r2", "r3"]:
+ router_compare_json_output(
+ rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref"
+ )
+
def test_ldp_pseudowires_after_link_down():
logger.info("Test: verify LDP pseudowires after r1-r2 link goes down")
# Shut down r1-r2 link */
tgen = get_topogen()
- tgen.gears['r1'].peer_link_enable('r1-eth1', False)
+ tgen.gears["r1"].peer_link_enable("r1-eth1", False)
topotest.sleep(5, "Waiting for the network to reconverge")
# check if the pseudowire is still up (using an alternate path for nexthop resolution)
- for rname in ['r1', 'r2', 'r3']:
- router_compare_json_output(rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref")
+ for rname in ["r1", "r2", "r3"]:
+ router_compare_json_output(
+ rname, "show l2vpn atom vc json", "show_l2vpn_vc.ref"
+ )
+
# Memory leak test template
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
from lib.topolog import logger
# Import common_config to use commomnly used APIs
-from lib.common_config import (create_common_configuration,
- InvalidCLIError,
- load_config_to_router,
- check_address_types,
- generate_ips,
- find_interface_with_greater_ip,
- run_frr_cmd, retry)
+from lib.common_config import (
+ create_common_configuration,
+ InvalidCLIError,
+ load_config_to_router,
+ check_address_types,
+ generate_ips,
+ find_interface_with_greater_ip,
+ run_frr_cmd,
+ retry,
+)
BGP_CONVERGENCE_TIMEOUT = 10
bgp_addr_data = bgp_data.setdefault("address_family", {})
if not bgp_addr_data:
- logger.debug("Router %s: 'address_family' not present in "
- "input_dict for BGP", router)
+ logger.debug(
+ "Router %s: 'address_family' not present in " "input_dict for BGP",
+ router,
+ )
else:
ipv4_data = bgp_addr_data.setdefault("ipv4", {})
ipv6_data = bgp_addr_data.setdefault("ipv6", {})
- neigh_unicast = True if ipv4_data.setdefault("unicast", {}) \
- or ipv6_data.setdefault("unicast", {}) else False
+ neigh_unicast = (
+ True
+ if ipv4_data.setdefault("unicast", {})
+ or ipv6_data.setdefault("unicast", {})
+ else False
+ )
if neigh_unicast:
data_all_bgp = __create_bgp_unicast_neighbor(
- tgen, topo, input_dict, router,
- config_data=data_all_bgp)
+ tgen, topo, input_dict, router, config_data=data_all_bgp
+ )
try:
- result = create_common_configuration(tgen, router, data_all_bgp,
- "bgp", build)
+ result = create_common_configuration(
+ tgen, router, data_all_bgp, "bgp", build
+ )
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
config_data = []
if "local_as" not in bgp_data and build:
- logger.error("Router %s: 'local_as' not present in input_dict"
- "for BGP", router)
+ logger.error(
+ "Router %s: 'local_as' not present in input_dict for BGP", router
+ )
return False
local_as = bgp_data.setdefault("local_as", "")
if del_router_id:
config_data.append("no bgp router-id")
if router_id:
- config_data.append("bgp router-id {}".format(
- router_id))
+ config_data.append("bgp router-id {}".format(router_id))
return config_data
-def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router,
- config_data=None):
+def __create_bgp_unicast_neighbor(tgen, topo, input_dict, router, config_data=None):
"""
Helper API to create configuration for address-family unicast
addr_data = addr_dict["unicast"]
if addr_data:
- config_data.append("address-family {} unicast".format(
- addr_type
- ))
- advertise_network = addr_data.setdefault("advertise_networks",
- [])
+ config_data.append("address-family {} unicast".format(addr_type))
+ advertise_network = addr_data.setdefault("advertise_networks", [])
for advertise_network_dict in advertise_network:
network = advertise_network_dict["network"]
if type(network) is not list:
else:
no_of_network = 1
- del_action = advertise_network_dict.setdefault("delete",
- False)
+ del_action = advertise_network_dict.setdefault("delete", False)
# Generating IPs for verification
- prefix = str(
- ipaddr.IPNetwork(unicode(network[0])).prefixlen)
+ prefix = str(ipaddr.IPNetwork(unicode(network[0])).prefixlen)
network_list = generate_ips(network, no_of_network)
for ip in network_list:
ip = str(ipaddr.IPNetwork(unicode(ip)).network)
ibgp = max_paths.setdefault("ibgp", None)
ebgp = max_paths.setdefault("ebgp", None)
if ibgp:
- config_data.append("maximum-paths ibgp {}".format(
- ibgp
- ))
+ config_data.append("maximum-paths ibgp {}".format(ibgp))
if ebgp:
- config_data.append("maximum-paths {}".format(
- ebgp
- ))
+ config_data.append("maximum-paths {}".format(ebgp))
aggregate_addresses = addr_data.setdefault("aggregate_address", [])
for aggregate_address in aggregate_addresses:
network = aggregate_address.setdefault("network", None)
if not network:
- logger.debug("Router %s: 'network' not present in "
- "input_dict for BGP", router)
+ logger.debug(
+ "Router %s: 'network' not present in " "input_dict for BGP", router
+ )
else:
cmd = "aggregate-address {}".format(network)
if redistribute_data:
for redistribute in redistribute_data:
if "redist_type" not in redistribute:
- logger.error("Router %s: 'redist_type' not present in "
- "input_dict", router)
+ logger.error(
+ "Router %s: 'redist_type' not present in " "input_dict", router
+ )
else:
- cmd = "redistribute {}".format(
- redistribute["redist_type"])
- redist_attr = redistribute.setdefault("attribute",
- None)
+ cmd = "redistribute {}".format(redistribute["redist_type"])
+ redist_attr = redistribute.setdefault("attribute", None)
if redist_attr:
cmd = "{} {}".format(cmd, redist_attr)
del_action = redistribute.setdefault("delete", False)
config_data.append(cmd)
if "neighbor" in addr_data:
- neigh_data = __create_bgp_neighbor(topo, input_dict,
- router, addr_type, add_neigh)
+ neigh_data = __create_bgp_neighbor(
+ topo, input_dict, router, addr_type, add_neigh
+ )
config_data.extend(neigh_data)
for addr_type, addr_dict in bgp_data.iteritems():
addr_data = addr_dict["unicast"]
if "neighbor" in addr_data:
neigh_addr_data = __create_bgp_unicast_address_family(
- topo, input_dict, router, addr_type, add_neigh)
+ topo, input_dict, router, addr_type, add_neigh
+ )
config_data.extend(neigh_addr_data)
-
logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()")
return config_data
update_source = None
if dest_link in nh_details["links"].keys():
- ip_addr = \
- nh_details["links"][dest_link][addr_type].split("/")[0]
+ ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0]
# Loopback interface
if "source_link" in peer and peer["source_link"] == "lo":
- update_source = topo[router]["links"]["lo"][
- addr_type].split("/")[0]
+ update_source = topo[router]["links"]["lo"][addr_type].split("/")[0]
neigh_cxt = "neighbor {}".format(ip_addr)
config_data.append("address-family ipv6 unicast")
config_data.append("{} activate".format(neigh_cxt))
- disable_connected = peer.setdefault("disable_connected_check",
- False)
+ disable_connected = peer.setdefault("disable_connected_check", False)
keep_alive = peer.setdefault("keepalivetimer", 60)
hold_down = peer.setdefault("holddowntimer", 180)
password = peer.setdefault("password", None)
max_hop_limit = peer.setdefault("ebgp_multihop", 1)
if update_source:
- config_data.append("{} update-source {}".format(
- neigh_cxt, update_source))
+ config_data.append(
+ "{} update-source {}".format(neigh_cxt, update_source)
+ )
if disable_connected:
- config_data.append("{} disable-connected-check".format(
- disable_connected))
+ config_data.append(
+ "{} disable-connected-check".format(disable_connected)
+ )
if update_source:
- config_data.append("{} update-source {}".format(neigh_cxt,
- update_source))
+ config_data.append(
+ "{} update-source {}".format(neigh_cxt, update_source)
+ )
if int(keep_alive) != 60 and int(hold_down) != 180:
config_data.append(
- "{} timers {} {}".format(neigh_cxt, keep_alive,
- hold_down))
+ "{} timers {} {}".format(neigh_cxt, keep_alive, hold_down)
+ )
if password:
- config_data.append(
- "{} password {}".format(neigh_cxt, password))
+ config_data.append("{} password {}".format(neigh_cxt, password))
if max_hop_limit > 1:
- config_data.append("{} ebgp-multihop {}".format(neigh_cxt,
- max_hop_limit))
+ config_data.append(
+ "{} ebgp-multihop {}".format(neigh_cxt, max_hop_limit)
+ )
config_data.append("{} enforce-multihop".format(neigh_cxt))
logger.debug("Exiting lib API: __create_bgp_unicast_neighbor()")
return config_data
-def __create_bgp_unicast_address_family(topo, input_dict, router, addr_type,
- add_neigh=True):
+def __create_bgp_unicast_address_family(
+ topo, input_dict, router, addr_type, add_neigh=True
+):
"""
API prints bgp global config to bgp_json file.
nh_details = topo[peer_name]
# Loopback interface
if "source_link" in peer and peer["source_link"] == "lo":
- for destRouterLink, data in sorted(nh_details["links"].
- iteritems()):
+ for destRouterLink, data in sorted(nh_details["links"].iteritems()):
if "type" in data and data["type"] == "loopback":
if dest_link == destRouterLink:
- ip_addr = \
- nh_details["links"][destRouterLink][
- addr_type].split("/")[0]
+ ip_addr = nh_details["links"][destRouterLink][
+ addr_type
+ ].split("/")[0]
# Physical interface
else:
if dest_link in nh_details["links"].keys():
- ip_addr = nh_details["links"][dest_link][
- addr_type].split("/")[0]
+ ip_addr = nh_details["links"][dest_link][addr_type].split("/")[0]
if addr_type == "ipv4" and bgp_data["ipv6"]:
- deactivate = nh_details["links"][
- dest_link]["ipv6"].split("/")[0]
+ deactivate = nh_details["links"][dest_link]["ipv6"].split("/")[
+ 0
+ ]
neigh_cxt = "neighbor {}".format(ip_addr)
- config_data.append("address-family {} unicast".format(
- addr_type
- ))
+ config_data.append("address-family {} unicast".format(addr_type))
if deactivate:
- config_data.append(
- "no neighbor {} activate".format(deactivate))
+ config_data.append("no neighbor {} activate".format(deactivate))
next_hop_self = peer.setdefault("next_hop_self", None)
send_community = peer.setdefault("send_community", None)
# no_send_community
if no_send_community:
- config_data.append("no {} send-community {}".format(
- neigh_cxt, no_send_community))
+ config_data.append(
+ "no {} send-community {}".format(neigh_cxt, no_send_community)
+ )
if prefix_lists:
for prefix_list in prefix_lists:
direction = prefix_list.setdefault("direction", "in")
del_action = prefix_list.setdefault("delete", False)
if not name:
- logger.info("Router %s: 'name' not present in "
- "input_dict for BGP neighbor prefix lists",
- router)
+ logger.info(
+ "Router %s: 'name' not present in "
+ "input_dict for BGP neighbor prefix lists",
+ router,
+ )
else:
- cmd = "{} prefix-list {} {}".format(neigh_cxt, name,
- direction)
+ cmd = "{} prefix-list {} {}".format(neigh_cxt, name, direction)
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
direction = route_map.setdefault("direction", "in")
del_action = route_map.setdefault("delete", False)
if not name:
- logger.info("Router %s: 'name' not present in "
- "input_dict for BGP neighbor route name",
- router)
+ logger.info(
+ "Router %s: 'name' not present in "
+ "input_dict for BGP neighbor route name",
+ router,
+ )
else:
- cmd = "{} route-map {} {}".format(neigh_cxt, name,
- direction)
+ cmd = "{} route-map {} {}".format(neigh_cxt, name, direction)
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
rnode = tgen.routers()[router]
- del_router_id = input_dict[router]["bgp"].setdefault(
- "del_router_id", False)
+ del_router_id = input_dict[router]["bgp"].setdefault("del_router_id", False)
logger.info("Checking router %s router-id", router)
- show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
- isjson=True)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True)
router_id_out = show_bgp_json["ipv4Unicast"]["routerId"]
router_id_out = ipaddr.IPv4Address(unicode(router_id_out))
router_id = ipaddr.IPv4Address(unicode(router_id))
if router_id == router_id_out:
- logger.info("Found expected router-id %s for router %s",
- router_id, router)
+ logger.info("Found expected router-id %s for router %s", router_id, router)
else:
- errormsg = "Router-id for router:{} mismatch, expected:" \
- " {} but found:{}".format(router, router_id,
- router_id_out)
+ errormsg = (
+ "Router-id for router:{} mismatch, expected:"
+ " {} but found:{}".format(router, router_id, router_id_out)
+ )
return errormsg
logger.debug("Exiting lib API: verify_router_id()")
logger.debug("Entering lib API: verify_bgp_convergence()")
for router, rnode in tgen.routers().iteritems():
logger.info("Verifying BGP Convergence on router %s", router)
- show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
- isjson=True)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
errormsg = "BGP is not running"
for dest_link in peer_data["dest_link"].keys():
data = topo["routers"][bgp_neighbor]["links"]
if dest_link in data:
- neighbor_ip = \
- data[dest_link][addr_type].split("/")[0]
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
if addr_type == "ipv4":
- ipv4_data = show_bgp_json["ipv4Unicast"][
- "peers"]
+ ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
nh_state = ipv4_data[neighbor_ip]["state"]
else:
- ipv6_data = show_bgp_json["ipv6Unicast"][
- "peers"]
+ ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
nh_state = ipv6_data[neighbor_ip]["state"]
if nh_state == "Established":
if no_of_peer == total_peer:
logger.info("BGP is Converged for router %s", router)
else:
- errormsg = "BGP is not converged for router {}".format(
- router)
+ errormsg = "BGP is not converged for router {}".format(router)
return errormsg
logger.debug("Exiting API: verify_bgp_convergence()")
for router in input_dict.keys():
# Remove bgp configuration
- router_dict.update({
- router: {
- "bgp": {
- "delete": True
- }
- }
- })
+ router_dict.update({router: {"bgp": {"delete": True}}})
- new_topo[router]["bgp"]["local_as"] = \
- input_dict[router]["bgp"]["local_as"]
+ new_topo[router]["bgp"]["local_as"] = input_dict[router]["bgp"]["local_as"]
logger.info("Removing bgp configuration")
create_router_bgp(tgen, topo, router_dict)
logger.info("Verifying AS numbers for dut %s:", router)
- show_ip_bgp_neighbor_json = run_frr_cmd(rnode,
- "show ip bgp neighbor json", isjson=True)
+ show_ip_bgp_neighbor_json = run_frr_cmd(
+ rnode, "show ip bgp neighbor json", isjson=True
+ )
local_as = input_dict[router]["bgp"]["local_as"]
bgp_addr_type = topo["routers"][router]["bgp"]["address_family"]
if not check_address_types(addr_type):
continue
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"][
- "neighbor"]
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
remote_as = input_dict[bgp_neighbor]["bgp"]["local_as"]
data = topo["routers"][bgp_neighbor]["links"]
if dest_link in data:
- neighbor_ip = data[dest_link][addr_type]. \
- split("/")[0]
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
neigh_data = show_ip_bgp_neighbor_json[neighbor_ip]
# Verify Local AS for router
if neigh_data["localAs"] != local_as:
- errormsg = "Failed: Verify local_as for dut {}," \
- " found: {} but expected: {}".format(
- router, neigh_data["localAs"],
- local_as)
+ errormsg = (
+ "Failed: Verify local_as for dut {},"
+ " found: {} but expected: {}".format(
+ router, neigh_data["localAs"], local_as
+ )
+ )
return errormsg
else:
- logger.info("Verified local_as for dut %s, found"
- " expected: %s", router, local_as)
+ logger.info(
+ "Verified local_as for dut %s, found" " expected: %s",
+ router,
+ local_as,
+ )
# Verify Remote AS for neighbor
if neigh_data["remoteAs"] != remote_as:
- errormsg = "Failed: Verify remote_as for dut " \
- "{}'s neighbor {}, found: {} but " \
- "expected: {}".format(
- router, bgp_neighbor,
- neigh_data["remoteAs"], remote_as)
+ errormsg = (
+ "Failed: Verify remote_as for dut "
+ "{}'s neighbor {}, found: {} but "
+ "expected: {}".format(
+ router, bgp_neighbor, neigh_data["remoteAs"], remote_as
+ )
+ )
return errormsg
else:
- logger.info("Verified remote_as for dut %s's "
- "neighbor %s, found expected: %s",
- router, bgp_neighbor, remote_as)
+ logger.info(
+ "Verified remote_as for dut %s's "
+ "neighbor %s, found expected: %s",
+ router,
+ bgp_neighbor,
+ remote_as,
+ )
logger.debug("Exiting lib API: verify_AS_numbers()")
return True
for retry in range(31):
sleeptime = 3
# Waiting for BGP to converge
- logger.info("Waiting for %s sec for BGP to converge on router"
- " %s...", sleeptime, router)
+ logger.info(
+ "Waiting for %s sec for BGP to converge on router" " %s...",
+ sleeptime,
+ router,
+ )
sleep(sleeptime)
- show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
- isjson=True)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
errormsg = "BGP is not running"
if dest_link in data:
neighbor_ip = data[dest_link][addr_type].split("/")[0]
if addr_type == "ipv4":
- ipv4_data = show_bgp_json["ipv4Unicast"][
- "peers"]
+ ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
nh_state = ipv4_data[neighbor_ip]["state"]
# Peer up time dictionary
- peer_uptime_before_clear_bgp[bgp_neighbor] = \
- ipv4_data[neighbor_ip]["peerUptimeEstablishedEpoch"]
+ peer_uptime_before_clear_bgp[bgp_neighbor] = ipv4_data[
+ neighbor_ip
+ ]["peerUptimeEstablishedEpoch"]
else:
- ipv6_data = show_bgp_json["ipv6Unicast"][
- "peers"]
+ ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
nh_state = ipv6_data[neighbor_ip]["state"]
# Peer up time dictionary
- peer_uptime_before_clear_bgp[bgp_neighbor] = \
- ipv6_data[neighbor_ip]["peerUptimeEstablishedEpoch"]
+ peer_uptime_before_clear_bgp[bgp_neighbor] = ipv6_data[
+ neighbor_ip
+ ]["peerUptimeEstablishedEpoch"]
if nh_state == "Established":
no_of_peer += 1
if no_of_peer == total_peer:
- logger.info("BGP is Converged for router %s before bgp"
- " clear", router)
+ logger.info("BGP is Converged for router %s before bgp" " clear", router)
break
else:
- logger.info("BGP is not yet Converged for router %s "
- "before bgp clear", router)
+ logger.info(
+ "BGP is not yet Converged for router %s " "before bgp clear", router
+ )
else:
- errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \
- " router {}".format(router)
+ errormsg = (
+ "TIMEOUT!! BGP is not converged in 30 seconds for"
+ " router {}".format(router)
+ )
return errormsg
logger.info(peer_uptime_before_clear_bgp)
for retry in range(31):
sleeptime = 3
# Waiting for BGP to converge
- logger.info("Waiting for %s sec for BGP to converge on router"
- " %s...", sleeptime, router)
+ logger.info(
+ "Waiting for %s sec for BGP to converge on router" " %s...",
+ sleeptime,
+ router,
+ )
sleep(sleeptime)
-
- show_bgp_json = run_frr_cmd(rnode, "show bgp summary json",
- isjson=True)
+ show_bgp_json = run_frr_cmd(rnode, "show bgp summary json", isjson=True)
# Verifying output dictionary show_bgp_json is empty or not
if not bool(show_bgp_json):
errormsg = "BGP is not running"
data = topo["routers"][bgp_neighbor]["links"]
if dest_link in data:
- neighbor_ip = data[dest_link][addr_type].\
- split("/")[0]
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
if addr_type == "ipv4":
- ipv4_data = show_bgp_json["ipv4Unicast"][
- "peers"]
+ ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
nh_state = ipv4_data[neighbor_ip]["state"]
- peer_uptime_after_clear_bgp[bgp_neighbor] = \
- ipv4_data[neighbor_ip]["peerUptimeEstablishedEpoch"]
+ peer_uptime_after_clear_bgp[bgp_neighbor] = ipv4_data[
+ neighbor_ip
+ ]["peerUptimeEstablishedEpoch"]
else:
- ipv6_data = show_bgp_json["ipv6Unicast"][
- "peers"]
+ ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
nh_state = ipv6_data[neighbor_ip]["state"]
# Peer up time dictionary
- peer_uptime_after_clear_bgp[bgp_neighbor] = \
- ipv6_data[neighbor_ip]["peerUptimeEstablishedEpoch"]
+ peer_uptime_after_clear_bgp[bgp_neighbor] = ipv6_data[
+ neighbor_ip
+ ]["peerUptimeEstablishedEpoch"]
if nh_state == "Established":
no_of_peer += 1
if no_of_peer == total_peer:
- logger.info("BGP is Converged for router %s after bgp clear",
- router)
+ logger.info("BGP is Converged for router %s after bgp clear", router)
break
else:
- logger.info("BGP is not yet Converged for router %s after"
- " bgp clear", router)
+ logger.info(
+ "BGP is not yet Converged for router %s after" " bgp clear", router
+ )
else:
- errormsg = "TIMEOUT!! BGP is not converged in 30 seconds for" \
- " router {}".format(router)
+ errormsg = (
+ "TIMEOUT!! BGP is not converged in 30 seconds for"
+ " router {}".format(router)
+ )
return errormsg
logger.info(peer_uptime_after_clear_bgp)
# Comparing peerUptimeEstablishedEpoch dictionaries
if peer_uptime_before_clear_bgp != peer_uptime_after_clear_bgp:
- logger.info("BGP neighborship is reset after clear BGP on router %s",
- router)
+ logger.info("BGP neighborship is reset after clear BGP on router %s", router)
else:
- errormsg = "BGP neighborship is not reset after clear bgp on router" \
- " {}".format(router)
+ errormsg = (
+ "BGP neighborship is not reset after clear bgp on router"
+ " {}".format(router)
+ )
return errormsg
logger.debug("Exiting lib API: clear_bgp_and_verify()")
rnode = router_list[router]
- logger.info("Verifying bgp timers functionality, DUT is %s:",
- router)
+ logger.info("Verifying bgp timers functionality, DUT is %s:", router)
- show_ip_bgp_neighbor_json = \
- run_frr_cmd(rnode, "show ip bgp neighbor json", isjson=True)
+ show_ip_bgp_neighbor_json = run_frr_cmd(
+ rnode, "show ip bgp neighbor json", isjson=True
+ )
bgp_addr_type = input_dict[router]["bgp"]["address_family"]
if not check_address_types(addr_type):
continue
- bgp_neighbors = bgp_addr_type[addr_type]["unicast"][
- "neighbor"]
+ bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
for bgp_neighbor, peer_data in bgp_neighbors.iteritems():
for dest_link, peer_dict in peer_data["dest_link"].iteritems():
data = topo["routers"][bgp_neighbor]["links"]
holddowntimer = peer_dict["holddowntimer"]
if dest_link in data:
- neighbor_ip = data[dest_link][addr_type]. \
- split("/")[0]
+ neighbor_ip = data[dest_link][addr_type].split("/")[0]
neighbor_intf = data[dest_link]["interface"]
# Verify HoldDownTimer for neighbor
- bgpHoldTimeMsecs = show_ip_bgp_neighbor_json[
- neighbor_ip]["bgpTimerHoldTimeMsecs"]
+ bgpHoldTimeMsecs = show_ip_bgp_neighbor_json[neighbor_ip][
+ "bgpTimerHoldTimeMsecs"
+ ]
if bgpHoldTimeMsecs != holddowntimer * 1000:
- errormsg = "Verifying holddowntimer for bgp " \
- "neighbor {} under dut {}, found: {} " \
- "but expected: {}".format(
- neighbor_ip, router,
- bgpHoldTimeMsecs,
- holddowntimer * 1000)
+ errormsg = (
+ "Verifying holddowntimer for bgp "
+ "neighbor {} under dut {}, found: {} "
+ "but expected: {}".format(
+ neighbor_ip,
+ router,
+ bgpHoldTimeMsecs,
+ holddowntimer * 1000,
+ )
+ )
return errormsg
# Verify KeepAliveTimer for neighbor
- bgpKeepAliveTimeMsecs = show_ip_bgp_neighbor_json[
- neighbor_ip]["bgpTimerKeepAliveIntervalMsecs"]
+ bgpKeepAliveTimeMsecs = show_ip_bgp_neighbor_json[neighbor_ip][
+ "bgpTimerKeepAliveIntervalMsecs"
+ ]
if bgpKeepAliveTimeMsecs != keepalivetimer * 1000:
- errormsg = "Verifying keepalivetimer for bgp " \
- "neighbor {} under dut {}, found: {} " \
- "but expected: {}".format(
- neighbor_ip, router,
- bgpKeepAliveTimeMsecs,
- keepalivetimer * 1000)
+ errormsg = (
+ "Verifying keepalivetimer for bgp "
+ "neighbor {} under dut {}, found: {} "
+ "but expected: {}".format(
+ neighbor_ip,
+ router,
+ bgpKeepAliveTimeMsecs,
+ keepalivetimer * 1000,
+ )
+ )
return errormsg
####################
# Wait till keep alive time
logger.info("=" * 20)
logger.info("Scenario 1:")
- logger.info("Shutdown and bring up peer interface: %s "
- "in keep alive time : %s sec and verify "
- " BGP neighborship is intact in %s sec ",
- neighbor_intf, keepalivetimer,
- (holddowntimer - keepalivetimer))
+ logger.info(
+ "Shutdown and bring up peer interface: %s "
+ "in keep alive time : %s sec and verify "
+ " BGP neighborship is intact in %s sec ",
+ neighbor_intf,
+ keepalivetimer,
+ (holddowntimer - keepalivetimer),
+ )
logger.info("=" * 20)
logger.info("Waiting for %s sec..", keepalivetimer)
sleep(keepalivetimer)
# Shutting down peer ineterface
- logger.info("Shutting down interface %s on router %s",
- neighbor_intf, bgp_neighbor)
+ logger.info(
+ "Shutting down interface %s on router %s",
+ neighbor_intf,
+ bgp_neighbor,
+ )
topotest.interface_set_status(
- router_list[bgp_neighbor], neighbor_intf,
- ifaceaction=False)
+ router_list[bgp_neighbor], neighbor_intf, ifaceaction=False
+ )
# Bringing up peer interface
sleep(5)
- logger.info("Bringing up interface %s on router %s..",
- neighbor_intf, bgp_neighbor)
+ logger.info(
+ "Bringing up interface %s on router %s..",
+ neighbor_intf,
+ bgp_neighbor,
+ )
topotest.interface_set_status(
- router_list[bgp_neighbor], neighbor_intf,
- ifaceaction=True)
+ router_list[bgp_neighbor], neighbor_intf, ifaceaction=True
+ )
# Verifying BGP neighborship is intact in
# (holddown - keepalive) time
- for timer in range(keepalivetimer, holddowntimer,
- int(holddowntimer / 3)):
+ for timer in range(
+ keepalivetimer, holddowntimer, int(holddowntimer / 3)
+ ):
logger.info("Waiting for %s sec..", keepalivetimer)
sleep(keepalivetimer)
sleep(2)
- show_bgp_json = \
- run_frr_cmd(rnode, "show bgp summary json",
- isjson=True)
+ show_bgp_json = run_frr_cmd(
+ rnode, "show bgp summary json", isjson=True
+ )
if addr_type == "ipv4":
ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
ipv6_data = show_bgp_json["ipv6Unicast"]["peers"]
nh_state = ipv6_data[neighbor_ip]["state"]
- if timer == \
- (holddowntimer - keepalivetimer):
+ if timer == (holddowntimer - keepalivetimer):
if nh_state != "Established":
- errormsg = "BGP neighborship has not gone " \
- "down in {} sec for neighbor {}" \
- .format(timer, bgp_neighbor)
+ errormsg = (
+ "BGP neighborship is not intact "
+ "in {} sec for neighbor {}".format(
+ timer, bgp_neighbor
+ )
+ )
return errormsg
else:
- logger.info("BGP neighborship is intact in %s"
- " sec for neighbor %s",
- timer, bgp_neighbor)
+ logger.info(
+ "BGP neighborship is intact in %s"
+ " sec for neighbor %s",
+ timer,
+ bgp_neighbor,
+ )
####################
# Shutting down peer interface and verifying that BGP
####################
logger.info("=" * 20)
logger.info("Scenario 2:")
- logger.info("Shutdown peer interface: %s and verify BGP"
- " neighborship has gone down in hold down "
- "time %s sec", neighbor_intf, holddowntimer)
+ logger.info(
+ "Shutdown peer interface: %s and verify BGP"
+ " neighborship has gone down in hold down "
+ "time %s sec",
+ neighbor_intf,
+ holddowntimer,
+ )
logger.info("=" * 20)
- logger.info("Shutting down interface %s on router %s..",
- neighbor_intf, bgp_neighbor)
- topotest.interface_set_status(router_list[bgp_neighbor],
- neighbor_intf,
- ifaceaction=False)
+ logger.info(
+ "Shutting down interface %s on router %s..",
+ neighbor_intf,
+ bgp_neighbor,
+ )
+ topotest.interface_set_status(
+ router_list[bgp_neighbor], neighbor_intf, ifaceaction=False
+ )
# Verifying BGP neighborship is going down in holddown time
- for timer in range(keepalivetimer,
- (holddowntimer + keepalivetimer),
- int(holddowntimer / 3)):
+ for timer in range(
+ keepalivetimer,
+ (holddowntimer + keepalivetimer),
+ int(holddowntimer / 3),
+ ):
logger.info("Waiting for %s sec..", keepalivetimer)
sleep(keepalivetimer)
sleep(2)
- show_bgp_json = \
- run_frr_cmd(rnode, "show bgp summary json",
- isjson=True)
+ show_bgp_json = run_frr_cmd(
+ rnode, "show bgp summary json", isjson=True
+ )
if addr_type == "ipv4":
ipv4_data = show_bgp_json["ipv4Unicast"]["peers"]
if timer == holddowntimer:
if nh_state == "Established":
- errormsg = "BGP neighborship has not gone " \
- "down in {} sec for neighbor {}" \
- .format(timer, bgp_neighbor)
+ errormsg = (
+ "BGP neighborship has not gone "
+ "down in {} sec for neighbor {}".format(
+ timer, bgp_neighbor
+ )
+ )
return errormsg
else:
- logger.info("BGP neighborship has gone down in"
- " %s sec for neighbor %s",
- timer, bgp_neighbor)
+ logger.info(
+ "BGP neighborship has gone down in"
+ " %s sec for neighbor %s",
+ timer,
+ bgp_neighbor,
+ )
logger.debug("Exiting lib API: verify_bgp_timers_and_functionality()")
return True
@retry(attempts=3, wait=4, return_is_str=True)
-def verify_bgp_attributes(tgen, addr_type, dut, static_routes, rmap_name,
- input_dict, seq_id=None):
+def verify_bgp_attributes(
+ tgen, addr_type, dut, static_routes, rmap_name, input_dict, seq_id=None
+):
"""
API will verify BGP attributes set by Route-map for given prefix and
DUT. it will run "show bgp ipv4/ipv6 {prefix_address} json" command
if router != dut:
continue
- logger.info('Verifying BGP set attributes for dut {}:'.format(router))
+ logger.info("Verifying BGP set attributes for dut {}:".format(router))
for static_route in static_routes:
cmd = "show bgp {} {} json".format(addr_type, static_route)
dict_to_test = []
tmp_list = []
for rmap_router in input_dict.keys():
- for rmap, values in input_dict[rmap_router][
- "route_maps"].items():
+ for rmap, values in input_dict[rmap_router]["route_maps"].items():
if rmap == rmap_name:
dict_to_test = values
for rmap_dict in values:
seq_id = [seq_id]
if "seq_id" in rmap_dict:
- rmap_seq_id = \
- rmap_dict["seq_id"]
+ rmap_seq_id = rmap_dict["seq_id"]
for _seq_id in seq_id:
if _seq_id == rmap_seq_id:
tmp_list.append(rmap_dict)
for rmap_dict in dict_to_test:
if "set" in rmap_dict:
for criteria in rmap_dict["set"].keys():
- if criteria not in show_bgp_json[
- "paths"][0]:
- errormsg = ("BGP attribute: {}"
- " is not found in"
- " cli: {} output "
- "in router {}".
- format(criteria,
- cmd,
- router))
+ if criteria not in show_bgp_json["paths"][0]:
+ errormsg = (
+ "BGP attribute: {}"
+ " is not found in"
+ " cli: {} output "
+ "in router {}".format(criteria, cmd, router)
+ )
return errormsg
- if rmap_dict["set"][criteria] == \
- show_bgp_json["paths"][0][
- criteria]:
- logger.info("Verifying BGP "
- "attribute {} for"
- " route: {} in "
- "router: {}, found"
- " expected value:"
- " {}".
- format(criteria,
- static_route,
- dut,
- rmap_dict[
- "set"][
- criteria]))
+ if (
+ rmap_dict["set"][criteria]
+ == show_bgp_json["paths"][0][criteria]
+ ):
+ logger.info(
+ "Verifying BGP "
+ "attribute {} for"
+ " route: {} in "
+ "router: {}, found"
+ " expected value:"
+ " {}".format(
+ criteria,
+ static_route,
+ dut,
+ rmap_dict["set"][criteria],
+ )
+ )
else:
- errormsg = \
- ("Failed: Verifying BGP "
- "attribute {} for route:"
- " {} in router: {}, "
- " expected value: {} but"
- " found: {}".
- format(criteria,
- static_route,
- dut,
- rmap_dict["set"]
- [criteria],
- show_bgp_json[
- 'paths'][
- 0][criteria]))
+ errormsg = (
+ "Failed: Verifying BGP "
+ "attribute {} for route:"
+ " {} in router: {}, "
+ " expected value: {} but"
+ " found: {}".format(
+ criteria,
+ static_route,
+ dut,
+ rmap_dict["set"][criteria],
+ show_bgp_json["paths"][0][criteria],
+ )
+ )
return errormsg
logger.debug("Exiting lib API: verify_bgp_attributes()")
return True
+
@retry(attempts=4, wait=2, return_is_str=True, initial_wait=2)
-def verify_best_path_as_per_bgp_attribute(tgen, addr_type, router, input_dict,
- attribute):
+def verify_best_path_as_per_bgp_attribute(
+ tgen, addr_type, router, input_dict, attribute
+):
"""
API is to verify best path according to BGP attributes for given routes.
"show bgp ipv4/6 json" command will be run and verify best path according
# AS_PATH attribute
if attribute == "path":
# Find next_hop for the route have minimum as_path
- _next_hop = min(attribute_dict, key=lambda x: len(set(
- attribute_dict[x])))
+ _next_hop = min(
+ attribute_dict, key=lambda x: len(set(attribute_dict[x]))
+ )
compare = "SHORTEST"
# LOCAL_PREF attribute
elif attribute == "locPrf":
# Find next_hop for the route have highest local preference
- _next_hop = max(attribute_dict, key=(lambda k:
- attribute_dict[k]))
+ _next_hop = max(attribute_dict, key=(lambda k: attribute_dict[k]))
compare = "HIGHEST"
# WEIGHT attribute
elif attribute == "weight":
# Find next_hop for the route have highest weight
- _next_hop = max(attribute_dict, key=(lambda k:
- attribute_dict[k]))
+ _next_hop = max(attribute_dict, key=(lambda k: attribute_dict[k]))
compare = "HIGHEST"
# ORIGIN attribute
elif attribute == "origin":
# Find next_hop for the route have IGP as origin, -
# - rule is IGP>EGP>INCOMPLETE
- _next_hop = [key for (key, value) in
- attribute_dict.iteritems()
- if value == "IGP"][0]
+ _next_hop = [
+ key for (key, value) in attribute_dict.iteritems() if value == "IGP"
+ ][0]
compare = ""
# MED attribute
elif attribute == "metric":
# Find next_hop for the route have LOWEST MED
- _next_hop = min(attribute_dict, key=(lambda k:
- attribute_dict[k]))
+ _next_hop = min(attribute_dict, key=(lambda k: attribute_dict[k]))
compare = "LOWEST"
# Show ip route
# Verifying output dictionary rib_routes_json is not empty
if not bool(rib_routes_json):
- errormsg = "No route found in RIB of router {}..". \
- format(router)
+ errormsg = "No route found in RIB of router {}..".format(router)
return errormsg
st_found = False
if route in rib_routes_json:
st_found = True
# Verify next_hop in rib_routes_json
- if rib_routes_json[route][0]["nexthops"][0]["ip"] in \
- attribute_dict:
+ if rib_routes_json[route][0]["nexthops"][0]["ip"] in attribute_dict:
nh_found = True
else:
- errormsg = "Incorrect Nexthop for BGP route {} in " \
- "RIB of router {}, Expected: {}, Found:" \
- " {}\n".format(route, router,
- rib_routes_json[route][0][
- "nexthops"][0]["ip"],
- _next_hop)
+ errormsg = (
+ "Incorrect Nexthop for BGP route {} in "
+ "RIB of router {}, Expected: {}, Found:"
+ " {}\n".format(
+ route,
+ router,
+ rib_routes_json[route][0]["nexthops"][0]["ip"],
+ _next_hop,
+ )
+ )
return errormsg
if st_found and nh_found:
logger.info(
"Best path for prefix: %s with next_hop: %s is "
"installed according to %s %s: (%s) in RIB of "
- "router %s", route, _next_hop, compare,
- attribute, attribute_dict[_next_hop], router)
+ "router %s",
+ route,
+ _next_hop,
+ compare,
+ attribute,
+ attribute_dict[_next_hop],
+ router,
+ )
logger.debug("Exiting lib API: verify_best_path_as_per_bgp_attribute()")
return True
-def verify_best_path_as_per_admin_distance(tgen, addr_type, router, input_dict,
- attribute):
+def verify_best_path_as_per_admin_distance(
+ tgen, addr_type, router, input_dict, attribute
+):
"""
API is to verify best path according to admin distance for given
route. "show ip/ipv6 route json" command will be run and verify
for routes_from_router in input_dict.keys():
sh_ip_route_json = router_list[routes_from_router].vtysh_cmd(
- command, isjson=True)
+ command, isjson=True
+ )
networks = input_dict[routes_from_router]["static_routes"]
for network in networks:
route = network["network"]
attribute_dict[next_hop_ip] = route_attribute["distance"]
# Find next_hop for the route have LOWEST Admin Distance
- _next_hop = min(attribute_dict, key=(lambda k:
- attribute_dict[k]))
+ _next_hop = min(attribute_dict, key=(lambda k: attribute_dict[k]))
compare = "LOWEST"
# Show ip route
if route in rib_routes_json:
st_found = True
# Verify next_hop in rib_routes_json
- if rib_routes_json[route][0]["nexthops"][0]["ip"] == \
- _next_hop:
+ if rib_routes_json[route][0]["nexthops"][0]["ip"] == _next_hop:
nh_found = True
else:
- errormsg = ("Nexthop {} is Missing for BGP route {}"
- " in RIB of router {}\n".format(_next_hop,
- route, router))
+ errormsg = (
+ "Nexthop {} is Missing for BGP route {}"
+ " in RIB of router {}\n".format(_next_hop, route, router)
+ )
return errormsg
if st_found and nh_found:
- logger.info("Best path for prefix: %s is installed according"
- " to %s %s: (%s) in RIB of router %s", route,
- compare, attribute,
- attribute_dict[_next_hop], router)
-
- logger.info(
- "Exiting lib API: verify_best_path_as_per_admin_distance()")
+ logger.info(
+ "Best path for prefix: %s is installed according"
+ " to %s %s: (%s) in RIB of router %s",
+ route,
+ compare,
+ attribute,
+ attribute_dict[_next_hop],
+ router,
+ )
+
+ logger.info("Exiting lib API: verify_best_path_as_per_admin_distance()")
return True
# with this program; see the file COPYING; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
+#
# want_rd_routes = [
# {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'},
# {'rd':'10:1', 'p':'5.1.0.0/24', 'n':'1.1.1.1'},
-#
+#
# {'rd':'10:3', 'p':'5.1.0.0/24', 'n':'3.3.3.3'},
# ]
-#
+#
# ribRequireVpnRoutes('r2','Customer routes',want_rd_routes)
#
# want_unicast_routes = [
# ribRequireUnicastRoutes('r1','ipv4','','Customer routes in default',want_unicast_routes)
#
-from lutil import luCommand,luResult
+from lutil import luCommand, luResult
import json
import re
# gpz: get rib in json form and compare against desired routes
class BgpRib:
- def routes_include_wanted(self,pfxtbl,want,debug):
- # helper function to RequireVpnRoutes
- for pfx in pfxtbl.iterkeys():
- if debug:
- print 'trying pfx ' + pfx
- if pfx != want['p']:
- if debug:
- print 'want pfx=' + want['p'] + ', not ' + pfx
- continue
- if debug:
- print 'have pfx=' + pfx
- for r in pfxtbl[pfx]:
- if debug:
- print 'trying route'
- nexthops = r['nexthops']
- for nh in nexthops:
- if debug:
- print 'trying nh ' + nh['ip']
- if nh['ip'] == want['n']:
- if debug:
- print 'found ' + want['n']
- return 1
- else:
- if debug:
- print 'want nh=' + want['n'] + ', not ' + nh['ip']
- if debug:
- print 'missing route: pfx=' + want['p'] + ', nh=' + want['n']
- return 0
+ def routes_include_wanted(self, pfxtbl, want, debug):
+ # helper function to RequireVpnRoutes
+ for pfx in pfxtbl.iterkeys():
+ if debug:
+ print "trying pfx " + pfx
+ if pfx != want["p"]:
+ if debug:
+ print "want pfx=" + want["p"] + ", not " + pfx
+ continue
+ if debug:
+ print "have pfx=" + pfx
+ for r in pfxtbl[pfx]:
+ if debug:
+ print "trying route"
+ nexthops = r["nexthops"]
+ for nh in nexthops:
+ if debug:
+ print "trying nh " + nh["ip"]
+ if nh["ip"] == want["n"]:
+ if debug:
+ print "found " + want["n"]
+ return 1
+ else:
+ if debug:
+ print "want nh=" + want["n"] + ", not " + nh["ip"]
+ if debug:
+ print "missing route: pfx=" + want["p"] + ", nh=" + want["n"]
+ return 0
def RequireVpnRoutes(self, target, title, wantroutes, debug=0):
- import json
+ import json
+
logstr = "RequireVpnRoutes " + str(wantroutes)
- #non json form for humans
- luCommand(target,'vtysh -c "show bgp ipv4 vpn"','.','None','Get VPN RIB (non-json)')
- ret = luCommand(target,'vtysh -c "show bgp ipv4 vpn json"','.*','None','Get VPN RIB (json)')
- if re.search(r'^\s*$', ret):
+ # non json form for humans
+ luCommand(
+ target,
+ 'vtysh -c "show bgp ipv4 vpn"',
+ ".",
+ "None",
+ "Get VPN RIB (non-json)",
+ )
+ ret = luCommand(
+ target,
+ 'vtysh -c "show bgp ipv4 vpn json"',
+ ".*",
+ "None",
+ "Get VPN RIB (json)",
+ )
+ if re.search(r"^\s*$", ret):
# degenerate case: empty json means no routes
if len(wantroutes) > 0:
luResult(target, False, title, logstr)
return
luResult(target, True, title, logstr)
- rib = json.loads(ret)
- rds = rib['routes']['routeDistinguishers']
- for want in wantroutes:
- found = 0
- if debug:
- print "want rd " + want['rd']
- for rd in rds.iterkeys():
- if rd != want['rd']:
- continue
- if debug:
- print "found rd " + rd
- table = rds[rd]
- if self.routes_include_wanted(table,want,debug):
- found = 1
- break
- if not found:
- luResult(target, False, title, logstr)
- return
- luResult(target, True, title, logstr)
+ rib = json.loads(ret)
+ rds = rib["routes"]["routeDistinguishers"]
+ for want in wantroutes:
+ found = 0
+ if debug:
+ print "want rd " + want["rd"]
+ for rd in rds.iterkeys():
+ if rd != want["rd"]:
+ continue
+ if debug:
+ print "found rd " + rd
+ table = rds[rd]
+ if self.routes_include_wanted(table, want, debug):
+ found = 1
+ break
+ if not found:
+ luResult(target, False, title, logstr)
+ return
+ luResult(target, True, title, logstr)
- def RequireUnicastRoutes(self,target,afi,vrf,title,wantroutes,debug=0):
+ def RequireUnicastRoutes(self, target, afi, vrf, title, wantroutes, debug=0):
logstr = "RequireVpnRoutes " + str(wantroutes)
- vrfstr = ''
- if vrf != '':
- vrfstr = 'vrf %s' % (vrf)
+ vrfstr = ""
+ if vrf != "":
+ vrfstr = "vrf %s" % (vrf)
- if (afi != 'ipv4') and (afi != 'ipv6'):
- print "ERROR invalid afi";
+ if (afi != "ipv4") and (afi != "ipv6"):
+ print "ERROR invalid afi"
- cmdstr = 'show bgp %s %s unicast' % (vrfstr, afi)
- #non json form for humans
- cmd = 'vtysh -c "%s"' % cmdstr
- luCommand(target,cmd,'.','None','Get %s %s RIB (non-json)' % (vrfstr, afi))
+ cmdstr = "show bgp %s %s unicast" % (vrfstr, afi)
+ # non json form for humans
+ cmd = 'vtysh -c "%s"' % cmdstr
+ luCommand(target, cmd, ".", "None", "Get %s %s RIB (non-json)" % (vrfstr, afi))
cmd = 'vtysh -c "%s json"' % cmdstr
- ret = luCommand(target,cmd,'.*','None','Get %s %s RIB (json)' % (vrfstr, afi))
- if re.search(r'^\s*$', ret):
+ ret = luCommand(
+ target, cmd, ".*", "None", "Get %s %s RIB (json)" % (vrfstr, afi)
+ )
+ if re.search(r"^\s*$", ret):
# degenerate case: empty json means no routes
if len(wantroutes) > 0:
luResult(target, False, title, logstr)
return
luResult(target, True, title, logstr)
- rib = json.loads(ret)
+ rib = json.loads(ret)
try:
- table = rib['routes']
- # KeyError: 'routes' probably means missing/bad VRF
+ table = rib["routes"]
+ # KeyError: 'routes' probably means missing/bad VRF
except KeyError as err:
- if vrf != '':
- errstr = '-script ERROR: check if wrong vrf (%s)' % (vrf)
+ if vrf != "":
+ errstr = "-script ERROR: check if wrong vrf (%s)" % (vrf)
else:
- errstr = '-script ERROR: check if vrf missing'
- luResult(target, False, title + errstr, logstr)
- return
- for want in wantroutes:
- if not self.routes_include_wanted(table,want,debug):
- luResult(target, False, title, logstr)
- return
- luResult(target, True, title, logstr)
+ errstr = "-script ERROR: check if vrf missing"
+ luResult(target, False, title + errstr, logstr)
+ return
+ for want in wantroutes:
+ if not self.routes_include_wanted(table, want, debug):
+ luResult(target, False, title, logstr)
+ return
+ luResult(target, True, title, logstr)
-BgpRib=BgpRib()
+BgpRib = BgpRib()
+
def bgpribRequireVpnRoutes(target, title, wantroutes, debug=0):
BgpRib.RequireVpnRoutes(target, title, wantroutes, debug)
+
def bgpribRequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug=0):
BgpRib.RequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug)
frrtest_log_file = frrtest_log_dir + logfile_name + str(time_stamp)
print("frrtest_log_file..", frrtest_log_file)
- logger = logger_config.get_logger(name="test_execution_logs",
- log_level=loglevel,
- target=frrtest_log_file)
+ logger = logger_config.get_logger(
+ name="test_execution_logs", log_level=loglevel, target=frrtest_log_file
+ )
print("Logs will be sent to logfile: {}".format(frrtest_log_file))
if config.has_option("topogen", "show_router_config"):
# Saves sequence id numbers
-SEQ_ID = {
- "prefix_lists": {},
- "route_maps": {}
-}
+SEQ_ID = {"prefix_lists": {}, "route_maps": {}}
def get_seq_id(obj_type, router, obj_name):
class InvalidCLIError(Exception):
"""Raise when the CLI command is wrong"""
+
pass
else:
print_data = ret_data
- logger.info('Output for command [ %s] on router %s:\n%s',
- cmd.rstrip("json"), rnode.name, print_data)
+ logger.info(
+ "Output for command [ %s] on router %s:\n%s",
+ cmd.rstrip("json"),
+ rnode.name,
+ print_data,
+ )
return ret_data
else:
- raise InvalidCLIError('No actual cmd passed')
+ raise InvalidCLIError("No actual cmd passed")
-def create_common_configuration(tgen, router, data, config_type=None,
- build=False):
+def create_common_configuration(tgen, router, data, config_type=None, build=False):
"""
API to create object of class FRRConfig and also create frr_json.conf
file. It will create interface and common configurations and save it to
fname = "{}/{}/{}".format(TMPDIR, router, FRRCFG_FILE)
- config_map = OrderedDict({
- "general_config": "! FRR General Config\n",
- "interface_config": "! Interfaces Config\n",
- "static_route": "! Static Route Config\n",
- "prefix_list": "! Prefix List Config\n",
- "bgp_community_list": "! Community List Config\n",
- "route_maps": "! Route Maps Config\n",
- "bgp": "! BGP Config\n"
- })
+ config_map = OrderedDict(
+ {
+ "general_config": "! FRR General Config\n",
+ "interface_config": "! Interfaces Config\n",
+ "static_route": "! Static Route Config\n",
+ "prefix_list": "! Prefix List Config\n",
+ "bgp_community_list": "! Community List Config\n",
+ "route_maps": "! Route Maps Config\n",
+ "bgp": "! BGP Config\n",
+ }
+ )
if build:
mode = "a"
frr_cfg_fd.write("\n")
except IOError as err:
- logger.error("Unable to open FRR Config File. error(%s): %s" %
- (err.errno, err.strerror))
+ logger.error(
+ "Unable to open FRR Config File. error(%s): %s" % (err.errno, err.strerror)
+ )
return False
finally:
frr_cfg_fd.close()
continue
router = router_list[rname]
- logger.info("Configuring router %s to initial test configuration",
- rname)
+ logger.info("Configuring router %s to initial test configuration", rname)
cfg = router.run("vtysh -c 'show running'")
fname = "{}/{}/frr.sav".format(TMPDIR, rname)
dname = "{}/{}/delta.conf".format(TMPDIR, rname)
for line in cfg.split("\n"):
line = line.strip()
- if (line == "Building configuration..." or
- line == "Current configuration:" or
- not line):
+ if (
+ line == "Building configuration..."
+ or line == "Current configuration:"
+ or not line
+ ):
continue
f.write(line)
f.write("\n")
init_cfg_file = "{}/{}/frr_json_initial.conf".format(TMPDIR, rname)
tempdir = mkdtemp()
- with open(os.path.join(tempdir, 'vtysh.conf'), 'w') as fd:
+ with open(os.path.join(tempdir, "vtysh.conf"), "w") as fd:
pass
- command = "/usr/lib/frr/frr-reload.py --confdir {} --input {} --test {} > {}". \
- format(tempdir, run_cfg_file, init_cfg_file, dname)
- result = call(command, shell=True, stderr=SUB_STDOUT,
- stdout=SUB_PIPE)
+ command = "/usr/lib/frr/frr-reload.py --confdir {} --input {} --test {} > {}".format(
+ tempdir, run_cfg_file, init_cfg_file, dname
+ )
+ result = call(command, shell=True, stderr=SUB_STDOUT, stdout=SUB_PIPE)
- os.unlink(os.path.join(tempdir, 'vtysh.conf'))
+ os.unlink(os.path.join(tempdir, "vtysh.conf"))
os.rmdir(tempdir)
# Assert if command fail
if result > 0:
- logger.error("Delta file creation failed. Command executed %s",
- command)
- with open(run_cfg_file, 'r') as fd:
- logger.info('Running configuration saved in %s is:\n%s',
- run_cfg_file, fd.read())
- with open(init_cfg_file, 'r') as fd:
- logger.info('Test configuration saved in %s is:\n%s',
- init_cfg_file, fd.read())
-
- err_cmd = ['/usr/bin/vtysh', '-m', '-f', run_cfg_file]
+ logger.error("Delta file creation failed. Command executed %s", command)
+ with open(run_cfg_file, "r") as fd:
+ logger.info(
+ "Running configuration saved in %s is:\n%s", run_cfg_file, fd.read()
+ )
+ with open(init_cfg_file, "r") as fd:
+ logger.info(
+ "Test configuration saved in %s is:\n%s", init_cfg_file, fd.read()
+ )
+
+ err_cmd = ["/usr/bin/vtysh", "-m", "-f", run_cfg_file]
result = Popen(err_cmd, stdout=SUB_PIPE, stderr=SUB_PIPE)
output = result.communicate()
for out_data in output:
- temp_data = out_data.decode('utf-8').lower()
+ temp_data = out_data.decode("utf-8").lower()
for out_err in ERROR_LIST:
if out_err.lower() in temp_data:
- logger.error("Found errors while validating data in"
- " %s", run_cfg_file)
+ logger.error(
+ "Found errors while validating data in" " %s", run_cfg_file
+ )
raise InvalidCLIError(out_data)
raise InvalidCLIError("Unknown error in %s", output)
t_delta = f.read()
for line in t_delta.split("\n"):
line = line.strip()
- if (line == "Lines To Delete" or
- line == "===============" or
- line == "Lines To Add" or
- line == "============" or
- not line):
+ if (
+ line == "Lines To Delete"
+ or line == "==============="
+ or line == "Lines To Add"
+ or line == "============"
+ or not line
+ ):
continue
delta.write(line)
delta.write("\n")
delta.write("end\n")
- output = router.vtysh_multicmd(delta.getvalue(),
- pretty_output=False)
+ output = router.vtysh_multicmd(delta.getvalue(), pretty_output=False)
delta.close()
delta = StringIO.StringIO()
# Router current configuration to log file or console if
# "show_router_config" is defined in "pytest.ini"
if show_router_config:
- logger.info("Configuration on router {} after config reset:".
- format(rname))
+ logger.info("Configuration on router {} after config reset:".format(rname))
logger.info(delta.getvalue())
delta.close()
router = router_list[rname]
try:
frr_cfg_file = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_FILE)
- frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname,
- FRRCFG_BKUP_FILE)
+ frr_cfg_bkup = "{}/{}/{}".format(TMPDIR, rname, FRRCFG_BKUP_FILE)
with open(frr_cfg_file, "r+") as cfg:
data = cfg.read()
- logger.info("Applying following configuration on router"
- " {}:\n{}".format(rname, data))
+ logger.info(
+ "Applying following configuration on router"
+ " {}:\n{}".format(rname, data)
+ )
if save_bkup:
with open(frr_cfg_bkup, "w") as bkup:
bkup.write(data)
cfg.truncate(0)
except IOError as err:
- errormsg = ("Unable to open config File. error(%s):"
- " %s", (err.errno, err.strerror))
+ errormsg = (
+ "Unable to open config File. error(%s):" " %s",
+ (err.errno, err.strerror),
+ )
return errormsg
# Router current configuration to log file or console if
# Starting deamons
router_list = tgen.routers()
- ROUTER_LIST = sorted(router_list.keys(),
- key=lambda x: int(re_search('\d+', x).group(0)))
+ ROUTER_LIST = sorted(
+ router_list.keys(), key=lambda x: int(re_search("\d+", x).group(0))
+ )
TMPDIR = os.path.join(LOGDIR, tgen.modname)
router_list = tgen.routers()
# Creating router named dir and empty zebra.conf bgpd.conf files
# inside the current directory
- if os.path.isdir('{}'.format(rname)):
+ if os.path.isdir("{}".format(rname)):
os.system("rm -rf {}".format(rname))
- os.mkdir('{}'.format(rname))
- os.system('chmod -R go+rw {}'.format(rname))
- os.chdir('{}/{}'.format(TMPDIR, rname))
- os.system('touch zebra.conf bgpd.conf')
+ os.mkdir("{}".format(rname))
+ os.system("chmod -R go+rw {}".format(rname))
+ os.chdir("{}/{}".format(TMPDIR, rname))
+ os.system("touch zebra.conf bgpd.conf")
else:
- os.mkdir('{}'.format(rname))
- os.system('chmod -R go+rw {}'.format(rname))
- os.chdir('{}/{}'.format(TMPDIR, rname))
- os.system('touch zebra.conf bgpd.conf')
+ os.mkdir("{}".format(rname))
+ os.system("chmod -R go+rw {}".format(rname))
+ os.chdir("{}/{}".format(TMPDIR, rname))
+ os.system("touch zebra.conf bgpd.conf")
except IOError as (errno, strerror):
logger.error("I/O error({0}): {1}".format(errno, strerror))
# Loading empty zebra.conf file to router, to start the zebra deamon
router.load_config(
- TopoRouter.RD_ZEBRA,
- '{}/{}/zebra.conf'.format(TMPDIR, rname)
+ TopoRouter.RD_ZEBRA, "{}/{}/zebra.conf".format(TMPDIR, rname)
)
# Loading empty bgpd.conf file to router, to start the bgp deamon
- router.load_config(
- TopoRouter.RD_BGP,
- '{}/{}/bgpd.conf'.format(TMPDIR, rname)
- )
+ router.load_config(TopoRouter.RD_BGP, "{}/{}/bgpd.conf".format(TMPDIR, rname))
# Starting routers
logger.info("Starting all routers once topology is created")
# Common APIs, will be used by all protocols
#############################################
+
def validate_ip_address(ip_address):
"""
Validates the type of ip address
return "ipv6"
if not v4 and not v6:
- raise Exception("InvalidIpAddr", "%s is neither valid IPv4 or IPv6"
- " address" % ip_address)
+ raise Exception(
+ "InvalidIpAddr", "%s is neither valid IPv4 or IPv6" " address" % ip_address
+ )
def check_address_types(addr_type=None):
return addr_types
if addr_type not in addr_types:
- logger.error("{} not in supported/configured address types {}".
- format(addr_type, addr_types))
+ logger.error(
+ "{} not in supported/configured address types {}".format(
+ addr_type, addr_types
+ )
+ )
return False
return True
return ipaddress_list
-def find_interface_with_greater_ip(topo, router, loopback=True,
- interface=True):
+def find_interface_with_greater_ip(topo, router, loopback=True, interface=True):
"""
Returns highest interface ip for ipv4/ipv6. If loopback is there then
it will return highest IP from loopback IPs otherwise from physical
if loopback:
if "type" in data and data["type"] == "loopback":
lo_exists = True
- ip_address = topo["routers"][router]["links"][
- destRouterLink]["ipv4"].split("/")[0]
+ ip_address = topo["routers"][router]["links"][destRouterLink][
+ "ipv4"
+ ].split("/")[0]
lo_list.append(ip_address)
if interface:
- ip_address = topo["routers"][router]["links"][
- destRouterLink]["ipv4"].split("/")[0]
+ ip_address = topo["routers"][router]["links"][destRouterLink]["ipv4"].split(
+ "/"
+ )[0]
interfaces_list.append(ip_address)
if lo_exists:
def write_test_header(tc_name):
""" Display message at beginning of test case"""
count = 20
- logger.info("*"*(len(tc_name)+count))
+ logger.info("*" * (len(tc_name) + count))
step("START -> Testcase : %s" % tc_name, reset=True)
- logger.info("*"*(len(tc_name)+count))
+ logger.info("*" * (len(tc_name) + count))
def write_test_footer(tc_name):
""" Display message at end of test case"""
count = 21
- logger.info("="*(len(tc_name)+count))
+ logger.info("=" * (len(tc_name) + count))
logger.info("Testcase : %s -> PASSED", tc_name)
- logger.info("="*(len(tc_name)+count))
+ logger.info("=" * (len(tc_name) + count))
def interface_status(tgen, topo, input_dict):
global frr_cfg
for router in input_dict.keys():
- interface_list = input_dict[router]['interface_list']
- status = input_dict[router].setdefault('status', 'up')
+ interface_list = input_dict[router]["interface_list"]
+ status = input_dict[router].setdefault("status", "up")
for intf in interface_list:
rnode = tgen.routers()[router]
interface_set_status(rnode, intf, status)
"""
def _retry(func):
-
@wraps(func)
def func_retry(*args, **kwargs):
- _wait = kwargs.pop('wait', wait)
- _attempts = kwargs.pop('attempts', attempts)
+ _wait = kwargs.pop("wait", wait)
+ _attempts = kwargs.pop("attempts", attempts)
_attempts = int(_attempts)
if _attempts < 0:
raise ValueError("attempts must be 0 or greater")
logger.info("Waiting for [%s]s as initial delay", initial_wait)
sleep(initial_wait)
- _return_is_str = kwargs.pop('return_is_str', return_is_str)
+ _return_is_str = kwargs.pop("return_is_str", return_is_str)
for i in range(1, _attempts + 1):
try:
- _expected = kwargs.setdefault('expected', True)
- kwargs.pop('expected')
+ _expected = kwargs.setdefault("expected", True)
+ kwargs.pop("expected")
ret = func(*args, **kwargs)
logger.debug("Function returned %s" % ret)
if return_is_str and isinstance(ret, bool) and _expected:
return ret
except Exception as err:
if _attempts == i:
- logger.info("Max number of attempts (%r) reached",
- _attempts)
+ logger.info("Max number of attempts (%r) reached", _attempts)
raise
else:
logger.info("Function returned %s", err)
if i < _attempts:
- logger.info("Retry [#%r] after sleeping for %ss"
- % (i, _wait))
+ logger.info("Retry [#%r] after sleeping for %ss" % (i, _wait))
sleep(_wait)
+
func_retry._original = func
return func_retry
+
return _retry
"""
Prints step number for the test case step being executed
"""
+
count = 1
def __call__(self, msg, reset):
interface_name = destRouterLink
else:
interface_name = data["interface"]
- interface_data.append("interface {}".format(
- str(interface_name)
- ))
+ interface_data.append("interface {}".format(str(interface_name)))
if "ipv4" in data:
intf_addr = c_data["links"][destRouterLink]["ipv4"]
- interface_data.append("ip address {}".format(
- intf_addr
- ))
+ interface_data.append("ip address {}".format(intf_addr))
if "ipv6" in data:
intf_addr = c_data["links"][destRouterLink]["ipv6"]
- interface_data.append("ipv6 address {}".format(
- intf_addr
- ))
-
- result = create_common_configuration(tgen, c_router,
- interface_data,
- "interface_config",
- build=build)
+ interface_data.append("ipv6 address {}".format(intf_addr))
+
+ result = create_common_configuration(
+ tgen, c_router, interface_data, "interface_config", build=build
+ )
except InvalidCLIError:
# Traceback
errormsg = traceback.format_exc()
del_action = static_route.setdefault("delete", False)
# No of IPs
no_of_ip = static_route.setdefault("no_of_ip", 1)
- admin_distance = static_route.setdefault("admin_distance",
- None)
+ admin_distance = static_route.setdefault("admin_distance", None)
tag = static_route.setdefault("tag", None)
- if "next_hop" not in static_route or \
- "network" not in static_route:
- errormsg = "'next_hop' or 'network' missing in" \
- " input_dict"
+ if "next_hop" not in static_route or "network" not in static_route:
+ errormsg = "'next_hop' or 'network' missing in" " input_dict"
return errormsg
next_hop = static_route["next_hop"]
static_routes_list.append(cmd)
- result = create_common_configuration(tgen, router,
- static_routes_list,
- "static_route",
- build=build)
+ result = create_common_configuration(
+ tgen, router, static_routes_list, "static_route", build=build
+ )
except InvalidCLIError:
# Traceback
for prefix_name, prefix_list in prefix_data.iteritems():
for prefix_dict in prefix_list:
- if "action" not in prefix_dict or \
- "network" not in prefix_dict:
- errormsg = "'action' or network' missing in" \
- " input_dict"
+ if "action" not in prefix_dict or "network" not in prefix_dict:
+ errormsg = "'action' or network' missing in" " input_dict"
return errormsg
network_addr = prefix_dict["network"]
seqid = prefix_dict.setdefault("seqid", None)
del_action = prefix_dict.setdefault("delete", False)
if seqid is None:
- seqid = get_seq_id("prefix_lists", router,
- prefix_name)
+ seqid = get_seq_id("prefix_lists", router, prefix_name)
else:
- set_seq_id("prefix_lists", router, seqid,
- prefix_name)
+ set_seq_id("prefix_lists", router, seqid, prefix_name)
if addr_type == "ipv4":
protocol = "ip"
cmd = "no {}".format(cmd)
config_data.append(cmd)
- result = create_common_configuration(tgen, router,
- config_data,
- "prefix_list",
- build=build)
+ result = create_common_configuration(
+ tgen, router, config_data, "prefix_list", build=build
+ )
except InvalidCLIError:
# Traceback
logger.debug("route_maps not present in input_dict")
continue
rmap_data = []
- for rmap_name, rmap_value in \
- input_dict[router]["route_maps"].iteritems():
+ for rmap_name, rmap_value in input_dict[router]["route_maps"].iteritems():
for rmap_dict in rmap_value:
del_action = rmap_dict.setdefault("delete", False)
else:
set_seq_id("route_maps", router, seq_id, rmap_name)
- rmap_data.append("route-map {} {} {}".format(
- rmap_name, rmap_action, seq_id
- ))
+ rmap_data.append(
+ "route-map {} {} {}".format(rmap_name, rmap_action, seq_id)
+ )
if "continue" in rmap_dict:
continue_to = rmap_dict["continue"]
if continue_to:
- rmap_data.append("on-match goto {}".
- format(continue_to))
+ rmap_data.append("on-match goto {}".format(continue_to))
else:
- logger.error("In continue, 'route-map entry "
- "sequence number' is not provided")
+ logger.error(
+ "In continue, 'route-map entry "
+ "sequence number' is not provided"
+ )
return False
if "goto" in rmap_dict:
go_to = rmap_dict["goto"]
if go_to:
- rmap_data.append("on-match goto {}".
- format(go_to))
+ rmap_data.append("on-match goto {}".format(go_to))
else:
- logger.error("In goto, 'Goto Clause number' is not"
- " provided")
+ logger.error(
+ "In goto, 'Goto Clause number' is not" " provided"
+ )
return False
if "call" in rmap_dict:
call_rmap = rmap_dict["call"]
if call_rmap:
- rmap_data.append("call {}".
- format(call_rmap))
+ rmap_data.append("call {}".format(call_rmap))
else:
- logger.error("In call, 'destination Route-Map' is"
- " not provided")
+ logger.error(
+ "In call, 'destination Route-Map' is" " not provided"
+ )
return False
# Verifying if SET criteria is defined
set_data = rmap_dict["set"]
ipv4_data = set_data.setdefault("ipv4", {})
ipv6_data = set_data.setdefault("ipv6", {})
- local_preference = set_data.setdefault("locPrf",
- None)
+ local_preference = set_data.setdefault("locPrf", None)
metric = set_data.setdefault("metric", None)
as_path = set_data.setdefault("path", {})
weight = set_data.setdefault("weight", None)
community = set_data.setdefault("community", {})
- large_community = set_data.setdefault(
- "large_community", {})
- large_comm_list = set_data.setdefault(
- "large_comm_list", {})
+ large_community = set_data.setdefault("large_community", {})
+ large_comm_list = set_data.setdefault("large_comm_list", {})
set_action = set_data.setdefault("set_action", None)
nexthop = set_data.setdefault("nexthop", None)
origin = set_data.setdefault("origin", None)
# Local Preference
if local_preference:
- rmap_data.append("set local-preference {}".
- format(local_preference))
+ rmap_data.append(
+ "set local-preference {}".format(local_preference)
+ )
# Metric
if metric:
as_num = as_path.setdefault("as_num", None)
as_action = as_path.setdefault("as_action", None)
if as_action and as_num:
- rmap_data.append("set as-path {} {}".
- format(as_action, as_num))
+ rmap_data.append(
+ "set as-path {} {}".format(as_action, as_num)
+ )
# Community
if community:
cmd = "{} {}".format(cmd, comm_action)
rmap_data.append(cmd)
else:
- logger.error("In community, AS Num not"
- " provided")
+ logger.error("In community, AS Num not" " provided")
return False
if large_community:
num = large_community.setdefault("num", None)
- comm_action = large_community.setdefault("action",
- None)
+ comm_action = large_community.setdefault("action", None)
if num:
cmd = "set large-community {}".format(num)
if comm_action:
rmap_data.append(cmd)
else:
- logger.error("In large_community, AS Num not"
- " provided")
+ logger.error(
+ "In large_community, AS Num not" " provided"
+ )
return False
if large_comm_list:
id = large_comm_list.setdefault("id", None)
- del_comm = large_comm_list.setdefault("delete",
- None)
+ del_comm = large_comm_list.setdefault("delete", None)
if id:
cmd = "set large-comm-list {}".format(id)
if del_comm:
rmap_data.append(cmd)
else:
- logger.error("In large_comm_list 'id' not"
- " provided")
+ logger.error("In large_comm_list 'id' not" " provided")
return False
# Weight
if weight:
- rmap_data.append("set weight {}".format(
- weight))
+ rmap_data.append("set weight {}".format(weight))
if ipv6_data:
nexthop = ipv6_data.setdefault("nexthop", None)
if nexthop:
- rmap_data.append("set ipv6 next-hop {}".format(
- nexthop
- ))
+ rmap_data.append("set ipv6 next-hop {}".format(nexthop))
# Adding MATCH and SET sequence to RMAP if defined
if "match" in rmap_dict:
match_data = rmap_dict["match"]
ipv4_data = match_data.setdefault("ipv4", {})
ipv6_data = match_data.setdefault("ipv6", {})
- community = match_data.setdefault(
- "community_list",{})
- large_community = match_data.setdefault(
- "large_community", {}
- )
+ community = match_data.setdefault("community_list", {})
+ large_community = match_data.setdefault("large_community", {})
large_community_list = match_data.setdefault(
"large_community_list", {}
)
if ipv4_data:
# fetch prefix list data from rmap
- prefix_name = \
- ipv4_data.setdefault("prefix_lists",
- None)
+ prefix_name = ipv4_data.setdefault("prefix_lists", None)
if prefix_name:
- rmap_data.append("match ip address"
- " prefix-list {}".format(prefix_name))
+ rmap_data.append(
+ "match ip address"
+ " prefix-list {}".format(prefix_name)
+ )
# fetch tag data from rmap
tag = ipv4_data.setdefault("tag", None)
# fetch large community data from rmap
large_community_list = ipv4_data.setdefault(
- "large_community_list",{})
+ "large_community_list", {}
+ )
large_community = match_data.setdefault(
- "large_community", {})
+ "large_community", {}
+ )
if ipv6_data:
- prefix_name = ipv6_data.setdefault("prefix_lists",
- None)
+ prefix_name = ipv6_data.setdefault("prefix_lists", None)
if prefix_name:
- rmap_data.append("match ipv6 address"
- " prefix-list {}".format(prefix_name))
+ rmap_data.append(
+ "match ipv6 address"
+ " prefix-list {}".format(prefix_name)
+ )
# fetch tag data from rmap
tag = ipv6_data.setdefault("tag", None)
# fetch large community data from rmap
large_community_list = ipv6_data.setdefault(
- "large_community_list",{})
+ "large_community_list", {}
+ )
large_community = match_data.setdefault(
- "large_community", {})
+ "large_community", {}
+ )
if community:
if "id" not in community:
- logger.error("'id' is mandatory for "
- "community-list in match"
- " criteria")
+ logger.error(
+ "'id' is mandatory for "
+ "community-list in match"
+ " criteria"
+ )
return False
cmd = "match community {}".format(community["id"])
- exact_match = community.setdefault("exact_match",
- False)
+ exact_match = community.setdefault("exact_match", False)
if exact_match:
cmd = "{} exact-match".format(cmd)
rmap_data.append(cmd)
if large_community:
if "id" not in large_community:
- logger.error("'id' is mandatory for "
- "large-community-list in match "
- "criteria")
+ logger.error(
+ "'id' is mandatory for "
+ "large-community-list in match "
+ "criteria"
+ )
return False
cmd = "match large-community {}".format(
- large_community["id"])
+ large_community["id"]
+ )
exact_match = large_community.setdefault(
- "exact_match", False)
+ "exact_match", False
+ )
if exact_match:
cmd = "{} exact-match".format(cmd)
rmap_data.append(cmd)
if large_community_list:
if "id" not in large_community_list:
- logger.error("'id' is mandatory for "
- "large-community-list in match "
- "criteria")
+ logger.error(
+ "'id' is mandatory for "
+ "large-community-list in match "
+ "criteria"
+ )
return False
cmd = "match large-community {}".format(
- large_community_list["id"])
+ large_community_list["id"]
+ )
exact_match = large_community_list.setdefault(
- "exact_match", False)
+ "exact_match", False
+ )
if exact_match:
cmd = "{} exact-match".format(cmd)
rmap_data.append(cmd)
- result = create_common_configuration(tgen, router,
- rmap_data,
- "route_maps",
- build=build)
+ result = create_common_configuration(
+ tgen, router, rmap_data, "route_maps", build=build
+ )
except InvalidCLIError:
# Traceback
rmap_data = input_dict[router]
rmap_data["route_maps"] = {}
for route_map_name in route_maps:
- rmap_data["route_maps"].update({
- route_map_name:
- [{
- "delete": True
- }]
- })
+ rmap_data["route_maps"].update({route_map_name: [{"delete": True}]})
return create_route_maps(tgen, input_dict)
community_list = input_dict[router]["bgp_community_lists"]
for community_dict in community_list:
del_action = community_dict.setdefault("delete", False)
- community_type = community_dict.setdefault("community_type",
- None)
+ community_type = community_dict.setdefault("community_type", None)
action = community_dict.setdefault("action", None)
- value = community_dict.setdefault("value", '')
+ value = community_dict.setdefault("value", "")
large = community_dict.setdefault("large", None)
name = community_dict.setdefault("name", None)
if large:
cmd = "bgp community-list"
if not large and not (community_type and action and value):
- errormsg = "community_type, action and value are " \
- "required in bgp_community_list"
+ errormsg = (
+ "community_type, action and value are "
+ "required in bgp_community_list"
+ )
logger.error(errormsg)
return False
try:
community_type = int(community_type)
- cmd = "{} {} {} {}".format(cmd, community_type, action,
- value)
+ cmd = "{} {} {} {}".format(cmd, community_type, action, value)
except ValueError:
cmd = "{} {} {} {} {}".format(
- cmd, community_type, name, action, value)
+ cmd, community_type, name, action, value
+ )
if del_action:
cmd = "no {}".format(cmd)
config_data.append(cmd)
- result = create_common_configuration(tgen, router, config_data,
- "bgp_community_list",
- build=build)
+ result = create_common_configuration(
+ tgen, router, config_data, "bgp_community_list", build=build
+ )
except InvalidCLIError:
# Traceback
# Verifying output dictionary rib_routes_json is not empty
if bool(rib_routes_json) is False:
- errormsg = "No {} route found in rib of router {}..". \
- format(protocol, router)
+ errormsg = "No {} route found in rib of router {}..".format(
+ protocol, router
+ )
return errormsg
if "static_routes" in input_dict[routerInput]:
if type(next_hop) is not list:
next_hop = [next_hop]
- found_hops = [rib_r["ip"] for rib_r in
- rib_routes_json[st_rt][0][
- "nexthops"]]
+ found_hops = [
+ rib_r["ip"]
+ for rib_r in rib_routes_json[st_rt][0]["nexthops"]
+ ]
for nh in found_hops:
nh_found = False
if nh and nh in next_hop:
nh_found = True
else:
- errormsg = ("Nexthop {} is Missing for {}"
- " route {} in RIB of router"
- " {}\n".format(next_hop,
- protocol,
- st_rt, dut))
+ errormsg = (
+ "Nexthop {} is Missing for {}"
+ " route {} in RIB of router"
+ " {}\n".format(
+ next_hop, protocol, st_rt, dut
+ )
+ )
return errormsg
else:
missing_routes.append(st_rt)
if nh_found:
- logger.info("Found next_hop %s for all routes in RIB of"
- " router %s\n", next_hop, dut)
+ logger.info(
+ "Found next_hop %s for all routes in RIB of" " router %s\n",
+ next_hop,
+ dut,
+ )
if not st_found and len(missing_routes) > 0:
- errormsg = "Missing route in RIB of router {}, routes: " \
- "{}\n".format(dut, missing_routes)
+ errormsg = (
+ "Missing route in RIB of router {}, routes: "
+ "{}\n".format(dut, missing_routes)
+ )
return errormsg
- logger.info("Verified routes in router %s RIB, found routes"
- " are: %s\n", dut, found_routes)
+ logger.info(
+ "Verified routes in router %s RIB, found routes" " are: %s\n",
+ dut,
+ found_routes,
+ )
continue
if "bgp" in input_dict[routerInput]:
- if 'advertise_networks' in input_dict[routerInput]["bgp"]\
- ["address_family"][addr_type]["unicast"]:
+ if (
+ "advertise_networks"
+ in input_dict[routerInput]["bgp"]["address_family"][addr_type][
+ "unicast"
+ ]
+ ):
found_routes = []
missing_routes = []
- advertise_network = input_dict[routerInput]["bgp"]\
- ["address_family"][addr_type]["unicast"]\
- ["advertise_networks"]
+ advertise_network = input_dict[routerInput]["bgp"][
+ "address_family"
+ ][addr_type]["unicast"]["advertise_networks"]
for advertise_network_dict in advertise_network:
start_ip = advertise_network_dict["network"]
next_hop = [next_hop]
for index, nh in enumerate(next_hop):
- if rib_routes_json[st_rt][0]\
- ['nexthops'][index]['ip'] == nh:
+ if (
+ rib_routes_json[st_rt][0]["nexthops"][
+ index
+ ]["ip"]
+ == nh
+ ):
nh_found = True
else:
- errormsg=("Nexthop {} is Missing"
- " for {} route {} in "
- "RIB of router {}\n".\
- format(next_hop,
- protocol,
- st_rt, dut))
+ errormsg = (
+ "Nexthop {} is Missing"
+ " for {} route {} in "
+ "RIB of router {}\n".format(
+ next_hop, protocol, st_rt, dut
+ )
+ )
return errormsg
else:
missing_routes.append(st_rt)
if nh_found:
- logger.info("Found next_hop {} for all routes in RIB"
- " of router {}\n".format(next_hop, dut))
+ logger.info(
+ "Found next_hop {} for all routes in RIB"
+ " of router {}\n".format(next_hop, dut)
+ )
if not found and len(missing_routes) > 0:
- errormsg = ("Missing {} route in RIB of router {}, "
- "routes: {} \n".\
- format(addr_type, dut, missing_routes))
+ errormsg = (
+ "Missing {} route in RIB of router {}, "
+ "routes: {} \n".format(addr_type, dut, missing_routes)
+ )
return errormsg
- logger.info("Verified {} routes in router {} RIB, found"
- " routes are: {}\n".\
- format(addr_type, dut, found_routes))
+ logger.info(
+ "Verified {} routes in router {} RIB, found"
+ " routes are: {}\n".format(addr_type, dut, found_routes)
+ )
logger.debug("Exiting lib API: verify_rib()")
return True
command = "show ipv6 route json"
show_ip_route_json = run_frr_cmd(rnode, command, isjson=True)
- logger.info("Verifying admin distance for static route %s"
- " under dut %s:", static_route, router)
+ logger.info(
+ "Verifying admin distance for static route %s" " under dut %s:",
+ static_route,
+ router,
+ )
network = static_route["network"]
next_hop = static_route["next_hop"]
admin_distance = static_route["admin_distance"]
if network in show_ip_route_json:
if route_data["nexthops"][0]["ip"] == next_hop:
if route_data["distance"] != admin_distance:
- errormsg = ("Verification failed: admin distance"
- " for static route {} under dut {},"
- " found:{} but expected:{}".
- format(static_route, router,
- route_data["distance"],
- admin_distance))
+ errormsg = (
+ "Verification failed: admin distance"
+ " for static route {} under dut {},"
+ " found:{} but expected:{}".format(
+ static_route,
+ router,
+ route_data["distance"],
+ admin_distance,
+ )
+ )
return errormsg
else:
- logger.info("Verification successful: admin"
- " distance for static route %s under"
- " dut %s, found:%s", static_route,
- router, route_data["distance"])
+ logger.info(
+ "Verification successful: admin"
+ " distance for static route %s under"
+ " dut %s, found:%s",
+ static_route,
+ router,
+ route_data["distance"],
+ )
else:
- errormsg = ("Static route {} not found in "
- "show_ip_route_json for dut {}".
- format(network, router))
+ errormsg = (
+ "Static route {} not found in "
+ "show_ip_route_json for dut {}".format(network, router)
+ )
return errormsg
logger.debug("Exiting lib API: verify_admin_distance_for_static_routes()")
for prefix_list in prefix_lists_addr[addr_type].keys():
if prefix_list in show_prefix_list:
- errormsg = ("Prefix list {} is/are present in the router"
- " {}".format(prefix_list, router))
+ errormsg = (
+ "Prefix list {} is/are present in the router"
+ " {}".format(prefix_list, router)
+ )
return errormsg
- logger.info("Prefix list %s is/are not present in the router"
- " from router %s", prefix_list, router)
+ logger.info(
+ "Prefix list %s is/are not present in the router" " from router %s",
+ prefix_list,
+ router,
+ )
logger.debug("Exiting lib API: verify_prefix_lists()")
return True
route_maps = input_dict[router]["route_maps"]
for route_map in route_maps:
if route_map in show_route_maps:
- errormsg = ("Route map {} is not deleted from router"
- " {}".format(route_map, router))
+ errormsg = "Route map {} is not deleted from router" " {}".format(
+ route_map, router
+ )
return errormsg
- logger.info("Route map %s is/are deleted successfully from"
- " router %s", route_maps, router)
+ logger.info(
+ "Route map %s is/are deleted successfully from" " router %s",
+ route_maps,
+ router,
+ )
logger.debug("Exiting lib API: verify_route_maps()")
return True
rnode = tgen.routers()[router]
- logger.debug("Verifying BGP community attributes on dut %s: for %s "
- "network %s", router, addr_type, network)
+ logger.debug(
+ "Verifying BGP community attributes on dut %s: for %s " "network %s",
+ router,
+ addr_type,
+ network,
+ )
for net in network:
cmd = "show bgp {} {} json".format(addr_type, net)
show_bgp_json = rnode.vtysh_cmd(cmd, isjson=True)
logger.info(show_bgp_json)
if "paths" not in show_bgp_json:
- return "Prefix {} not found in BGP table of router: {}". \
- format(net, router)
+ return "Prefix {} not found in BGP table of router: {}".format(net, router)
as_paths = show_bgp_json["paths"]
found = False
for i in range(len(as_paths)):
- if "largeCommunity" in show_bgp_json["paths"][i] or \
- "community" in show_bgp_json["paths"][i]:
+ if (
+ "largeCommunity" in show_bgp_json["paths"][i]
+ or "community" in show_bgp_json["paths"][i]
+ ):
found = True
- logger.info("Large Community attribute is found for route:"
- " %s in router: %s", net, router)
+ logger.info(
+ "Large Community attribute is found for route:" " %s in router: %s",
+ net,
+ router,
+ )
if input_dict is not None:
for criteria, comm_val in input_dict.items():
- show_val = show_bgp_json["paths"][i][criteria][
- "string"]
+ show_val = show_bgp_json["paths"][i][criteria]["string"]
if comm_val == show_val:
- logger.info("Verifying BGP %s for prefix: %s"
- " in router: %s, found expected"
- " value: %s", criteria, net, router,
- comm_val)
+ logger.info(
+ "Verifying BGP %s for prefix: %s"
+ " in router: %s, found expected"
+ " value: %s",
+ criteria,
+ net,
+ router,
+ comm_val,
+ )
else:
- errormsg = "Failed: Verifying BGP attribute" \
- " {} for route: {} in router: {}" \
- ", expected value: {} but found" \
- ": {}".format(
- criteria, net, router, comm_val,
- show_val)
+ errormsg = (
+ "Failed: Verifying BGP attribute"
+ " {} for route: {} in router: {}"
+ ", expected value: {} but found"
+ ": {}".format(criteria, net, router, comm_val, show_val)
+ )
return errormsg
if not found:
errormsg = (
"Large Community attribute is not found for route: "
- "{} in router: {} ".format(net, router))
+ "{} in router: {} ".format(net, router)
+ )
return errormsg
logger.debug("Exiting lib API: verify_bgp_community()")
rnode = tgen.routers()[router]
- logger.info("Verifying large-community is created for dut %s:",
- router)
+ logger.info("Verifying large-community is created for dut %s:", router)
for comm_data in input_dict[router]["bgp_community_lists"]:
comm_name = comm_data["name"]
comm_type = comm_data["community_type"]
- show_bgp_community = \
- run_frr_cmd(rnode,
- "show bgp large-community-list {} detail".
- format(comm_name))
+ show_bgp_community = run_frr_cmd(
+ rnode, "show bgp large-community-list {} detail".format(comm_name)
+ )
# Verify community list and type
- if comm_name in show_bgp_community and comm_type in \
- show_bgp_community:
- logger.info("BGP %s large-community-list %s is"
- " created", comm_type, comm_name)
+ if comm_name in show_bgp_community and comm_type in show_bgp_community:
+ logger.info(
+ "BGP %s large-community-list %s is" " created", comm_type, comm_name
+ )
else:
- errormsg = "BGP {} large-community-list {} is not" \
- " created".format(comm_type, comm_name)
+ errormsg = "BGP {} large-community-list {} is not" " created".format(
+ comm_type, comm_name
+ )
return errormsg
logger.debug("Exiting lib API: verify_create_community_list()")
# Save the Current Working Directory to find lib files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../../'))
+sys.path.append(os.path.join(CWD, "../../"))
# pylint: disable=C0413
from lib.topotest import json_cmp
+
def test_json_intersect_true():
"Test simple correct JSON intersections"
dcomplete = {
- 'i1': 'item1',
- 'i2': 'item2',
- 'i3': 'item3',
- 'i100': 'item4',
+ "i1": "item1",
+ "i2": "item2",
+ "i3": "item3",
+ "i100": "item4",
}
dsub1 = {
- 'i1': 'item1',
- 'i3': 'item3',
+ "i1": "item1",
+ "i3": "item3",
}
dsub2 = {
- 'i1': 'item1',
- 'i2': 'item2',
+ "i1": "item1",
+ "i2": "item2",
}
dsub3 = {
- 'i100': 'item4',
- 'i2': 'item2',
+ "i100": "item4",
+ "i2": "item2",
}
dsub4 = {
- 'i50': None,
- 'i100': 'item4',
+ "i50": None,
+ "i100": "item4",
}
assert json_cmp(dcomplete, dsub1) is None
assert json_cmp(dcomplete, dsub3) is None
assert json_cmp(dcomplete, dsub4) is None
+
def test_json_intersect_false():
"Test simple incorrect JSON intersections"
dcomplete = {
- 'i1': 'item1',
- 'i2': 'item2',
- 'i3': 'item3',
- 'i100': 'item4',
+ "i1": "item1",
+ "i2": "item2",
+ "i3": "item3",
+ "i100": "item4",
}
# Incorrect value for 'i1'
dsub1 = {
- 'i1': 'item3',
- 'i3': 'item3',
+ "i1": "item3",
+ "i3": "item3",
}
# Non-existing key 'i5'
dsub2 = {
- 'i1': 'item1',
- 'i5': 'item2',
+ "i1": "item1",
+ "i5": "item2",
}
# Key should not exist
dsub3 = {
- 'i100': None,
+ "i100": None,
}
assert json_cmp(dcomplete, dsub1) is not None
assert json_cmp(dcomplete, dsub2) is not None
assert json_cmp(dcomplete, dsub3) is not None
+
def test_json_intersect_multilevel_true():
"Test multi level correct JSON intersections"
dcomplete = {
- 'i1': 'item1',
- 'i2': 'item2',
- 'i3': {
- 'i100': 'item100',
+ "i1": "item1",
+ "i2": "item2",
+ "i3": {"i100": "item100",},
+ "i4": {
+ "i41": {"i411": "item411",},
+ "i42": {"i421": "item421", "i422": "item422",},
},
- 'i4': {
- 'i41': {
- 'i411': 'item411',
- },
- 'i42': {
- 'i421': 'item421',
- 'i422': 'item422',
- }
- }
}
dsub1 = {
- 'i1': 'item1',
- 'i3': {
- 'i100': 'item100',
- },
- 'i10': None,
+ "i1": "item1",
+ "i3": {"i100": "item100",},
+ "i10": None,
}
dsub2 = {
- 'i1': 'item1',
- 'i2': 'item2',
- 'i3': {},
+ "i1": "item1",
+ "i2": "item2",
+ "i3": {},
}
dsub3 = {
- 'i2': 'item2',
- 'i4': {
- 'i41': {
- 'i411': 'item411',
- },
- 'i42': {
- 'i422': 'item422',
- 'i450': None,
- }
- }
- }
- dsub4 = {
- 'i2': 'item2',
- 'i4': {
- 'i41': {},
- 'i42': {
- 'i450': None,
- }
- }
- }
- dsub5 = {
- 'i2': 'item2',
- 'i3': {
- 'i100': 'item100',
- },
- 'i4': {
- 'i42': {
- 'i450': None,
- }
- }
+ "i2": "item2",
+ "i4": {"i41": {"i411": "item411",}, "i42": {"i422": "item422", "i450": None,}},
}
+ dsub4 = {"i2": "item2", "i4": {"i41": {}, "i42": {"i450": None,}}}
+ dsub5 = {"i2": "item2", "i3": {"i100": "item100",}, "i4": {"i42": {"i450": None,}}}
assert json_cmp(dcomplete, dsub1) is None
assert json_cmp(dcomplete, dsub2) is None
assert json_cmp(dcomplete, dsub4) is None
assert json_cmp(dcomplete, dsub5) is None
+
def test_json_intersect_multilevel_false():
"Test multi level incorrect JSON intersections"
dcomplete = {
- 'i1': 'item1',
- 'i2': 'item2',
- 'i3': {
- 'i100': 'item100',
+ "i1": "item1",
+ "i2": "item2",
+ "i3": {"i100": "item100",},
+ "i4": {
+ "i41": {"i411": "item411",},
+ "i42": {"i421": "item421", "i422": "item422",},
},
- 'i4': {
- 'i41': {
- 'i411': 'item411',
- },
- 'i42': {
- 'i421': 'item421',
- 'i422': 'item422',
- }
- }
}
# Incorrect sub-level value
dsub1 = {
- 'i1': 'item1',
- 'i3': {
- 'i100': 'item00',
- },
- 'i10': None,
+ "i1": "item1",
+ "i3": {"i100": "item00",},
+ "i10": None,
}
# Inexistent sub-level
dsub2 = {
- 'i1': 'item1',
- 'i2': 'item2',
- 'i3': None,
+ "i1": "item1",
+ "i2": "item2",
+ "i3": None,
}
# Inexistent sub-level value
dsub3 = {
- 'i1': 'item1',
- 'i3': {
- 'i100': None,
- },
+ "i1": "item1",
+ "i3": {"i100": None,},
}
# Inexistent sub-sub-level value
- dsub4 = {
- 'i4': {
- 'i41': {
- 'i412': 'item412',
- },
- 'i42': {
- 'i421': 'item421',
- }
- }
- }
+ dsub4 = {"i4": {"i41": {"i412": "item412",}, "i42": {"i421": "item421",}}}
# Invalid sub-sub-level value
- dsub5 = {
- 'i4': {
- 'i41': {
- 'i411': 'item411',
- },
- 'i42': {
- 'i421': 'item420000',
- }
- }
- }
+ dsub5 = {"i4": {"i41": {"i411": "item411",}, "i42": {"i421": "item420000",}}}
# sub-sub-level should be value
- dsub6 = {
- 'i4': {
- 'i41': {
- 'i411': 'item411',
- },
- 'i42': 'foobar',
- }
- }
+ dsub6 = {"i4": {"i41": {"i411": "item411",}, "i42": "foobar",}}
assert json_cmp(dcomplete, dsub1) is not None
assert json_cmp(dcomplete, dsub2) is not None
assert json_cmp(dcomplete, dsub5) is not None
assert json_cmp(dcomplete, dsub6) is not None
+
def test_json_with_list_sucess():
"Test successful json comparisons that have lists."
dcomplete = {
- 'list': [
- {
- 'i1': 'item 1',
- 'i2': 'item 2',
- },
- {
- 'i10': 'item 10',
- },
- ],
- 'i100': 'item 100',
+ "list": [{"i1": "item 1", "i2": "item 2",}, {"i10": "item 10",},],
+ "i100": "item 100",
}
# Test list type
dsub1 = {
- 'list': [],
+ "list": [],
}
# Test list correct list items
dsub2 = {
- 'list': [
- {
- 'i1': 'item 1',
- },
- ],
- 'i100': 'item 100',
+ "list": [{"i1": "item 1",},],
+ "i100": "item 100",
}
# Test list correct list size
dsub3 = {
- 'list': [
- {}, {},
- ],
+ "list": [{}, {},],
}
assert json_cmp(dcomplete, dsub1) is None
assert json_cmp(dcomplete, dsub2) is None
assert json_cmp(dcomplete, dsub3) is None
+
def test_json_with_list_failure():
"Test failed json comparisons that have lists."
dcomplete = {
- 'list': [
- {
- 'i1': 'item 1',
- 'i2': 'item 2',
- },
- {
- 'i10': 'item 10',
- },
- ],
- 'i100': 'item 100',
+ "list": [{"i1": "item 1", "i2": "item 2",}, {"i10": "item 10",},],
+ "i100": "item 100",
}
# Test list type
dsub1 = {
- 'list': {},
+ "list": {},
}
# Test list incorrect list items
dsub2 = {
- 'list': [
- {
- 'i1': 'item 2',
- },
- ],
- 'i100': 'item 100',
+ "list": [{"i1": "item 2",},],
+ "i100": "item 100",
}
# Test list correct list size
dsub3 = {
- 'list': [
- {}, {}, {},
- ],
+ "list": [{}, {}, {},],
}
assert json_cmp(dcomplete, dsub1) is not None
"Test JSON encoded data that starts with a list that should succeed."
dcomplete = [
- {
- "id": 100,
- "value": "abc",
- },
- {
- "id": 200,
- "value": "abcd",
- },
- {
- "id": 300,
- "value": "abcde",
- },
+ {"id": 100, "value": "abc",},
+ {"id": 200, "value": "abcd",},
+ {"id": 300, "value": "abcde",},
]
- dsub1 = [
- {
- "id": 100,
- "value": "abc",
- }
- ]
+ dsub1 = [{"id": 100, "value": "abc",}]
- dsub2 = [
- {
- "id": 100,
- "value": "abc",
- },
- {
- "id": 200,
- "value": "abcd",
- }
- ]
+ dsub2 = [{"id": 100, "value": "abc",}, {"id": 200, "value": "abcd",}]
- dsub3 = [
- {
- "id": 300,
- "value": "abcde",
- }
- ]
+ dsub3 = [{"id": 300, "value": "abcde",}]
- dsub4 = [
- ]
+ dsub4 = []
- dsub5 = [
- {
- "id": 100,
- }
- ]
+ dsub5 = [{"id": 100,}]
assert json_cmp(dcomplete, dsub1) is None
assert json_cmp(dcomplete, dsub2) is None
"Test JSON encoded data that starts with a list that should fail."
dcomplete = [
- {
- "id": 100,
- "value": "abc"
- },
- {
- "id": 200,
- "value": "abcd"
- },
- {
- "id": 300,
- "value": "abcde"
- },
+ {"id": 100, "value": "abc"},
+ {"id": 200, "value": "abcd"},
+ {"id": 300, "value": "abcde"},
]
- dsub1 = [
- {
- "id": 100,
- "value": "abcd",
- }
- ]
+ dsub1 = [{"id": 100, "value": "abcd",}]
- dsub2 = [
- {
- "id": 100,
- "value": "abc",
- },
- {
- "id": 200,
- "value": "abc",
- }
- ]
+ dsub2 = [{"id": 100, "value": "abc",}, {"id": 200, "value": "abc",}]
- dsub3 = [
- {
- "id": 100,
- "value": "abc",
- },
- {
- "id": 350,
- "value": "abcde",
- }
- ]
+ dsub3 = [{"id": 100, "value": "abc",}, {"id": 350, "value": "abcde",}]
- dsub4 = [
- {
- "value": "abcx",
- },
- {
- "id": 300,
- "value": "abcde",
- }
- ]
+ dsub4 = [{"value": "abcx",}, {"id": 300, "value": "abcde",}]
assert json_cmp(dcomplete, dsub1) is not None
assert json_cmp(dcomplete, dsub2) is not None
assert json_cmp(dcomplete, dsub4) is not None
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(pytest.main())
# Save the Current Working Directory to find lib files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../../'))
+sys.path.append(os.path.join(CWD, "../../"))
# pylint: disable=C0413
from lib.topotest import run_and_expect_type
+
def test_run_and_expect_type():
"Test basic `run_and_expect_type` functionality."
return True
# Test value success.
- success, value = run_and_expect_type(return_true, bool, count=1, wait=0, avalue=True)
+ success, value = run_and_expect_type(
+ return_true, bool, count=1, wait=0, avalue=True
+ )
assert success is True
assert value is True
# Test value failure.
- success, value = run_and_expect_type(return_true, bool, count=1, wait=0, avalue=False)
+ success, value = run_and_expect_type(
+ return_true, bool, count=1, wait=0, avalue=False
+ )
assert success is False
assert value is True
assert value is True
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(pytest.main())
# Save the Current Working Directory to find lib files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../../'))
+sys.path.append(os.path.join(CWD, "../../"))
# pylint: disable=C0413
from lib.topotest import version_cmp
+
def test_valid_versions():
"Test valid version compare results"
- curver = '3.0'
- samever = '3'
- oldver = '2.0'
- newver = '3.0.1'
- newerver = '3.0.11'
- vercustom = '3.0-dev'
- verysmallinc = '3.0.0.0.0.0.0.1'
+ curver = "3.0"
+ samever = "3"
+ oldver = "2.0"
+ newver = "3.0.1"
+ newerver = "3.0.11"
+ vercustom = "3.0-dev"
+ verysmallinc = "3.0.0.0.0.0.0.1"
assert version_cmp(curver, oldver) == 1
assert version_cmp(curver, newver) == -1
assert version_cmp(verysmallinc, verysmallinc) == 0
assert version_cmp(vercustom, verysmallinc) == -1
+
def test_invalid_versions():
"Test invalid version strings"
- curver = '3.0'
- badver1 = '.1'
- badver2 = '-1.0'
- badver3 = '.'
- badver4 = '3.-0.3'
+ curver = "3.0"
+ badver1 = ".1"
+ badver2 = "-1.0"
+ badver3 = "."
+ badver4 = "3.-0.3"
with pytest.raises(ValueError):
assert version_cmp(curver, badver1)
assert version_cmp(curver, badver3)
assert version_cmp(curver, badver4)
+
def test_regression_1():
"""
Test regression on the following type of comparison: '3.0.2' > '3'
Expected result is 1.
"""
- assert version_cmp('3.0.2', '3') == 1
+ assert version_cmp("3.0.2", "3") == 1
# all test functions without declaring a test local variable.
global_tgen = None
+
def get_topogen(topo=None):
"""
Helper function to retrieve Topogen. Must be called with `topo` when called
global_tgen.topo = topo
return global_tgen
+
def set_topogen(tgen):
"Helper function to set Topogen"
# pylint: disable=W0603
global global_tgen
global_tgen = tgen
+
#
# Main class: topology builder
#
# Topogen configuration defaults
tgen_defaults = {
- 'verbosity': 'info',
- 'frrdir': '/usr/lib/frr',
- 'quaggadir': '/usr/lib/quagga',
- 'routertype': 'frr',
- 'memleak_path': None,
+ "verbosity": "info",
+ "frrdir": "/usr/lib/frr",
+ "quaggadir": "/usr/lib/quagga",
+ "routertype": "frr",
+ "memleak_path": None,
}
+
class Topogen(object):
"A topology test builder helper."
- CONFIG_SECTION = 'topogen'
+ CONFIG_SECTION = "topogen"
- def __init__(self, cls, modname='unnamed'):
+ def __init__(self, cls, modname="unnamed"):
"""
Topogen initialization function, takes the following arguments:
* `cls`: the topology class that is child of mininet.topo
self.switchn = 1
self.modname = modname
self.errorsd = {}
- self.errors = ''
+ self.errors = ""
self.peern = 1
self._init_topo(cls)
- logger.info('loading topology: {}'.format(self.modname))
+ logger.info("loading topology: {}".format(self.modname))
@staticmethod
def _mininet_reset():
"Reset the mininet environment"
# Clean up the mininet environment
- os.system('mn -c > /dev/null 2>&1')
+ os.system("mn -c > /dev/null 2>&1")
def _init_topo(self, cls):
"""
# Test for MPLS Kernel modules available
self.hasmpls = False
- if not topotest.module_present('mpls-router'):
- logger.info('MPLS tests will not run (missing mpls-router kernel module)')
- elif not topotest.module_present('mpls-iptunnel'):
- logger.info('MPLS tests will not run (missing mpls-iptunnel kernel module)')
+ if not topotest.module_present("mpls-router"):
+ logger.info("MPLS tests will not run (missing mpls-router kernel module)")
+ elif not topotest.module_present("mpls-iptunnel"):
+ logger.info("MPLS tests will not run (missing mpls-iptunnel kernel module)")
else:
self.hasmpls = True
# Load the default topology configurations
topotests.
"""
self.config = configparser.ConfigParser(tgen_defaults)
- pytestini_path = os.path.join(CWD, '../pytest.ini')
+ pytestini_path = os.path.join(CWD, "../pytest.ini")
self.config.read(pytestini_path)
def add_router(self, name=None, cls=topotest.Router, **params):
Returns a TopoRouter.
"""
if name is None:
- name = 'r{}'.format(self.routern)
+ name = "r{}".format(self.routern)
if name in self.gears:
- raise KeyError('router already exists')
+ raise KeyError("router already exists")
- params['frrdir'] = self.config.get(self.CONFIG_SECTION, 'frrdir')
- params['quaggadir'] = self.config.get(self.CONFIG_SECTION, 'quaggadir')
- params['memleak_path'] = self.config.get(self.CONFIG_SECTION, 'memleak_path')
- if not params.has_key('routertype'):
- params['routertype'] = self.config.get(self.CONFIG_SECTION, 'routertype')
+ params["frrdir"] = self.config.get(self.CONFIG_SECTION, "frrdir")
+ params["quaggadir"] = self.config.get(self.CONFIG_SECTION, "quaggadir")
+ params["memleak_path"] = self.config.get(self.CONFIG_SECTION, "memleak_path")
+ if not params.has_key("routertype"):
+ params["routertype"] = self.config.get(self.CONFIG_SECTION, "routertype")
self.gears[name] = TopoRouter(self, cls, name, **params)
self.routern += 1
Returns the switch name and number.
"""
if name is None:
- name = 's{}'.format(self.switchn)
+ name = "s{}".format(self.switchn)
if name in self.gears:
- raise KeyError('switch already exists')
+ raise KeyError("switch already exists")
self.gears[name] = TopoSwitch(self, cls, name)
self.switchn += 1
* `defaultRoute`: the peer default route (e.g. 'via 1.2.3.1')
"""
if name is None:
- name = 'peer{}'.format(self.peern)
+ name = "peer{}".format(self.peern)
if name in self.gears:
- raise KeyError('exabgp peer already exists')
+ raise KeyError("exabgp peer already exists")
self.gears[name] = TopoExaBGP(self, name, ip=ip, defaultRoute=defaultRoute)
self.peern += 1
* TopoSwitch
"""
if not isinstance(node1, TopoGear):
- raise ValueError('invalid node1 type')
+ raise ValueError("invalid node1 type")
if not isinstance(node2, TopoGear):
- raise ValueError('invalid node2 type')
+ raise ValueError("invalid node2 type")
if ifname1 is None:
ifname1 = node1.new_link()
node1.register_link(ifname1, node2, ifname2)
node2.register_link(ifname2, node1, ifname1)
- self.topo.addLink(node1.name, node2.name,
- intfName1=ifname1, intfName2=ifname2)
+ self.topo.addLink(node1.name, node2.name, intfName1=ifname1, intfName2=ifname2)
def get_gears(self, geartype):
"""
# Do stuff
```
"""
- return dict((name, gear) for name, gear in self.gears.iteritems()
- if isinstance(gear, geartype))
+ return dict(
+ (name, gear)
+ for name, gear in self.gears.iteritems()
+ if isinstance(gear, geartype)
+ )
def routers(self):
"""
"""
# If log_level is not specified use the configuration.
if log_level is None:
- log_level = self.config.get(self.CONFIG_SECTION, 'verbosity')
+ log_level = self.config.get(self.CONFIG_SECTION, "verbosity")
# Set python logger level
logger_config.set_log_level(log_level)
# Run mininet
- if log_level == 'debug':
+ if log_level == "debug":
setLogLevel(log_level)
- logger.info('starting topology: {}'.format(self.modname))
+ logger.info("starting topology: {}".format(self.modname))
self.net.start()
def start_router(self, router=None):
first is a simple kill with no sleep, the second will sleep if not
killed and try with a different signal.
"""
- logger.info('stopping topology: {}'.format(self.modname))
+ logger.info("stopping topology: {}".format(self.modname))
errors = ""
for gear in self.gears.values():
gear.stop(False, False)
"""
if not sys.stdin.isatty():
raise EnvironmentError(
- 'you must run pytest with \'-s\' in order to use mininet CLI')
+ "you must run pytest with '-s' in order to use mininet CLI"
+ )
CLI(self.net)
if self.routers_have_failure():
return False
- memleak_file = (os.environ.get('TOPOTESTS_CHECK_MEMLEAK') or
- self.config.get(self.CONFIG_SECTION, 'memleak_path'))
+ memleak_file = os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.config.get(
+ self.CONFIG_SECTION, "memleak_path"
+ )
if memleak_file is None:
return False
return True
code = len(self.errorsd)
self.errorsd[code] = message
- self.errors += '\n{}: {}'.format(code, message)
+ self.errors += "\n{}: {}".format(code, message)
def has_errors(self):
"Returns whether errors exist or not."
if self.has_errors():
return True
- errors = ''
+ errors = ""
router_list = self.routers().values()
for router in router_list:
result = router.check_router_running()
- if result != '':
- errors += result + '\n'
+ if result != "":
+ errors += result + "\n"
- if errors != '':
- self.set_error(errors, 'router_error')
+ if errors != "":
+ self.set_error(errors, "router_error")
assert False, errors
return True
return False
+
#
# Topology gears (equipment)
#
+
class TopoGear(object):
"Abstract class for type checking"
self.linkn = 0
def __str__(self):
- links = ''
+ links = ""
for myif, dest in self.links.iteritems():
_, destif = dest
- if links != '':
- links += ','
+ if links != "":
+ links += ","
links += '"{}"<->"{}"'.format(myif, destif)
return 'TopoGear<name="{}",links=[{}]>'.format(self.name, links)
enabled: whether we should enable or disable the interface
"""
if myif not in self.links.keys():
- raise KeyError('interface doesn\'t exists')
+ raise KeyError("interface doesn't exists")
if enabled is True:
- operation = 'up'
+ operation = "up"
else:
- operation = 'down'
+ operation = "down"
- logger.info('setting node "{}" link "{}" to state "{}"'.format(
- self.name, myif, operation
- ))
- extract=''
+ logger.info(
+ 'setting node "{}" link "{}" to state "{}"'.format(
+ self.name, myif, operation
+ )
+ )
+ extract = ""
if netns is not None:
- extract = 'ip netns exec {} '.format(netns)
- return self.run('{}ip link set dev {} {}'.format(extract, myif, operation))
+ extract = "ip netns exec {} ".format(netns)
+ return self.run("{}ip link set dev {} {}".format(extract, myif, operation))
def peer_link_enable(self, myif, enabled=True, netns=None):
"""
peer disables their interface our interface status changes to no link.
"""
if myif not in self.links.keys():
- raise KeyError('interface doesn\'t exists')
+ raise KeyError("interface doesn't exists")
node, nodeif = self.links[myif]
node.link_enable(nodeif, enabled, netns)
NOTE: This function should only be called by Topogen.
"""
- ifname = '{}-eth{}'.format(self.name, self.linkn)
+ ifname = "{}-eth{}".format(self.name, self.linkn)
self.linkn += 1
return ifname
NOTE: This function should only be called by Topogen.
"""
if myif in self.links.keys():
- raise KeyError('interface already exists')
+ raise KeyError("interface already exists")
self.links[myif] = (node, nodeif)
+
class TopoRouter(TopoGear):
"""
Router abstraction.
# The default required directories by Quagga/FRR
PRIVATE_DIRS = [
- '/etc/frr',
- '/etc/quagga',
- '/var/run/frr',
- '/var/run/quagga',
- '/var/log'
+ "/etc/frr",
+ "/etc/quagga",
+ "/var/run/frr",
+ "/var/run/quagga",
+ "/var/log",
]
# Router Daemon enumeration definition.
RD_BFD = 13
RD_SHARP = 14
RD = {
- RD_ZEBRA: 'zebra',
- RD_RIP: 'ripd',
- RD_RIPNG: 'ripngd',
- RD_OSPF: 'ospfd',
- RD_OSPF6: 'ospf6d',
- RD_ISIS: 'isisd',
- RD_BGP: 'bgpd',
- RD_PIM: 'pimd',
- RD_LDP: 'ldpd',
- RD_EIGRP: 'eigrpd',
- RD_NHRP: 'nhrpd',
- RD_STATIC: 'staticd',
- RD_BFD: 'bfdd',
- RD_SHARP: 'sharpd',
+ RD_ZEBRA: "zebra",
+ RD_RIP: "ripd",
+ RD_RIPNG: "ripngd",
+ RD_OSPF: "ospfd",
+ RD_OSPF6: "ospf6d",
+ RD_ISIS: "isisd",
+ RD_BGP: "bgpd",
+ RD_PIM: "pimd",
+ RD_LDP: "ldpd",
+ RD_EIGRP: "eigrpd",
+ RD_NHRP: "nhrpd",
+ RD_STATIC: "staticd",
+ RD_BFD: "bfdd",
+ RD_SHARP: "sharpd",
}
def __init__(self, tgen, cls, name, **params):
self.name = name
self.cls = cls
self.options = {}
- self.routertype = params.get('routertype', 'frr')
- if not params.has_key('privateDirs'):
- params['privateDirs'] = self.PRIVATE_DIRS
+ self.routertype = params.get("routertype", "frr")
+ if not params.has_key("privateDirs"):
+ params["privateDirs"] = self.PRIVATE_DIRS
- self.options['memleak_path'] = params.get('memleak_path', None)
+ self.options["memleak_path"] = params.get("memleak_path", None)
# Create new log directory
- self.logdir = '/tmp/topotests/{}'.format(self.tgen.modname)
+ self.logdir = "/tmp/topotests/{}".format(self.tgen.modname)
# Clean up before starting new log files: avoids removing just created
# log files.
self._prepare_tmpfiles()
# Propagate the router log directory
- params['logdir'] = self.logdir
+ params["logdir"] = self.logdir
- #setup the per node directory
- dir = '{}/{}'.format(self.logdir, self.name)
- os.system('mkdir -p ' + dir)
- os.system('chmod -R go+rw /tmp/topotests')
+ # setup the per node directory
+ dir = "{}/{}".format(self.logdir, self.name)
+ os.system("mkdir -p " + dir)
+ os.system("chmod -R go+rw /tmp/topotests")
# Open router log file
- logfile = '{0}/{1}.log'.format(self.logdir, name)
+ logfile = "{0}/{1}.log".format(self.logdir, name)
self.logger = logger_config.get_logger(name=name, target=logfile)
self.tgen.topo.addNode(self.name, cls=self.cls, **params)
def __str__(self):
gear = super(TopoRouter, self).__str__()
- gear += ' TopoRouter<>'
+ gear += " TopoRouter<>"
return gear
def _prepare_tmpfiles(self):
os.chmod(self.logdir, 0o1777)
# Try to find relevant old logfiles in /tmp and delete them
- map(os.remove, glob.glob('{}/{}/*.log'.format(self.logdir, self.name)))
+ map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
# Remove old core files
- map(os.remove, glob.glob('{}/{}/*.dmp'.format(self.logdir, self.name)))
+ map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
def check_capability(self, daemon, param):
"""
"""
Run a series of checks and returns a status string.
"""
- self.logger.info('checking if daemons are running')
+ self.logger.info("checking if daemons are running")
return self.tgen.net[self.name].checkRouterRunning()
def start(self):
* Start daemons (e.g. FRR/Quagga)
* Configure daemon logging files
"""
- self.logger.debug('starting')
+ self.logger.debug("starting")
nrouter = self.tgen.net[self.name]
result = nrouter.startRouter(self.tgen)
for daemon, enabled in nrouter.daemons.iteritems():
if enabled == 0:
continue
- self.vtysh_cmd('configure terminal\nlog commands\nlog file {}.log'.format(
- daemon), daemon=daemon)
+ self.vtysh_cmd(
+ "configure terminal\nlog commands\nlog file {}.log".format(daemon),
+ daemon=daemon,
+ )
- if result != '':
+ if result != "":
self.tgen.set_error(result)
else:
# Enable MPLS processing on all interfaces.
for interface in self.links.keys():
- set_sysctl(nrouter, 'net.mpls.conf.{}.input'.format(interface), 1)
+ set_sysctl(nrouter, "net.mpls.conf.{}.input".format(interface), 1)
return result
Stop router:
* Kill daemons
"""
- self.logger.debug('stopping')
+ self.logger.debug("stopping")
return self.tgen.net[self.name].stopRouter(wait, assertOnError)
def vtysh_cmd(self, command, isjson=False, daemon=None):
return output for each command. See vtysh_multicmd() for more details.
"""
# Detect multi line commands
- if command.find('\n') != -1:
+ if command.find("\n") != -1:
return self.vtysh_multicmd(command, daemon=daemon)
- dparam = ''
+ dparam = ""
if daemon is not None:
- dparam += '-d {}'.format(daemon)
+ dparam += "-d {}".format(daemon)
vtysh_command = 'vtysh {} -c "{}" 2>/dev/null'.format(dparam, command)
output = self.run(vtysh_command)
- self.logger.info('\nvtysh command => {}\nvtysh output <= {}'.format(
- command, output))
+ self.logger.info(
+ "\nvtysh command => {}\nvtysh output <= {}".format(command, output)
+ )
if isjson is False:
return output
try:
return json.loads(output)
except ValueError:
- logger.warning('vtysh_cmd: failed to convert json output')
+ logger.warning("vtysh_cmd: failed to convert json output")
return {}
def vtysh_multicmd(self, commands, pretty_output=True, daemon=None):
# Prepare the temporary file that will hold the commands
fname = topotest.get_file(commands)
- dparam = ''
+ dparam = ""
if daemon is not None:
- dparam += '-d {}'.format(daemon)
+ dparam += "-d {}".format(daemon)
# Run the commands and delete the temporary file
if pretty_output:
- vtysh_command = 'vtysh {} < {}'.format(dparam, fname)
+ vtysh_command = "vtysh {} < {}".format(dparam, fname)
else:
- vtysh_command = 'vtysh {} -f {}'.format(dparam, fname)
+ vtysh_command = "vtysh {} -f {}".format(dparam, fname)
res = self.run(vtysh_command)
os.unlink(fname)
- self.logger.info('\nvtysh command => "{}"\nvtysh output <= "{}"'.format(
- vtysh_command, res))
+ self.logger.info(
+ '\nvtysh command => "{}"\nvtysh output <= "{}"'.format(vtysh_command, res)
+ )
return res
NOTE: to run this you must have the environment variable
TOPOTESTS_CHECK_MEMLEAK set or memleak_path configured in `pytest.ini`.
"""
- memleak_file = os.environ.get('TOPOTESTS_CHECK_MEMLEAK') or self.options['memleak_path']
+ memleak_file = (
+ os.environ.get("TOPOTESTS_CHECK_MEMLEAK") or self.options["memleak_path"]
+ )
if memleak_file is None:
return
self.stop()
- self.logger.info('running memory leak report')
+ self.logger.info("running memory leak report")
self.tgen.net[self.name].report_memory_leaks(memleak_file, testname)
def version_info(self):
"Get equipment information from 'show version'."
- output = self.vtysh_cmd('show version').split('\n')[0]
- columns = topotest.normalize_text(output).split(' ')
+ output = self.vtysh_cmd("show version").split("\n")[0]
+ columns = topotest.normalize_text(output).split(" ")
try:
return {
- 'type': columns[0],
- 'version': columns[1],
+ "type": columns[0],
+ "version": columns[1],
}
except IndexError:
return {
- 'type': None,
- 'version': None,
+ "type": None,
+ "version": None,
}
def has_version(self, cmpop, version):
Compares router type with `rtype`. Returns `True` if the type matches,
otherwise `false`.
"""
- curtype = self.version_info()['type']
+ curtype = self.version_info()["type"]
return rtype == curtype
def has_mpls(self):
nrouter = self.tgen.net[self.name]
return nrouter.hasmpls
+
class TopoSwitch(TopoGear):
"""
Switch abstraction. Has the following properties:
* cls: switch class that will be used to instantiate
* name: switch name
"""
+
# pylint: disable=too-few-public-methods
def __init__(self, tgen, cls, name):
def __str__(self):
gear = super(TopoSwitch, self).__str__()
- gear += ' TopoSwitch<>'
+ gear += " TopoSwitch<>"
return gear
+
class TopoHost(TopoGear):
"Host abstraction."
# pylint: disable=too-few-public-methods
def __str__(self):
gear = super(TopoHost, self).__str__()
gear += ' TopoHost<ip="{}",defaultRoute="{}",privateDirs="{}">'.format(
- self.options['ip'], self.options['defaultRoute'],
- str(self.options['privateDirs']))
+ self.options["ip"],
+ self.options["defaultRoute"],
+ str(self.options["privateDirs"]),
+ )
return gear
+
class TopoExaBGP(TopoHost):
"ExaBGP peer abstraction."
# pylint: disable=too-few-public-methods
PRIVATE_DIRS = [
- '/etc/exabgp',
- '/var/run/exabgp',
- '/var/log',
+ "/etc/exabgp",
+ "/var/run/exabgp",
+ "/var/log",
]
def __init__(self, tgen, name, **params):
has a privateDirs already defined and contains functions to handle ExaBGP
things.
"""
- params['privateDirs'] = self.PRIVATE_DIRS
+ params["privateDirs"] = self.PRIVATE_DIRS
super(TopoExaBGP, self).__init__(tgen, name, **params)
self.tgen.topo.addHost(name, **params)
def __str__(self):
gear = super(TopoExaBGP, self).__str__()
- gear += ' TopoExaBGP<>'.format()
+ gear += " TopoExaBGP<>"
return gear
def start(self, peer_dir, env_file=None):
* Make all python files runnable
* Run ExaBGP with env file `env_file` and configuration peer*/exabgp.cfg
"""
- self.run('mkdir /etc/exabgp')
- self.run('chmod 755 /etc/exabgp')
- self.run('cp {}/* /etc/exabgp/'.format(peer_dir))
+ self.run("mkdir /etc/exabgp")
+ self.run("chmod 755 /etc/exabgp")
+ self.run("cp {}/* /etc/exabgp/".format(peer_dir))
if env_file is not None:
- self.run('cp {} /etc/exabgp/exabgp.env'.format(env_file))
- self.run('chmod 644 /etc/exabgp/*')
- self.run('chmod a+x /etc/exabgp/*.py')
- self.run('chown -R exabgp:exabgp /etc/exabgp')
- output = self.run('exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg')
+ self.run("cp {} /etc/exabgp/exabgp.env".format(env_file))
+ self.run("chmod 644 /etc/exabgp/*")
+ self.run("chmod a+x /etc/exabgp/*.py")
+ self.run("chown -R exabgp:exabgp /etc/exabgp")
+ output = self.run("exabgp -e /etc/exabgp/exabgp.env /etc/exabgp/exabgp.cfg")
if output == None or len(output) == 0:
- output = '<none>'
- logger.info('{} exabgp started, output={}'.format(self.name, output))
+ output = "<none>"
+ logger.info("{} exabgp started, output={}".format(self.name, output))
def stop(self, wait=True, assertOnError=True):
"Stop ExaBGP peer and kill the daemon"
- self.run('kill `cat /var/run/exabgp/exabgp.pid`')
+ self.run("kill `cat /var/run/exabgp/exabgp.pid`")
return ""
ret = True
# Test log path exists before installing handler.
- if not os.path.isdir('/tmp'):
- logger.warning('could not find /tmp for logs')
+ if not os.path.isdir("/tmp"):
+ logger.warning("could not find /tmp for logs")
else:
- os.system('mkdir /tmp/topotests')
+ os.system("mkdir /tmp/topotests")
# Log diagnostics to file so it can be examined later.
- fhandler = logging.FileHandler(filename='/tmp/topotests/diagnostics.txt')
+ fhandler = logging.FileHandler(filename="/tmp/topotests/diagnostics.txt")
fhandler.setLevel(logging.DEBUG)
fhandler.setFormatter(
- logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s')
+ logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
)
logger.addHandler(fhandler)
- logger.info('Running environment diagnostics')
+ logger.info("Running environment diagnostics")
# Load configuration
config = configparser.ConfigParser(tgen_defaults)
- pytestini_path = os.path.join(CWD, '../pytest.ini')
+ pytestini_path = os.path.join(CWD, "../pytest.ini")
config.read(pytestini_path)
# Assert that we are running as root
if os.getuid() != 0:
- logger.error('you must run topotest as root')
+ logger.error("you must run topotest as root")
ret = False
# Assert that we have mininet
- if os.system('which mn >/dev/null 2>/dev/null') != 0:
- logger.error('could not find mininet binary (mininet is not installed)')
+ if os.system("which mn >/dev/null 2>/dev/null") != 0:
+ logger.error("could not find mininet binary (mininet is not installed)")
ret = False
# Assert that we have iproute installed
- if os.system('which ip >/dev/null 2>/dev/null') != 0:
- logger.error('could not find ip binary (iproute is not installed)')
+ if os.system("which ip >/dev/null 2>/dev/null") != 0:
+ logger.error("could not find ip binary (iproute is not installed)")
ret = False
# Assert that we have gdb installed
- if os.system('which gdb >/dev/null 2>/dev/null') != 0:
- logger.error('could not find gdb binary (gdb is not installed)')
+ if os.system("which gdb >/dev/null 2>/dev/null") != 0:
+ logger.error("could not find gdb binary (gdb is not installed)")
ret = False
# Assert that FRR utilities exist
- frrdir = config.get('topogen', 'frrdir')
+ frrdir = config.get("topogen", "frrdir")
hasfrr = False
if not os.path.isdir(frrdir):
- logger.error('could not find {} directory'.format(frrdir))
+ logger.error("could not find {} directory".format(frrdir))
ret = False
else:
hasfrr = True
try:
- pwd.getpwnam('frr')[2]
+ pwd.getpwnam("frr")[2]
except KeyError:
logger.warning('could not find "frr" user')
try:
- grp.getgrnam('frr')[2]
+ grp.getgrnam("frr")[2]
except KeyError:
logger.warning('could not find "frr" group')
try:
- if 'frr' not in grp.getgrnam('frrvty').gr_mem:
- logger.error('"frr" user and group exist, but user is not under "frrvty"')
+ if "frr" not in grp.getgrnam("frrvty").gr_mem:
+ logger.error(
+ '"frr" user and group exist, but user is not under "frrvty"'
+ )
except KeyError:
logger.warning('could not find "frrvty" group')
- for fname in ['zebra', 'ospfd', 'ospf6d', 'bgpd', 'ripd', 'ripngd',
- 'isisd', 'pimd', 'ldpd']:
+ for fname in [
+ "zebra",
+ "ospfd",
+ "ospf6d",
+ "bgpd",
+ "ripd",
+ "ripngd",
+ "isisd",
+ "pimd",
+ "ldpd",
+ ]:
path = os.path.join(frrdir, fname)
if not os.path.isfile(path):
# LDPd is an exception
- if fname == 'ldpd':
- logger.info('could not find {} in {}'.format(fname, frrdir) +
- '(LDPd tests will not run)')
+ if fname == "ldpd":
+ logger.info(
+ "could not find {} in {}".format(fname, frrdir)
+ + "(LDPd tests will not run)"
+ )
continue
- logger.warning('could not find {} in {}'.format(fname, frrdir))
+ logger.warning("could not find {} in {}".format(fname, frrdir))
ret = False
else:
- if fname != 'zebra':
+ if fname != "zebra":
continue
- os.system(
- '{} -v 2>&1 >/tmp/topotests/frr_zebra.txt'.format(path)
- )
+ os.system("{} -v 2>&1 >/tmp/topotests/frr_zebra.txt".format(path))
# Assert that Quagga utilities exist
- quaggadir = config.get('topogen', 'quaggadir')
+ quaggadir = config.get("topogen", "quaggadir")
if hasfrr:
# if we have frr, don't check for quagga
pass
elif not os.path.isdir(quaggadir):
- logger.info('could not find {} directory (quagga tests will not run)'.format(quaggadir))
+ logger.info(
+ "could not find {} directory (quagga tests will not run)".format(quaggadir)
+ )
else:
ret = True
try:
- pwd.getpwnam('quagga')[2]
+ pwd.getpwnam("quagga")[2]
except KeyError:
logger.info('could not find "quagga" user')
try:
- grp.getgrnam('quagga')[2]
+ grp.getgrnam("quagga")[2]
except KeyError:
logger.info('could not find "quagga" group')
try:
- if 'quagga' not in grp.getgrnam('quaggavty').gr_mem:
- logger.error('"quagga" user and group exist, but user is not under "quaggavty"')
+ if "quagga" not in grp.getgrnam("quaggavty").gr_mem:
+ logger.error(
+ '"quagga" user and group exist, but user is not under "quaggavty"'
+ )
except KeyError:
logger.warning('could not find "quaggavty" group')
- for fname in ['zebra', 'ospfd', 'ospf6d', 'bgpd', 'ripd', 'ripngd',
- 'isisd', 'pimd']:
+ for fname in [
+ "zebra",
+ "ospfd",
+ "ospf6d",
+ "bgpd",
+ "ripd",
+ "ripngd",
+ "isisd",
+ "pimd",
+ ]:
path = os.path.join(quaggadir, fname)
if not os.path.isfile(path):
- logger.warning('could not find {} in {}'.format(fname, quaggadir))
+ logger.warning("could not find {} in {}".format(fname, quaggadir))
ret = False
else:
- if fname != 'zebra':
+ if fname != "zebra":
continue
- os.system(
- '{} -v 2>&1 >/tmp/topotests/quagga_zebra.txt'.format(path)
- )
+ os.system("{} -v 2>&1 >/tmp/topotests/quagga_zebra.txt".format(path))
# Test MPLS availability
krel = platform.release()
- if topotest.version_cmp(krel, '4.5') < 0:
- logger.info('LDPd tests will not run (have kernel "{}", but it requires 4.5)'.format(krel))
+ if topotest.version_cmp(krel, "4.5") < 0:
+ logger.info(
+ 'LDPd tests will not run (have kernel "{}", but it requires 4.5)'.format(
+ krel
+ )
+ )
# Test for MPLS Kernel modules available
- if not topotest.module_present('mpls-router', load=False) != 0:
- logger.info('LDPd tests will not run (missing mpls-router kernel module)')
- if not topotest.module_present('mpls-iptunnel', load=False) != 0:
- logger.info('LDPd tests will not run (missing mpls-iptunnel kernel module)')
+ if not topotest.module_present("mpls-router", load=False) != 0:
+ logger.info("LDPd tests will not run (missing mpls-router kernel module)")
+ if not topotest.module_present("mpls-iptunnel", load=False) != 0:
+ logger.info("LDPd tests will not run (missing mpls-iptunnel kernel module)")
# TODO remove me when we start supporting exabgp >= 4
try:
- output = subprocess.check_output(['exabgp', '-v'])
- line = output.split('\n')[0]
- version = line.split(' ')[2]
- if topotest.version_cmp(version, '4') >= 0:
- logger.warning('BGP topologies are still using exabgp version 3, expect failures')
+ output = subprocess.check_output(["exabgp", "-v"])
+ line = output.split("\n")[0]
+ version = line.split(" ")[2]
+ if topotest.version_cmp(version, "4") >= 0:
+ logger.warning(
+ "BGP topologies are still using exabgp version 3, expect failures"
+ )
# We want to catch all exceptions
# pylint: disable=W0702
except:
- logger.warning('failed to find exabgp or returned error')
+ logger.warning("failed to find exabgp or returned error")
# After we logged the output to file, remove the handler.
logger.removeHandler(fhandler)
return ret
+
def diagnose_env_freebsd():
return True
+
def diagnose_env():
if sys.platform.startswith("linux"):
return diagnose_env_linux()
# Required to instantiate the topology builder class.
from lib.common_config import (
- number_to_row, number_to_column,
+ number_to_row,
+ number_to_column,
load_config_to_router,
create_interfaces_cfg,
create_static_routes,
create_prefix_lists,
create_route_maps,
- create_bgp_community_lists
+ create_bgp_community_lists,
)
from lib.bgp import create_router_bgp
* `topo`: json file data
"""
- ROUTER_LIST = sorted(topo['routers'].keys(),
- key=lambda x: int(re_search('\d+', x).group(0)))
+ ROUTER_LIST = sorted(
+ topo["routers"].keys(), key=lambda x: int(re_search(r"\d+", x).group(0))
+ )
listRouters = ROUTER_LIST[:]
for routerN in ROUTER_LIST:
- logger.info('Topo: Add router {}'.format(routerN))
+ logger.info("Topo: Add router {}".format(routerN))
tgen.add_router(routerN)
listRouters.append(routerN)
- if 'ipv4base' in topo:
- ipv4Next = ipaddr.IPv4Address(topo['link_ip_start']['ipv4'])
- ipv4Step = 2 ** (32 - topo['link_ip_start']['v4mask'])
- if topo['link_ip_start']['v4mask'] < 32:
+ if "ipv4base" in topo:
+ ipv4Next = ipaddr.IPv4Address(topo["link_ip_start"]["ipv4"])
+ ipv4Step = 2 ** (32 - topo["link_ip_start"]["v4mask"])
+ if topo["link_ip_start"]["v4mask"] < 32:
ipv4Next += 1
- if 'ipv6base' in topo:
- ipv6Next = ipaddr.IPv6Address(topo['link_ip_start']['ipv6'])
- ipv6Step = 2 ** (128 - topo['link_ip_start']['v6mask'])
- if topo['link_ip_start']['v6mask'] < 127:
+ if "ipv6base" in topo:
+ ipv6Next = ipaddr.IPv6Address(topo["link_ip_start"]["ipv6"])
+ ipv6Step = 2 ** (128 - topo["link_ip_start"]["v6mask"])
+ if topo["link_ip_start"]["v6mask"] < 127:
ipv6Next += 1
for router in listRouters:
- topo['routers'][router]['nextIfname'] = 0
+ topo["routers"][router]["nextIfname"] = 0
while listRouters != []:
curRouter = listRouters.pop(0)
# Physical Interfaces
- if 'links' in topo['routers'][curRouter]:
+ if "links" in topo["routers"][curRouter]:
+
def link_sort(x):
- if x == 'lo':
+ if x == "lo":
return 0
- elif 'link' in x:
- return int(x.split('-link')[1])
+ elif "link" in x:
+ return int(x.split("-link")[1])
else:
- return int(re_search('\d+', x).group(0))
- for destRouterLink, data in sorted(topo['routers'][curRouter]['links']. \
- iteritems(),
- key=lambda x: link_sort(x[0])):
- currRouter_lo_json = \
- topo['routers'][curRouter]['links'][destRouterLink]
+ return int(re_search(r"\d+", x).group(0))
+
+ for destRouterLink, data in sorted(
+ topo["routers"][curRouter]["links"].iteritems(),
+ key=lambda x: link_sort(x[0]),
+ ):
+ currRouter_lo_json = topo["routers"][curRouter]["links"][destRouterLink]
# Loopback interfaces
- if 'type' in data and data['type'] == 'loopback':
- if 'ipv4' in currRouter_lo_json and \
- currRouter_lo_json['ipv4'] == 'auto':
- currRouter_lo_json['ipv4'] = '{}{}.{}/{}'. \
- format(topo['lo_prefix']['ipv4'], number_to_row(curRouter), \
- number_to_column(curRouter), topo['lo_prefix']['v4mask'])
- if 'ipv6' in currRouter_lo_json and \
- currRouter_lo_json['ipv6'] == 'auto':
- currRouter_lo_json['ipv6'] = '{}{}:{}/{}'. \
- format(topo['lo_prefix']['ipv6'], number_to_row(curRouter), \
- number_to_column(curRouter), topo['lo_prefix']['v6mask'])
+ if "type" in data and data["type"] == "loopback":
+ if (
+ "ipv4" in currRouter_lo_json
+ and currRouter_lo_json["ipv4"] == "auto"
+ ):
+ currRouter_lo_json["ipv4"] = "{}{}.{}/{}".format(
+ topo["lo_prefix"]["ipv4"],
+ number_to_row(curRouter),
+ number_to_column(curRouter),
+ topo["lo_prefix"]["v4mask"],
+ )
+ if (
+ "ipv6" in currRouter_lo_json
+ and currRouter_lo_json["ipv6"] == "auto"
+ ):
+ currRouter_lo_json["ipv6"] = "{}{}:{}/{}".format(
+ topo["lo_prefix"]["ipv6"],
+ number_to_row(curRouter),
+ number_to_column(curRouter),
+ topo["lo_prefix"]["v6mask"],
+ )
if "-" in destRouterLink:
# Spliting and storing destRouterLink data in tempList
curRouterLink = curRouter
if destRouter in listRouters:
- currRouter_link_json = \
- topo['routers'][curRouter]['links'][destRouterLink]
- destRouter_link_json = \
- topo['routers'][destRouter]['links'][curRouterLink]
+ currRouter_link_json = topo["routers"][curRouter]["links"][
+ destRouterLink
+ ]
+ destRouter_link_json = topo["routers"][destRouter]["links"][
+ curRouterLink
+ ]
# Assigning name to interfaces
- currRouter_link_json['interface'] = \
- '{}-{}-eth{}'.format(curRouter, destRouter, topo['routers'] \
- [curRouter]['nextIfname'])
- destRouter_link_json['interface'] = \
- '{}-{}-eth{}'.format(destRouter, curRouter, topo['routers'] \
- [destRouter]['nextIfname'])
+ currRouter_link_json["interface"] = "{}-{}-eth{}".format(
+ curRouter, destRouter, topo["routers"][curRouter]["nextIfname"]
+ )
+ destRouter_link_json["interface"] = "{}-{}-eth{}".format(
+ destRouter, curRouter, topo["routers"][destRouter]["nextIfname"]
+ )
- topo['routers'][curRouter]['nextIfname'] += 1
- topo['routers'][destRouter]['nextIfname'] += 1
+ topo["routers"][curRouter]["nextIfname"] += 1
+ topo["routers"][destRouter]["nextIfname"] += 1
# Linking routers to each other as defined in JSON file
- tgen.gears[curRouter].add_link(tgen.gears[destRouter],
- topo['routers'][curRouter]['links'][destRouterLink] \
- ['interface'], topo['routers'][destRouter]['links'] \
- [curRouterLink]['interface'])
+ tgen.gears[curRouter].add_link(
+ tgen.gears[destRouter],
+ topo["routers"][curRouter]["links"][destRouterLink][
+ "interface"
+ ],
+ topo["routers"][destRouter]["links"][curRouterLink][
+ "interface"
+ ],
+ )
# IPv4
- if 'ipv4' in currRouter_link_json:
- if currRouter_link_json['ipv4'] == 'auto':
- currRouter_link_json['ipv4'] = \
- '{}/{}'.format(ipv4Next, topo['link_ip_start'][ \
- 'v4mask'])
- destRouter_link_json['ipv4'] = \
- '{}/{}'.format(ipv4Next + 1, topo['link_ip_start'][ \
- 'v4mask'])
+ if "ipv4" in currRouter_link_json:
+ if currRouter_link_json["ipv4"] == "auto":
+ currRouter_link_json["ipv4"] = "{}/{}".format(
+ ipv4Next, topo["link_ip_start"]["v4mask"]
+ )
+ destRouter_link_json["ipv4"] = "{}/{}".format(
+ ipv4Next + 1, topo["link_ip_start"]["v4mask"]
+ )
ipv4Next += ipv4Step
# IPv6
- if 'ipv6' in currRouter_link_json:
- if currRouter_link_json['ipv6'] == 'auto':
- currRouter_link_json['ipv6'] = \
- '{}/{}'.format(ipv6Next, topo['link_ip_start'][ \
- 'v6mask'])
- destRouter_link_json['ipv6'] = \
- '{}/{}'.format(ipv6Next + 1, topo['link_ip_start'][ \
- 'v6mask'])
+ if "ipv6" in currRouter_link_json:
+ if currRouter_link_json["ipv6"] == "auto":
+ currRouter_link_json["ipv6"] = "{}/{}".format(
+ ipv6Next, topo["link_ip_start"]["v6mask"]
+ )
+ destRouter_link_json["ipv6"] = "{}/{}".format(
+ ipv6Next + 1, topo["link_ip_start"]["v6mask"]
+ )
ipv6Next = ipaddr.IPv6Address(int(ipv6Next) + ipv6Step)
- logger.debug("Generated link data for router: %s\n%s", curRouter,
- json_dumps(topo["routers"][curRouter]["links"],
- indent=4, sort_keys=True))
+ logger.debug(
+ "Generated link data for router: %s\n%s",
+ curRouter,
+ json_dumps(
+ topo["routers"][curRouter]["links"], indent=4, sort_keys=True
+ ),
+ )
def build_config_from_json(tgen, topo, save_bkup=True):
* `topo`: json file data
"""
- func_dict = OrderedDict([
- ("links", create_interfaces_cfg),
- ("static_routes", create_static_routes),
- ("prefix_lists", create_prefix_lists),
- ("bgp_community_list", create_bgp_community_lists),
- ("route_maps", create_route_maps),
- ("bgp", create_router_bgp)
- ])
+ func_dict = OrderedDict(
+ [
+ ("links", create_interfaces_cfg),
+ ("static_routes", create_static_routes),
+ ("prefix_lists", create_prefix_lists),
+ ("bgp_community_list", create_bgp_community_lists),
+ ("route_maps", create_route_maps),
+ ("bgp", create_router_bgp),
+ ]
+ )
data = topo["routers"]
for func_type in func_dict.keys():
- logger.info('Checking for {} configuration in input data'.format(
- func_type))
+ logger.info("Checking for {} configuration in input data".format(func_type))
func_dict.get(func_type)(tgen, data, build=True)
- for router in sorted(topo['routers'].keys()):
- logger.debug('Configuring router {}...'.format(router))
+ for router in sorted(topo["routers"].keys()):
+ logger.debug("Configuring router {}...".format(router))
result = load_config_to_router(tgen, router, save_bkup)
if not result:
logger.info("Failed while configuring {}".format(router))
pytest.exit(1)
-
# Helper dictionary to convert Topogen logging levels to Python's logging.
DEBUG_TOPO2LOGGING = {
- 'debug': logging.DEBUG,
- 'info': logging.INFO,
- 'output': logging.INFO,
- 'warning': logging.WARNING,
- 'error': logging.ERROR,
- 'critical': logging.CRITICAL,
+ "debug": logging.DEBUG,
+ "info": logging.INFO,
+ "output": logging.INFO,
+ "warning": logging.WARNING,
+ "error": logging.ERROR,
+ "critical": logging.CRITICAL,
}
+
class InfoFilter(logging.Filter):
def filter(self, rec):
return rec.levelno in (logging.DEBUG, logging.INFO)
+
#
# Logger class definition
#
+
class Logger(object):
"""
Logger class that encapsulates logging functions, internaly it uses Python
def __init__(self):
# Create default global logger
self.log_level = logging.INFO
- self.logger = logging.Logger('topolog', level=self.log_level)
+ self.logger = logging.Logger("topolog", level=self.log_level)
handler_stdout = logging.StreamHandler(sys.stdout)
handler_stdout.setLevel(logging.DEBUG)
handler_stdout.addFilter(InfoFilter())
handler_stdout.setFormatter(
- logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s')
+ logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
)
handler_stderr = logging.StreamHandler()
handler_stderr.setLevel(logging.WARNING)
handler_stderr.setFormatter(
- logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s')
+ logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
)
self.logger.addHandler(handler_stdout)
self.logger.addHandler(handler_stderr)
# Handle more loggers
- self.loggers = {'topolog': self.logger}
+ self.loggers = {"topolog": self.logger}
def set_log_level(self, level):
"Set the logging level"
self.log_level = DEBUG_TOPO2LOGGING.get(level)
self.logger.setLevel(self.log_level)
- def get_logger(self, name='topolog', log_level=None, target=sys.stdout):
+ def get_logger(self, name="topolog", log_level=None, target=sys.stdout):
"""
Get a new logger entry. Allows creating different loggers for formating,
filtering or handling (file, stream or stdout/stderr).
handler = logging.StreamHandler(stream=target)
handler.setFormatter(
- logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s')
+ logging.Formatter(fmt="%(asctime)s %(levelname)s: %(message)s")
)
nlogger.addHandler(handler)
self.loggers[name] = nlogger
return nlogger
+
#
# Global variables
#
from mininet.cli import CLI
from mininet.link import Intf
+
class json_cmp_result(object):
"json_cmp result class for better assertion messages"
return len(self.errors) > 0
def __str__(self):
- return '\n'.join(self.errors)
+ return "\n".join(self.errors)
def json_diff(d1, d2):
Returns a string with the difference between JSON data.
"""
json_format_opts = {
- 'indent': 4,
- 'sort_keys': True,
+ "indent": 4,
+ "sort_keys": True,
}
dstr1 = json.dumps(d1, **json_format_opts)
dstr2 = json.dumps(d2, **json_format_opts)
- return difflines(dstr2, dstr1, title1='Expected value', title2='Current value', n=0)
+ return difflines(dstr2, dstr1, title1="Expected value", title2="Current value", n=0)
def _json_list_cmp(list1, list2, parent, result):
# Check second list2 type
if not isinstance(list1, type([])) or not isinstance(list2, type([])):
result.add_error(
- '{} has different type than expected '.format(parent) +
- '(have {}, expected {}):\n{}'.format(
- type(list1), type(list2), json_diff(list1, list2)))
+ "{} has different type than expected ".format(parent)
+ + "(have {}, expected {}):\n{}".format(
+ type(list1), type(list2), json_diff(list1, list2)
+ )
+ )
return
# Check list size
if len(list2) > len(list1):
result.add_error(
- '{} too few items '.format(parent) +
- '(have {}, expected {}:\n {})'.format(
- len(list1), len(list2),
- json_diff(list1, list2)))
+ "{} too few items ".format(parent)
+ + "(have {}, expected {}:\n {})".format(
+ len(list1), len(list2), json_diff(list1, list2)
+ )
+ )
return
# List all unmatched items errors
for expected in list2:
matched = False
for value in list1:
- if json_cmp({'json': value}, {'json': expected}) is None:
+ if json_cmp({"json": value}, {"json": expected}) is None:
matched = True
break
# If there are unmatched items, error out.
if unmatched:
result.add_error(
- '{} value is different (\n{})'.format(
- parent, json_diff(list1, list2)))
+ "{} value is different (\n{})".format(parent, json_diff(list1, list2))
+ )
def json_cmp(d1, d2):
Note: key absence can be tested by adding a key with value `None`.
"""
- squeue = [(d1, d2, 'json')]
+ squeue = [(d1, d2, "json")]
result = json_cmp_result()
for s in squeue:
s2_req = set([key for key in nd2 if nd2[key] is not None])
diff = s2_req - s1
if diff != set({}):
- result.add_error('expected key(s) {} in {} (have {}):\n{}'.format(
- str(list(diff)), parent, str(list(s1)), json_diff(nd1, nd2)))
+ result.add_error(
+ "expected key(s) {} in {} (have {}):\n{}".format(
+ str(list(diff)), parent, str(list(s1)), json_diff(nd1, nd2)
+ )
+ )
for key in s2.intersection(s1):
# Test for non existence of key in d2
if nd2[key] is None:
- result.add_error('"{}" should not exist in {} (have {}):\n{}'.format(
- key, parent, str(s1), json_diff(nd1[key], nd2[key])))
+ result.add_error(
+ '"{}" should not exist in {} (have {}):\n{}'.format(
+ key, parent, str(s1), json_diff(nd1[key], nd2[key])
+ )
+ )
continue
# If nd1 key is a dict, we have to recurse in it later.
if isinstance(nd2[key], type({})):
if not isinstance(nd1[key], type({})):
result.add_error(
- '{}["{}"] has different type than expected '.format(parent, key) +
- '(have {}, expected {}):\n{}'.format(
- type(nd1[key]), type(nd2[key]), json_diff(nd1[key], nd2[key])))
+ '{}["{}"] has different type than expected '.format(parent, key)
+ + "(have {}, expected {}):\n{}".format(
+ type(nd1[key]),
+ type(nd2[key]),
+ json_diff(nd1[key], nd2[key]),
+ )
+ )
continue
nparent = '{}["{}"]'.format(parent, key)
squeue.append((nd1[key], nd2[key], nparent))
if nd1[key] != nd2[key]:
result.add_error(
'{}["{}"] value is different (\n{})'.format(
- parent, key, json_diff(nd1[key], nd2[key])))
+ parent, key, json_diff(nd1[key], nd2[key])
+ )
+ )
continue
if result.has_errors():
"""
Runs `cmd` in router and compares the output with `expected`.
"""
- return difflines(normalize_text(router.vtysh_cmd(cmd)),
- normalize_text(expected),
- title1="Current output",
- title2="Expected output")
+ return difflines(
+ normalize_text(router.vtysh_cmd(cmd)),
+ normalize_text(expected),
+ title1="Current output",
+ title2="Expected output",
+ )
def router_json_cmp(router, cmd, data):
logger.info(
"'{}' polling started (interval {} secs, maximum wait {} secs)".format(
- func_name, wait, int(wait * count)))
+ func_name, wait, int(wait * count)
+ )
+ )
while count > 0:
result = func()
continue
end_time = time.time()
- logger.info("'{}' succeeded after {:.2f} seconds".format(
- func_name, end_time - start_time))
+ logger.info(
+ "'{}' succeeded after {:.2f} seconds".format(
+ func_name, end_time - start_time
+ )
+ )
return (True, result)
end_time = time.time()
- logger.error("'{}' failed after {:.2f} seconds".format(
- func_name, end_time - start_time))
+ logger.error(
+ "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
+ )
return (False, result)
logger.info(
"'{}' polling started (interval {} secs, maximum wait {} secs)".format(
- func_name, wait, int(wait * count)))
+ func_name, wait, int(wait * count)
+ )
+ )
while count > 0:
result = func()
if not isinstance(result, etype):
- logger.debug("Expected result type '{}' got '{}' instead".format(etype, type(result)))
+ logger.debug(
+ "Expected result type '{}' got '{}' instead".format(etype, type(result))
+ )
time.sleep(wait)
count -= 1
continue
continue
end_time = time.time()
- logger.info("'{}' succeeded after {:.2f} seconds".format(
- func_name, end_time - start_time))
+ logger.info(
+ "'{}' succeeded after {:.2f} seconds".format(
+ func_name, end_time - start_time
+ )
+ )
return (True, result)
end_time = time.time()
- logger.error("'{}' failed after {:.2f} seconds".format(
- func_name, end_time - start_time))
+ logger.error(
+ "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
+ )
return (False, result)
try:
dpid = hex(dpid)[2:]
- dpid = '0'*(16-len(dpid))+dpid
+ dpid = "0" * (16 - len(dpid)) + dpid
return dpid
except IndexError:
- raise Exception('Unable to derive default datapath ID - '
- 'please either specify a dpid or use a '
- 'canonical switch name such as s23.')
+ raise Exception(
+ "Unable to derive default datapath ID - "
+ "please either specify a dpid or use a "
+ "canonical switch name such as s23."
+ )
+
def pid_exists(pid):
"Check whether pid exists in the current process table."
else:
return True
+
def get_textdiff(text1, text2, title1="", title2="", **opts):
"Returns empty string if same or formatted diff"
- diff = '\n'.join(difflib.unified_diff(text1, text2,
- fromfile=title1, tofile=title2, **opts))
+ diff = "\n".join(
+ difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
+ )
# Clean up line endings
diff = os.linesep.join([s for s in diff.splitlines() if s])
return diff
-def difflines(text1, text2, title1='', title2='', **opts):
+
+def difflines(text1, text2, title1="", title2="", **opts):
"Wrapper for get_textdiff to avoid string transformations."
- text1 = ('\n'.join(text1.rstrip().splitlines()) + '\n').splitlines(1)
- text2 = ('\n'.join(text2.rstrip().splitlines()) + '\n').splitlines(1)
+ text1 = ("\n".join(text1.rstrip().splitlines()) + "\n").splitlines(1)
+ text2 = ("\n".join(text2.rstrip().splitlines()) + "\n").splitlines(1)
return get_textdiff(text1, text2, title1, title2, **opts)
+
def get_file(content):
"""
Generates a temporary file in '/tmp' with `content` and returns the file name.
"""
- fde = tempfile.NamedTemporaryFile(mode='w', delete=False)
+ fde = tempfile.NamedTemporaryFile(mode="w", delete=False)
fname = fde.name
fde.write(content)
fde.close()
return fname
+
def normalize_text(text):
"""
Strips formating spaces/tabs, carriage returns and trailing whitespace.
"""
- text = re.sub(r'[ \t]+', ' ', text)
- text = re.sub(r'\r', '', text)
+ text = re.sub(r"[ \t]+", " ", text)
+ text = re.sub(r"\r", "", text)
# Remove whitespace in the middle of text.
- text = re.sub(r'[ \t]+\n', '\n', text)
+ text = re.sub(r"[ \t]+\n", "\n", text)
# Remove whitespace at the end of the text.
text = text.rstrip()
return text
+
def module_present_linux(module, load):
"""
Returns whether `module` is present.
If `load` is true, it will try to load it via modprobe.
"""
- with open('/proc/modules', 'r') as modules_file:
- if module.replace('-','_') in modules_file.read():
+ with open("/proc/modules", "r") as modules_file:
+ if module.replace("-", "_") in modules_file.read():
return True
- cmd = '/sbin/modprobe {}{}'.format('' if load else '-n ',
- module)
+ cmd = "/sbin/modprobe {}{}".format("" if load else "-n ", module)
if os.system(cmd) != 0:
return False
else:
return True
+
def module_present_freebsd(module, load):
return True
+
def module_present(module, load=True):
if sys.platform.startswith("linux"):
return module_present_linux(module, load)
elif sys.platform.startswith("freebsd"):
return module_present_freebsd(module, load)
+
def version_cmp(v1, v2):
"""
Compare two version strings and returns:
Raises `ValueError` if versions are not well formated.
"""
- vregex = r'(?P<whole>\d+(\.(\d+))*)'
+ vregex = r"(?P<whole>\d+(\.(\d+))*)"
v1m = re.match(vregex, v1)
v2m = re.match(vregex, v2)
if v1m is None or v2m is None:
raise ValueError("got a invalid version string")
# Split values
- v1g = v1m.group('whole').split('.')
- v2g = v2m.group('whole').split('.')
+ v1g = v1m.group("whole").split(".")
+ v2g = v2m.group("whole").split(".")
# Get the longest version string
vnum = len(v1g)
return -1
return 0
+
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
if ifaceaction:
- str_ifaceaction = 'no shutdown'
+ str_ifaceaction = "no shutdown"
else:
- str_ifaceaction = 'shutdown'
+ str_ifaceaction = "shutdown"
if vrf_name == None:
- cmd = 'vtysh -c \"configure terminal\" -c \"interface {0}\" -c \"{1}\"'.format(ifacename, str_ifaceaction)
+ cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
+ ifacename, str_ifaceaction
+ )
else:
- cmd = 'vtysh -c \"configure terminal\" -c \"interface {0} vrf {1}\" -c \"{2}\"'.format(ifacename, vrf_name, str_ifaceaction)
+ cmd = 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
+ ifacename, vrf_name, str_ifaceaction
+ )
node.run(cmd)
+
def ip4_route_zebra(node, vrf_name=None):
"""
Gets an output of 'show ip route' command. It can be used
with comparing the output to a reference
"""
if vrf_name == None:
- tmp = node.vtysh_cmd('show ip route')
+ tmp = node.vtysh_cmd("show ip route")
else:
- tmp = node.vtysh_cmd('show ip route vrf {0}'.format(vrf_name))
+ tmp = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))
output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp)
lines = output.splitlines()
header_found = False
while lines and (not lines[0].strip() or not header_found):
- if '> - selected route' in lines[0]:
+ if "> - selected route" in lines[0]:
header_found = True
lines = lines[1:]
- return '\n'.join(lines)
+ return "\n".join(lines)
+
def ip6_route_zebra(node, vrf_name=None):
"""
"""
if vrf_name == None:
- tmp = node.vtysh_cmd('show ipv6 route')
+ tmp = node.vtysh_cmd("show ipv6 route")
else:
- tmp = node.vtysh_cmd('show ipv6 route vrf {0}'.format(vrf_name))
+ tmp = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))
# Mask out timestamp
output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp)
# Mask out the link-local addresses
- output = re.sub(r'fe80::[^ ]+,', 'fe80::XXXX:XXXX:XXXX:XXXX,', output)
+ output = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output)
lines = output.splitlines()
header_found = False
while lines and (not lines[0].strip() or not header_found):
- if '> - selected route' in lines[0]:
+ if "> - selected route" in lines[0]:
header_found = True
lines = lines[1:]
- return '\n'.join(lines)
+ return "\n".join(lines)
def proto_name_to_number(protocol):
return {
- 'bgp': '186',
- 'isis': '187',
- 'ospf': '188',
- 'rip': '189',
- 'ripng': '190',
- 'nhrp': '191',
- 'eigrp': '192',
- 'ldp': '193',
- 'sharp': '194',
- 'pbr': '195',
- 'static': '196'
- }.get(protocol, protocol) # default return same as input
+ "bgp": "186",
+ "isis": "187",
+ "ospf": "188",
+ "rip": "189",
+ "ripng": "190",
+ "nhrp": "191",
+ "eigrp": "192",
+ "ldp": "193",
+ "sharp": "194",
+ "pbr": "195",
+ "static": "196",
+ }.get(
+ protocol, protocol
+ ) # default return same as input
def ip4_route(node):
}
}
"""
- output = normalize_text(node.run('ip route')).splitlines()
+ output = normalize_text(node.run("ip route")).splitlines()
result = {}
for line in output:
- columns = line.split(' ')
+ columns = line.split(" ")
route = result[columns[0]] = {}
prev = None
for column in columns:
- if prev == 'dev':
- route['dev'] = column
- if prev == 'via':
- route['via'] = column
- if prev == 'proto':
+ if prev == "dev":
+ route["dev"] = column
+ if prev == "via":
+ route["via"] = column
+ if prev == "proto":
# translate protocol names back to numbers
- route['proto'] = proto_name_to_number(column)
- if prev == 'metric':
- route['metric'] = column
- if prev == 'scope':
- route['scope'] = column
+ route["proto"] = proto_name_to_number(column)
+ if prev == "metric":
+ route["metric"] = column
+ if prev == "scope":
+ route["scope"] = column
prev = column
return result
+
def ip6_route(node):
"""
Gets a structured return of the command 'ip -6 route'. It can be used in
}
}
"""
- output = normalize_text(node.run('ip -6 route')).splitlines()
+ output = normalize_text(node.run("ip -6 route")).splitlines()
result = {}
for line in output:
- columns = line.split(' ')
+ columns = line.split(" ")
route = result[columns[0]] = {}
prev = None
for column in columns:
- if prev == 'dev':
- route['dev'] = column
- if prev == 'via':
- route['via'] = column
- if prev == 'proto':
+ if prev == "dev":
+ route["dev"] = column
+ if prev == "via":
+ route["via"] = column
+ if prev == "proto":
# translate protocol names back to numbers
- route['proto'] = proto_name_to_number(column)
- if prev == 'metric':
- route['metric'] = column
- if prev == 'pref':
- route['pref'] = column
+ route["proto"] = proto_name_to_number(column)
+ if prev == "metric":
+ route["metric"] = column
+ if prev == "pref":
+ route["pref"] = column
prev = column
return result
+
def sleep(amount, reason=None):
"""
Sleep wrapper that registers in the log the amount of sleep
"""
if reason is None:
- logger.info('Sleeping for {} seconds'.format(amount))
+ logger.info("Sleeping for {} seconds".format(amount))
else:
- logger.info(reason + ' ({} seconds)'.format(amount))
+ logger.info(reason + " ({} seconds)".format(amount))
time.sleep(amount)
+
def checkAddressSanitizerError(output, router, component):
"Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"
- addressSantizerError = re.search('(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ', output)
+ addressSantizerError = re.search(
+ "(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
+ )
if addressSantizerError:
- sys.stderr.write("%s: %s triggered an exception by AddressSanitizer\n" % (router, component))
+ sys.stderr.write(
+ "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
+ )
# Sanitizer Error found in log
pidMark = addressSantizerError.group(1)
- addressSantizerLog = re.search('%s(.*)%s' % (pidMark, pidMark), output, re.DOTALL)
+ addressSantizerLog = re.search(
+ "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
+ )
if addressSantizerLog:
- callingTest = os.path.basename(sys._current_frames().values()[0].f_back.f_back.f_globals['__file__'])
+ callingTest = os.path.basename(
+ sys._current_frames().values()[0].f_back.f_back.f_globals["__file__"]
+ )
callingProc = sys._getframe(2).f_code.co_name
with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
- sys.stderr.write('\n'.join(addressSantizerLog.group(1).splitlines()) + '\n')
+ sys.stderr.write(
+ "\n".join(addressSantizerLog.group(1).splitlines()) + "\n"
+ )
addrSanFile.write("## Error: %s\n\n" % addressSantizerError.group(2))
- addrSanFile.write("### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n" % (callingTest, callingProc, router))
- addrSanFile.write(' '+ '\n '.join(addressSantizerLog.group(1).splitlines()) + '\n')
+ addrSanFile.write(
+ "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
+ % (callingTest, callingProc, router)
+ )
+ addrSanFile.write(
+ " "
+ + "\n ".join(addressSantizerLog.group(1).splitlines())
+ + "\n"
+ )
addrSanFile.write("\n---------------\n")
return True
return False
+
def addRouter(topo, name):
"Adding a FRRouter (or Quagga) to Topology"
- MyPrivateDirs = ['/etc/frr',
- '/etc/quagga',
- '/var/run/frr',
- '/var/run/quagga',
- '/var/log']
+ MyPrivateDirs = [
+ "/etc/frr",
+ "/etc/quagga",
+ "/var/run/frr",
+ "/var/run/quagga",
+ "/var/log",
+ ]
if sys.platform.startswith("linux"):
return topo.addNode(name, cls=LinuxRouter, privateDirs=MyPrivateDirs)
elif sys.platform.startswith("freebsd"):
return topo.addNode(name, cls=FreeBSDRouter, privateDirs=MyPrivateDirs)
+
def set_sysctl(node, sysctl, value):
"Set a sysctl value and return None on success or an error string"
- valuestr = '{}'.format(value)
+ valuestr = "{}".format(value)
command = "sysctl {0}={1}".format(sysctl, valuestr)
cmdret = node.cmd(command)
- matches = re.search(r'([^ ]+) = ([^\s]+)', cmdret)
+ matches = re.search(r"([^ ]+) = ([^\s]+)", cmdret)
if matches is None:
return cmdret
if matches.group(1) != sysctl:
return None
+
def assert_sysctl(node, sysctl, value):
"Set and assert that the sysctl is set with the specified value."
assert set_sysctl(node, sysctl, value) is None
def __init__(self, name, **params):
super(Router, self).__init__(name, **params)
- self.logdir = params.get('logdir')
+ self.logdir = params.get("logdir")
# Backward compatibility:
# Load configuration defaults like topogen.
- self.config_defaults = configparser.ConfigParser({
- 'verbosity': 'info',
- 'frrdir': '/usr/lib/frr',
- 'quaggadir': '/usr/lib/quagga',
- 'routertype': 'frr',
- 'memleak_path': None,
- })
+ self.config_defaults = configparser.ConfigParser(
+ {
+ "verbosity": "info",
+ "frrdir": "/usr/lib/frr",
+ "quaggadir": "/usr/lib/quagga",
+ "routertype": "frr",
+ "memleak_path": None,
+ }
+ )
self.config_defaults.read(
- os.path.join(os.path.dirname(os.path.realpath(__file__)),
- '../pytest.ini')
+ os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
)
# If this topology is using old API and doesn't have logdir
# specified, then attempt to generate an unique logdir.
if self.logdir is None:
- cur_test = os.environ['PYTEST_CURRENT_TEST']
- self.logdir = ('/tmp/topotests/' +
- cur_test[0:cur_test.find(".py")].replace('/', '.'))
+ cur_test = os.environ["PYTEST_CURRENT_TEST"]
+ self.logdir = "/tmp/topotests/" + cur_test[
+ 0 : cur_test.find(".py")
+ ].replace("/", ".")
# If the logdir is not created, then create it and set the
# appropriated permissions.
if not os.path.isdir(self.logdir):
- os.system('mkdir -p ' + self.logdir + '/' + name)
- os.system('chmod -R go+rw /tmp/topotests')
+ os.system("mkdir -p " + self.logdir + "/" + name)
+ os.system("chmod -R go+rw /tmp/topotests")
self.daemondir = None
self.hasmpls = False
- self.routertype = 'frr'
- self.daemons = {'zebra': 0, 'ripd': 0, 'ripngd': 0, 'ospfd': 0,
- 'ospf6d': 0, 'isisd': 0, 'bgpd': 0, 'pimd': 0,
- 'ldpd': 0, 'eigrpd': 0, 'nhrpd': 0, 'staticd': 0,
- 'bfdd': 0, 'sharpd': 0}
- self.daemons_options = {'zebra': ''}
+ self.routertype = "frr"
+ self.daemons = {
+ "zebra": 0,
+ "ripd": 0,
+ "ripngd": 0,
+ "ospfd": 0,
+ "ospf6d": 0,
+ "isisd": 0,
+ "bgpd": 0,
+ "pimd": 0,
+ "ldpd": 0,
+ "eigrpd": 0,
+ "nhrpd": 0,
+ "staticd": 0,
+ "bfdd": 0,
+ "sharpd": 0,
+ }
+ self.daemons_options = {"zebra": ""}
self.reportCores = True
self.version = None
def _config_frr(self, **params):
"Configure FRR binaries"
- self.daemondir = params.get('frrdir')
+ self.daemondir = params.get("frrdir")
if self.daemondir is None:
- self.daemondir = self.config_defaults.get('topogen', 'frrdir')
+ self.daemondir = self.config_defaults.get("topogen", "frrdir")
- zebra_path = os.path.join(self.daemondir, 'zebra')
+ zebra_path = os.path.join(self.daemondir, "zebra")
if not os.path.isfile(zebra_path):
raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))
def _config_quagga(self, **params):
"Configure Quagga binaries"
- self.daemondir = params.get('quaggadir')
+ self.daemondir = params.get("quaggadir")
if self.daemondir is None:
- self.daemondir = self.config_defaults.get('topogen', 'quaggadir')
+ self.daemondir = self.config_defaults.get("topogen", "quaggadir")
- zebra_path = os.path.join(self.daemondir, 'zebra')
+ zebra_path = os.path.join(self.daemondir, "zebra")
if not os.path.isfile(zebra_path):
- raise Exception("Quagga zebra binary doesn't exist at {}".format(zebra_path))
+ raise Exception(
+ "Quagga zebra binary doesn't exist at {}".format(zebra_path)
+ )
# pylint: disable=W0221
# Some params are only meaningful for the parent class.
super(Router, self).config(**params)
# User did not specify the daemons directory, try to autodetect it.
- self.daemondir = params.get('daemondir')
+ self.daemondir = params.get("daemondir")
if self.daemondir is None:
- self.routertype = params.get('routertype',
- self.config_defaults.get(
- 'topogen',
- 'routertype'))
- if self.routertype == 'quagga':
+ self.routertype = params.get(
+ "routertype", self.config_defaults.get("topogen", "routertype")
+ )
+ if self.routertype == "quagga":
self._config_quagga(**params)
else:
self._config_frr(**params)
else:
# Test the provided path
- zpath = os.path.join(self.daemondir, 'zebra')
+ zpath = os.path.join(self.daemondir, "zebra")
if not os.path.isfile(zpath):
- raise Exception('No zebra binary found in {}'.format(zpath))
+ raise Exception("No zebra binary found in {}".format(zpath))
# Allow user to specify routertype when the path was specified.
- if params.get('routertype') is not None:
- self.routertype = params.get('routertype')
+ if params.get("routertype") is not None:
+ self.routertype = params.get("routertype")
- self.cmd('ulimit -c unlimited')
+ self.cmd("ulimit -c unlimited")
# Set ownership of config files
- self.cmd('chown {0}:{0}vty /etc/{0}'.format(self.routertype))
+ self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))
def terminate(self):
# Delete Running Quagga or FRR Daemons
# self.cmd('kill -7 `cat %s`' % d.rstrip())
# self.waitOutput()
# Disable forwarding
- set_sysctl(self, 'net.ipv4.ip_forward', 0)
- set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
+ set_sysctl(self, "net.ipv4.ip_forward", 0)
+ set_sysctl(self, "net.ipv6.conf.all.forwarding", 0)
super(Router, self).terminate()
- os.system('chmod -R go+rw /tmp/topotests')
+ os.system("chmod -R go+rw /tmp/topotests")
- def stopRouter(self, wait=True, assertOnError=True, minErrorVersion='5.1'):
+ def stopRouter(self, wait=True, assertOnError=True, minErrorVersion="5.1"):
# Stop Running Quagga or FRR Daemons
- rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
+ rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
errors = ""
if re.search(r"No such file or directory", rundaemons):
return errors
if rundaemons is not None:
numRunning = 0
for d in StringIO.StringIO(rundaemons):
- daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
- if (daemonpid.isdigit() and pid_exists(int(daemonpid))):
- logger.info('{}: stopping {}'.format(
- self.name,
- os.path.basename(d.rstrip().rsplit(".", 1)[0])
- ))
- self.cmd('kill -TERM %s' % daemonpid)
+ daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
+ if daemonpid.isdigit() and pid_exists(int(daemonpid)):
+ logger.info(
+ "{}: stopping {}".format(
+ self.name, os.path.basename(d.rstrip().rsplit(".", 1)[0])
+ )
+ )
+ self.cmd("kill -TERM %s" % daemonpid)
self.waitOutput()
if pid_exists(int(daemonpid)):
numRunning += 1
if wait and numRunning > 0:
- sleep(2, '{}: waiting for daemons stopping'.format(self.name))
+ sleep(2, "{}: waiting for daemons stopping".format(self.name))
# 2nd round of kill if daemons didn't exit
for d in StringIO.StringIO(rundaemons):
- daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
- if (daemonpid.isdigit() and pid_exists(int(daemonpid))):
- logger.info('{}: killing {}'.format(
- self.name,
- os.path.basename(d.rstrip().rsplit(".", 1)[0])
- ))
- self.cmd('kill -7 %s' % daemonpid)
+ daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
+ if daemonpid.isdigit() and pid_exists(int(daemonpid)):
+ logger.info(
+ "{}: killing {}".format(
+ self.name,
+ os.path.basename(d.rstrip().rsplit(".", 1)[0]),
+ )
+ )
+ self.cmd("kill -7 %s" % daemonpid)
self.waitOutput()
- self.cmd('rm -- {}'.format(d.rstrip()))
+ self.cmd("rm -- {}".format(d.rstrip()))
if wait:
- errors = self.checkRouterCores(reportOnce=True)
- if self.checkRouterVersion('<', minErrorVersion):
- #ignore errors in old versions
- errors = ""
- if assertOnError and len(errors) > 0:
- assert "Errors found - details follow:" == 0, errors
+ errors = self.checkRouterCores(reportOnce=True)
+ if self.checkRouterVersion("<", minErrorVersion):
+ # ignore errors in old versions
+ errors = ""
+ if assertOnError and len(errors) > 0:
+ assert "Errors found - details follow:" == 0, errors
return errors
def removeIPs(self):
for interface in self.intfNames():
- self.cmd('ip address flush', interface)
+ self.cmd("ip address flush", interface)
def checkCapability(self, daemon, param):
if param is not None:
daemon_path = os.path.join(self.daemondir, daemon)
- daemon_search_option = param.replace('-','')
- output = self.cmd('{0} -h | grep {1}'.format(
- daemon_path, daemon_search_option))
+ daemon_search_option = param.replace("-", "")
+ output = self.cmd(
+ "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
+ )
if daemon_search_option not in output:
return False
return True
if param is not None:
self.daemons_options[daemon] = param
if source is None:
- self.cmd('touch /etc/%s/%s.conf' % (self.routertype, daemon))
+ self.cmd("touch /etc/%s/%s.conf" % (self.routertype, daemon))
self.waitOutput()
else:
- self.cmd('cp %s /etc/%s/%s.conf' % (source, self.routertype, daemon))
+ self.cmd("cp %s /etc/%s/%s.conf" % (source, self.routertype, daemon))
self.waitOutput()
- self.cmd('chmod 640 /etc/%s/%s.conf' % (self.routertype, daemon))
+ self.cmd("chmod 640 /etc/%s/%s.conf" % (self.routertype, daemon))
self.waitOutput()
- self.cmd('chown %s:%s /etc/%s/%s.conf' % (self.routertype, self.routertype, self.routertype, daemon))
+ self.cmd(
+ "chown %s:%s /etc/%s/%s.conf"
+ % (self.routertype, self.routertype, self.routertype, daemon)
+ )
self.waitOutput()
- if (daemon == 'zebra') and (self.daemons['staticd'] == 0):
+ if (daemon == "zebra") and (self.daemons["staticd"] == 0):
# Add staticd with zebra - if it exists
- staticd_path = os.path.join(self.daemondir, 'staticd')
+ staticd_path = os.path.join(self.daemondir, "staticd")
if os.path.isfile(staticd_path):
- self.daemons['staticd'] = 1
- self.daemons_options['staticd'] = ''
+ self.daemons["staticd"] = 1
+ self.daemons_options["staticd"] = ""
# Auto-Started staticd has no config, so it will read from zebra config
else:
- logger.info('No daemon {} known'.format(daemon))
+ logger.info("No daemon {} known".format(daemon))
# print "Daemons after:", self.daemons
def startRouter(self, tgen=None):
# Disable integrated-vtysh-config
- self.cmd('echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' % self.routertype)
- self.cmd('chown %s:%svty /etc/%s/vtysh.conf' % (self.routertype, self.routertype, self.routertype))
+ self.cmd(
+ 'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
+ % self.routertype
+ )
+ self.cmd(
+ "chown %s:%svty /etc/%s/vtysh.conf"
+ % (self.routertype, self.routertype, self.routertype)
+ )
# TODO remove the following lines after all tests are migrated to Topogen.
# Try to find relevant old logfiles in /tmp and delete them
- map(os.remove, glob.glob('{}/{}/*.log'.format(self.logdir, self.name)))
+ map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
# Remove old core files
- map(os.remove, glob.glob('{}/{}/*.dmp'.format(self.logdir, self.name)))
+ map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
# Remove IP addresses from OS first - we have them in zebra.conf
self.removeIPs()
# If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
# No error - but return message and skip all the tests
- if self.daemons['ldpd'] == 1:
- ldpd_path = os.path.join(self.daemondir, 'ldpd')
+ if self.daemons["ldpd"] == 1:
+ ldpd_path = os.path.join(self.daemondir, "ldpd")
if not os.path.isfile(ldpd_path):
logger.info("LDP Test, but no ldpd compiled or installed")
return "LDP Test, but no ldpd compiled or installed"
- if version_cmp(platform.release(), '4.5') < 0:
+ if version_cmp(platform.release(), "4.5") < 0:
logger.info("LDP Test need Linux Kernel 4.5 minimum")
return "LDP Test need Linux Kernel 4.5 minimum"
# Check if have mpls
if tgen != None:
self.hasmpls = tgen.hasmpls
if self.hasmpls != True:
- logger.info("LDP/MPLS Tests will be skipped, platform missing module(s)")
+ logger.info(
+ "LDP/MPLS Tests will be skipped, platform missing module(s)"
+ )
else:
# Test for MPLS Kernel modules available
self.hasmpls = False
- if not module_present('mpls-router'):
- logger.info('MPLS tests will not run (missing mpls-router kernel module)')
- elif not module_present('mpls-iptunnel'):
- logger.info('MPLS tests will not run (missing mpls-iptunnel kernel module)')
+ if not module_present("mpls-router"):
+ logger.info(
+ "MPLS tests will not run (missing mpls-router kernel module)"
+ )
+ elif not module_present("mpls-iptunnel"):
+ logger.info(
+ "MPLS tests will not run (missing mpls-iptunnel kernel module)"
+ )
else:
self.hasmpls = True
if self.hasmpls != True:
return "LDP/MPLS Tests need mpls kernel modules"
- self.cmd('echo 100000 > /proc/sys/net/mpls/platform_labels')
+ self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")
- if self.daemons['eigrpd'] == 1:
- eigrpd_path = os.path.join(self.daemondir, 'eigrpd')
+ if self.daemons["eigrpd"] == 1:
+ eigrpd_path = os.path.join(self.daemondir, "eigrpd")
if not os.path.isfile(eigrpd_path):
logger.info("EIGRP Test, but no eigrpd compiled or installed")
return "EIGRP Test, but no eigrpd compiled or installed"
- if self.daemons['bfdd'] == 1:
- bfdd_path = os.path.join(self.daemondir, 'bfdd')
+ if self.daemons["bfdd"] == 1:
+ bfdd_path = os.path.join(self.daemondir, "bfdd")
if not os.path.isfile(bfdd_path):
logger.info("BFD Test, but no bfdd compiled or installed")
return "BFD Test, but no bfdd compiled or installed"
def restartRouter(self):
# Starts actual daemons without init (ie restart)
# cd to per node directory
- self.cmd('cd {}/{}'.format(self.logdir, self.name))
- self.cmd('umask 000')
- #Re-enable to allow for report per run
+ self.cmd("cd {}/{}".format(self.logdir, self.name))
+ self.cmd("umask 000")
+ # Re-enable to allow for report per run
self.reportCores = True
if self.version == None:
- self.version = self.cmd(os.path.join(self.daemondir, 'bgpd')+' -v').split()[2]
- logger.info('{}: running version: {}'.format(self.name,self.version))
+ self.version = self.cmd(
+ os.path.join(self.daemondir, "bgpd") + " -v"
+ ).split()[2]
+ logger.info("{}: running version: {}".format(self.name, self.version))
# Start Zebra first
- if self.daemons['zebra'] == 1:
- zebra_path = os.path.join(self.daemondir, 'zebra')
- zebra_option = self.daemons_options['zebra']
- self.cmd('{0} {1} > zebra.out 2> zebra.err &'.format(
- zebra_path, zebra_option, self.logdir, self.name
- ))
+ if self.daemons["zebra"] == 1:
+ zebra_path = os.path.join(self.daemondir, "zebra")
+ zebra_option = self.daemons_options["zebra"]
+ self.cmd(
+ "{0} {1} > zebra.out 2> zebra.err &".format(
+ zebra_path, zebra_option, self.logdir, self.name
+ )
+ )
self.waitOutput()
- logger.debug('{}: {} zebra started'.format(self, self.routertype))
- sleep(1, '{}: waiting for zebra to start'.format(self.name))
+ logger.debug("{}: {} zebra started".format(self, self.routertype))
+ sleep(1, "{}: waiting for zebra to start".format(self.name))
# Start staticd next if required
- if self.daemons['staticd'] == 1:
- staticd_path = os.path.join(self.daemondir, 'staticd')
- staticd_option = self.daemons_options['staticd']
- self.cmd('{0} {1} > staticd.out 2> staticd.err &'.format(
- staticd_path, staticd_option, self.logdir, self.name
- ))
+ if self.daemons["staticd"] == 1:
+ staticd_path = os.path.join(self.daemondir, "staticd")
+ staticd_option = self.daemons_options["staticd"]
+ self.cmd(
+ "{0} {1} > staticd.out 2> staticd.err &".format(
+ staticd_path, staticd_option, self.logdir, self.name
+ )
+ )
self.waitOutput()
- logger.debug('{}: {} staticd started'.format(self, self.routertype))
- # Fix Link-Local Addresses
+ logger.debug("{}: {} staticd started".format(self, self.routertype))
+ # Fix Link-Local Addresses
# Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
- self.cmd('for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=\':\'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done')
+ self.cmd(
+ "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done"
+ )
# Now start all the other daemons
for daemon in self.daemons:
# Skip disabled daemons and zebra
- if self.daemons[daemon] == 0 or daemon == 'zebra' or daemon == 'staticd':
+ if self.daemons[daemon] == 0 or daemon == "zebra" or daemon == "staticd":
continue
daemon_path = os.path.join(self.daemondir, daemon)
- self.cmd('{0} {1} > {2}.out 2> {2}.err &'.format(
- daemon_path, self.daemons_options.get(daemon, ''), daemon
- ))
+ self.cmd(
+ "{0} {1} > {2}.out 2> {2}.err &".format(
+ daemon_path, self.daemons_options.get(daemon, ""), daemon
+ )
+ )
self.waitOutput()
- logger.debug('{}: {} {} started'.format(self, self.routertype, daemon))
+ logger.debug("{}: {} {} started".format(self, self.routertype, daemon))
+
def getStdErr(self, daemon):
- return self.getLog('err', daemon)
+ return self.getLog("err", daemon)
+
def getStdOut(self, daemon):
- return self.getLog('out', daemon)
+ return self.getLog("out", daemon)
+
def getLog(self, log, daemon):
- return self.cmd('cat {}/{}/{}.{}'.format(self.logdir, self.name, daemon, log))
+ return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))
def checkRouterCores(self, reportLeaks=True, reportOnce=False):
if reportOnce and not self.reportCores:
reportMade = False
traces = ""
for daemon in self.daemons:
- if (self.daemons[daemon] == 1):
+ if self.daemons[daemon] == 1:
# Look for core file
- corefiles = glob.glob('{}/{}/{}_core*.dmp'.format(
- self.logdir, self.name, daemon))
- if (len(corefiles) > 0):
+ corefiles = glob.glob(
+ "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
+ )
+ if len(corefiles) > 0:
daemon_path = os.path.join(self.daemondir, daemon)
- backtrace = subprocess.check_output([
- "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0])
- ], shell=True)
- sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon))
+ backtrace = subprocess.check_output(
+ [
+ "gdb {} {} --batch -ex bt 2> /dev/null".format(
+ daemon_path, corefiles[0]
+ )
+ ],
+ shell=True,
+ )
+ sys.stderr.write(
+ "\n%s: %s crashed. Core file found - Backtrace follows:\n"
+ % (self.name, daemon)
+ )
sys.stderr.write("%s" % backtrace)
- traces = traces + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s" % (self.name, daemon, backtrace)
+ traces = (
+ traces
+ + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
+ % (self.name, daemon, backtrace)
+ )
reportMade = True
elif reportLeaks:
log = self.getStdErr(daemon)
if "memstats" in log:
- sys.stderr.write("%s: %s has memory leaks:\n" % (self.name, daemon))
- traces = traces + "\n%s: %s has memory leaks:\n" % (self.name, daemon)
+ sys.stderr.write(
+ "%s: %s has memory leaks:\n" % (self.name, daemon)
+ )
+ traces = traces + "\n%s: %s has memory leaks:\n" % (
+ self.name,
+ daemon,
+ )
log = re.sub("core_handler: ", "", log)
- log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n ## \1", log)
+ log = re.sub(
+ r"(showing active allocations in memory group [a-zA-Z0-9]+)",
+ r"\n ## \1",
+ log,
+ )
log = re.sub("memstats: ", " ", log)
sys.stderr.write(log)
reportMade = True
# Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
- if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon):
- sys.stderr.write("%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon))
- traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
+ if checkAddressSanitizerError(
+ self.getStdErr(daemon), self.name, daemon
+ ):
+ sys.stderr.write(
+ "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
+ )
+ traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
+ self.name,
+ daemon,
+ )
reportMade = True
if reportMade:
self.reportCores = False
global fatal_error
- daemonsRunning = self.cmd('vtysh -c "show logging" | grep "Logging configuration for"')
+ daemonsRunning = self.cmd(
+ 'vtysh -c "show logging" | grep "Logging configuration for"'
+ )
# Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
return "%s: vtysh killed by AddressSanitizer" % (self.name)
if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
if daemon is "staticd":
- sys.stderr.write("You may have a copy of staticd installed but are attempting to test against\n")
- sys.stderr.write("a version of FRR that does not have staticd, please cleanup the install dir\n")
+ sys.stderr.write(
+ "You may have a copy of staticd installed but are attempting to test against\n"
+ )
+ sys.stderr.write(
+ "a version of FRR that does not have staticd, please cleanup the install dir\n"
+ )
# Look for core file
- corefiles = glob.glob('{}/{}/{}_core*.dmp'.format(
- self.logdir, self.name, daemon))
- if (len(corefiles) > 0):
+ corefiles = glob.glob(
+ "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
+ )
+ if len(corefiles) > 0:
daemon_path = os.path.join(self.daemondir, daemon)
- backtrace = subprocess.check_output([
- "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0])
- ], shell=True)
- sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon))
+ backtrace = subprocess.check_output(
+ [
+ "gdb {} {} --batch -ex bt 2> /dev/null".format(
+ daemon_path, corefiles[0]
+ )
+ ],
+ shell=True,
+ )
+ sys.stderr.write(
+ "\n%s: %s crashed. Core file found - Backtrace follows:\n"
+ % (self.name, daemon)
+ )
sys.stderr.write("%s\n" % backtrace)
else:
# No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
- if os.path.isfile('{}/{}/{}.log'.format(self.logdir, self.name, daemon)):
- log_tail = subprocess.check_output([
- "tail -n20 {}/{}/{}.log 2> /dev/null".format(
- self.logdir, self.name, daemon)
- ], shell=True)
- sys.stderr.write("\nFrom %s %s %s log file:\n" % (self.routertype, self.name, daemon))
+ if os.path.isfile(
+ "{}/{}/{}.log".format(self.logdir, self.name, daemon)
+ ):
+ log_tail = subprocess.check_output(
+ [
+ "tail -n20 {}/{}/{}.log 2> /dev/null".format(
+ self.logdir, self.name, daemon
+ )
+ ],
+ shell=True,
+ )
+ sys.stderr.write(
+ "\nFrom %s %s %s log file:\n"
+ % (self.routertype, self.name, daemon)
+ )
sys.stderr.write("%s\n" % log_tail)
# Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
- if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon):
- return "%s: Daemon %s not running - killed by AddressSanitizer" % (self.name, daemon)
+ if checkAddressSanitizerError(
+ self.getStdErr(daemon), self.name, daemon
+ ):
+ return "%s: Daemon %s not running - killed by AddressSanitizer" % (
+ self.name,
+ daemon,
+ )
return "%s: Daemon %s not running" % (self.name, daemon)
return ""
# Make sure we have version information first
if self.version == None:
- self.version = self.cmd(os.path.join(self.daemondir, 'bgpd')+' -v').split()[2]
- logger.info('{}: running version: {}'.format(self.name,self.version))
+ self.version = self.cmd(
+ os.path.join(self.daemondir, "bgpd") + " -v"
+ ).split()[2]
+ logger.info("{}: running version: {}".format(self.name, self.version))
rversion = self.version
if rversion is None:
return False
result = version_cmp(rversion, version)
- if cmpop == '>=':
+ if cmpop == ">=":
return result >= 0
- if cmpop == '>':
+ if cmpop == ">":
return result > 0
- if cmpop == '=':
+ if cmpop == "=":
return result == 0
- if cmpop == '<':
+ if cmpop == "<":
return result < 0
- if cmpop == '<':
+ if cmpop == "<":
return result < 0
- if cmpop == '<=':
+ if cmpop == "<=":
return result <= 0
def get_ipv6_linklocal(self):
linklocal = []
- ifaces = self.cmd('ip -6 address')
+ ifaces = self.cmd("ip -6 address")
# Fix newlines (make them all the same)
- ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
- interface=""
- ll_per_if_count=0
+ ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
+ interface = ""
+ ll_per_if_count = 0
for line in ifaces:
- m = re.search('[0-9]+: ([^:@]+)[@if0-9:]+ <', line)
+ m = re.search("[0-9]+: ([^:@]+)[@if0-9:]+ <", line)
if m:
interface = m.group(1)
ll_per_if_count = 0
- m = re.search('inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link', line)
+ m = re.search(
+ "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
+ line,
+ )
if m:
local = m.group(1)
ll_per_if_count += 1
- if (ll_per_if_count > 1):
+ if ll_per_if_count > 1:
linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
else:
linklocal += [[interface, local]]
return linklocal
+
def daemon_available(self, daemon):
"Check if specified daemon is installed (and for ldp if kernel supports MPLS)"
daemon_path = os.path.join(self.daemondir, daemon)
if not os.path.isfile(daemon_path):
return False
- if (daemon == 'ldpd'):
- if version_cmp(platform.release(), '4.5') < 0:
+ if daemon == "ldpd":
+ if version_cmp(platform.release(), "4.5") < 0:
return False
- if not module_present('mpls-router', load=False):
+ if not module_present("mpls-router", load=False):
return False
- if not module_present('mpls-iptunnel', load=False):
+ if not module_present("mpls-iptunnel", load=False):
return False
return True
"Return the type of Router (frr or quagga)"
return self.routertype
+
def report_memory_leaks(self, filename_prefix, testscript):
"Report Memory Leaks to file prefixed with given string"
leakfound = False
filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
for daemon in self.daemons:
- if (self.daemons[daemon] == 1):
+ if self.daemons[daemon] == 1:
log = self.getStdErr(daemon)
if "memstats" in log:
# Found memory leak
- logger.info('\nRouter {} {} StdErr Log:\n{}'.format(
- self.name, daemon, log))
+ logger.info(
+ "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
+ )
if not leakfound:
leakfound = True
# Check if file already exists
leakfile = open(filename, "a")
if not fileexists:
# New file - add header
- leakfile.write("# Memory Leak Detection for topotest %s\n\n" % testscript)
+ leakfile.write(
+ "# Memory Leak Detection for topotest %s\n\n"
+ % testscript
+ )
leakfile.write("## Router %s\n" % self.name)
leakfile.write("### Process %s\n" % daemon)
log = re.sub("core_handler: ", "", log)
- log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n#### \1\n", log)
+ log = re.sub(
+ r"(showing active allocations in memory group [a-zA-Z0-9]+)",
+ r"\n#### \1\n",
+ log,
+ )
log = re.sub("memstats: ", " ", log)
leakfile.write(log)
leakfile.write("\n")
if leakfound:
leakfile.close()
+
class LinuxRouter(Router):
"A Linux Router Node with IPv4/IPv6 forwarding enabled."
def config(self, **params):
Router.config(self, **params)
# Enable forwarding on the router
- assert_sysctl(self, 'net.ipv4.ip_forward', 1)
- assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1)
+ assert_sysctl(self, "net.ipv4.ip_forward", 1)
+ assert_sysctl(self, "net.ipv6.conf.all.forwarding", 1)
# Enable coredumps
- assert_sysctl(self, 'kernel.core_uses_pid', 1)
- assert_sysctl(self, 'fs.suid_dumpable', 1)
- #this applies to the kernel not the namespace...
- #original on ubuntu 17.x, but apport won't save as in namespace
+ assert_sysctl(self, "kernel.core_uses_pid", 1)
+ assert_sysctl(self, "fs.suid_dumpable", 1)
+ # this applies to the kernel not the namespace...
+ # original on ubuntu 17.x, but apport won't save as in namespace
# |/usr/share/apport/apport %p %s %c %d %P
- corefile = '%e_core-sig_%s-pid_%p.dmp'
- assert_sysctl(self, 'kernel.core_pattern', corefile)
+ corefile = "%e_core-sig_%s-pid_%p.dmp"
+ assert_sysctl(self, "kernel.core_pattern", corefile)
def terminate(self):
"""
Terminate generic LinuxRouter Mininet instance
"""
- set_sysctl(self, 'net.ipv4.ip_forward', 0)
- set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
+ set_sysctl(self, "net.ipv4.ip_forward", 0)
+ set_sysctl(self, "net.ipv6.conf.all.forwarding", 0)
Router.terminate(self)
+
class FreeBSDRouter(Router):
"A FreeBSD Router Node with IPv4/IPv6 forwarding enabled."
"A Legacy Switch without OpenFlow"
def __init__(self, name, **params):
- OVSSwitch.__init__(self, name, failMode='standalone', **params)
+ OVSSwitch.__init__(self, name, failMode="standalone", **params)
self.switchIP = None
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+
# Import topogen and topotest helpers
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
from lib.topolog import logger
+
# and Finally pytest
import pytest
class OspfSrTopo(Topo):
"Test topology builder"
+
def build(self):
"Build function"
tgen = get_topogen(self)
# Check for mpls
if tgen.hasmpls is not True:
- tgen.set_error('MPLS not available, tests will be skipped')
+ tgen.set_error("MPLS not available, tests will be skipped")
# Create 4 routers
for routern in range(1, 5):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
# Interconect router 1 and 2
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
# Interconect router 3 and 2
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r3'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r2"])
# Interconect router 4 and 2
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['r4'])
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r4"])
+ switch.add_link(tgen.gears["r2"])
def setup_module(mod):
"Sets up the pytest environment"
- logger.info('\n\n---- Starting OSPF Segment Routing tests ----\n')
+ logger.info("\n\n---- Starting OSPF Segment Routing tests ----\n")
tgen = Topogen(OspfSrTopo, mod.__name__)
tgen.start_topology()
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_OSPF,
- os.path.join(CWD, '{}/ospfd.conf'.format(rname))
+ TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
)
# Initialize all routers.
# Verify that version, MPLS and Segment Routing are OK
for router in router_list.values():
# Check for Version
- if router.has_version('<', '4'):
- tgen.set_error('Unsupported FRR version')
+ if router.has_version("<", "4"):
+ tgen.set_error("Unsupported FRR version")
break
# Check that Segment Routing is available
output = tgen.gears[router.name].vtysh_cmd(
- "show ip ospf database segment-routing json")
+ "show ip ospf database segment-routing json"
+ )
if output.find("Unknown") != -1:
- tgen.set_error('Segment Routing is not available')
+ tgen.set_error("Segment Routing is not available")
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
- logger.info('\n\n---- OSPF Segment Routing tests End ----\n')
+ logger.info("\n\n---- OSPF Segment Routing tests End ----\n")
+
# Shared test function to validate expected output.
def compare_ospf_srdb(rname, expected):
and compare the obtained result with the expected output.
"""
tgen = get_topogen()
- current = tgen.gears[rname].vtysh_cmd(
- 'show ip ospf database segment-routing json')
- return topotest.difflines(current, expected,
- title1="Current output",
- title2="Expected output")
+ current = tgen.gears[rname].vtysh_cmd("show ip ospf database segment-routing json")
+ return topotest.difflines(
+ current, expected, title1="Current output", title2="Expected output"
+ )
def compare_mpls_table(rname, expected):
result with the expected output.
"""
tgen = get_topogen()
- current = tgen.gears[rname].vtysh_cmd('show mpls table json')
- return topotest.difflines(current, expected,
- title1="Current output",
- title2="Expected output")
+ current = tgen.gears[rname].vtysh_cmd("show mpls table json")
+ return topotest.difflines(
+ current, expected, title1="Current output", title2="Expected output"
+ )
def test_ospf_sr():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('--- test OSPF Segment Routing Data Base ---')
+ logger.info("--- test OSPF Segment Routing Data Base ---")
for rnum in range(1, 5):
- router = 'r{}'.format(rnum)
+ router = "r{}".format(rnum)
logger.info('\tRouter "%s"', router)
# Load expected results from the command
- reffile = os.path.join(CWD, '{}/ospf_srdb.json'.format(router))
+ reffile = os.path.join(CWD, "{}/ospf_srdb.json".format(router))
expected = open(reffile).read()
# Run test function until we get an result. Wait at most 60 seconds.
test_func = partial(compare_ospf_srdb, router, expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=25, wait=3)
- assert result, (
- 'OSPF did not start Segment Routing on {}:\n{}'
- ).format(router, diff)
+ result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3)
+ assert result, ("OSPF did not start Segment Routing on {}:\n{}").format(
+ router, diff
+ )
def test_ospf_kernel_route():
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- logger.info('--- test OSPF Segment Routing MPLS tables ---')
+ logger.info("--- test OSPF Segment Routing MPLS tables ---")
for rnum in range(1, 5):
- router = 'r{}'.format(rnum)
+ router = "r{}".format(rnum)
logger.info('\tRouter "%s"', router)
# Load expected results from the command
- reffile = os.path.join(CWD, '{}/zebra_mpls.json'.format(router))
+ reffile = os.path.join(CWD, "{}/zebra_mpls.json".format(router))
expected = open(reffile).read()
# Run test function until we get an result. Wait at most 60 seconds.
test_func = partial(compare_mpls_table, router, expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=25, wait=3)
- assert result, (
- 'OSPF did not properly instal MPLS table on {}:\n{}'
- ).format(router, diff)
+ result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3)
+        assert result, ("OSPF did not properly install MPLS table on {}:\n{}").format(
+ router, diff
+ )
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
VRF r1-cust1:
-O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, XX:XX:XX
+O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX
C>* 10.0.1.0/24 is directly connected, r1-eth0, XX:XX:XX
-O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, XX:XX:XX
-O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, XX:XX:XX
+O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, weight 1, XX:XX:XX
+O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r1-eth1, XX:XX:XX
-O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r1-eth1, XX:XX:XX
+O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r1-eth1, weight 1, XX:XX:XX
VRF r1-cust1:
-O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, XX:XX:XX
+O 10.0.1.0/24 [110/10] is directly connected, r1-eth0, weight 1, XX:XX:XX
C>* 10.0.1.0/24 is directly connected, r1-eth0, XX:XX:XX
-O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, XX:XX:XX
-O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, XX:XX:XX
+O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r1-eth1, weight 1, XX:XX:XX
+O 10.0.3.0/24 [110/10] is directly connected, r1-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r1-eth1, XX:XX:XX
VRF r2-cust1:
-O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, XX:XX:XX
-O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, XX:XX:XX
+O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX
+O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX
C>* 10.0.2.0/24 is directly connected, r2-eth0, XX:XX:XX
-O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, XX:XX:XX
+O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r2-eth1, XX:XX:XX
-O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r2-eth1, XX:XX:XX
+O>* 10.0.10.0/24 [110/20] via 10.0.3.1, r2-eth1, weight 1, XX:XX:XX
VRF r2-cust1:
-O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, XX:XX:XX
-O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, XX:XX:XX
+O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r2-eth1, weight 1, XX:XX:XX
+O 10.0.2.0/24 [110/10] is directly connected, r2-eth0, weight 1, XX:XX:XX
C>* 10.0.2.0/24 is directly connected, r2-eth0, XX:XX:XX
-O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, XX:XX:XX
+O 10.0.3.0/24 [110/10] is directly connected, r2-eth1, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r2-eth1, XX:XX:XX
VRF r3-cust1:
-O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r3-eth0, XX:XX:XX
-O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r3-eth0, XX:XX:XX
-O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, XX:XX:XX
+O>* 10.0.1.0/24 [110/20] via 10.0.3.2, r3-eth0, weight 1, XX:XX:XX
+O>* 10.0.2.0/24 [110/20] via 10.0.3.3, r3-eth0, weight 1, XX:XX:XX
+O 10.0.3.0/24 [110/10] is directly connected, r3-eth0, weight 1, XX:XX:XX
C>* 10.0.3.0/24 is directly connected, r3-eth0, XX:XX:XX
-O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, XX:XX:XX
+O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX
C>* 10.0.10.0/24 is directly connected, r3-eth1, XX:XX:XX
VRF r3-cust1:
-O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, XX:XX:XX
+O 10.0.10.0/24 [110/10] is directly connected, r3-eth1, weight 1, XX:XX:XX
C>* 10.0.10.0/24 is directly connected, r3-eth1, XX:XX:XX
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+
class OSPFTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# Create 3 routers
for routern in range(1, 4):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
# Create a empty network for router 1
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
# Create a empty network for router 2
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
# Interconect router 1, 2 and 3
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
# Create empty netowrk for router3
- switch = tgen.add_switch('s4')
- switch.add_link(tgen.gears['r3'])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r3"])
def setup_module(mod):
# check for zebra capability
for rname, router in router_list.iteritems():
- if router.check_capability(
- TopoRouter.RD_ZEBRA,
- '--vrfwnetns'
- ) == False:
- return pytest.skip('Skipping OSPF VRF NETNS feature. VRF NETNS backend not available on FRR')
-
- if os.system('ip netns list') != 0:
- return pytest.skip('Skipping OSPF VRF NETNS Test. NETNS not available on System')
+ if router.check_capability(TopoRouter.RD_ZEBRA, "--vrfwnetns") == False:
+ return pytest.skip(
+ "Skipping OSPF VRF NETNS feature. VRF NETNS backend not available on FRR"
+ )
+
+ if os.system("ip netns list") != 0:
+ return pytest.skip(
+ "Skipping OSPF VRF NETNS Test. NETNS not available on System"
+ )
- logger.info('Testing with VRF Namespace support')
+ logger.info("Testing with VRF Namespace support")
- cmds = ['if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi',
- 'ip netns add {0}-cust1',
- 'ip link set dev {0}-eth0 netns {0}-cust1',
- 'ip netns exec {0}-cust1 ifconfig {0}-eth0 up',
- 'ip link set dev {0}-eth1 netns {0}-cust1',
- 'ip netns exec {0}-cust1 ifconfig {0}-eth1 up']
+ cmds = [
+ "if [ -e /var/run/netns/{0}-cust1 ] ; then ip netns del {0}-cust1 ; fi",
+ "ip netns add {0}-cust1",
+ "ip link set dev {0}-eth0 netns {0}-cust1",
+ "ip netns exec {0}-cust1 ifconfig {0}-eth0 up",
+ "ip link set dev {0}-eth1 netns {0}-cust1",
+ "ip netns exec {0}-cust1 ifconfig {0}-eth1 up",
+ ]
for rname, router in router_list.iteritems():
router.load_config(
TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname)),
- '--vrfwnetns'
+ os.path.join(CWD, "{}/zebra.conf".format(rname)),
+ "--vrfwnetns",
)
router.load_config(
- TopoRouter.RD_OSPF,
- os.path.join(CWD, '{}/ospfd.conf'.format(rname))
+ TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
)
# Initialize all routers.
tgen.start_router()
for router in router_list.values():
- if router.has_version('<', '4.0'):
- tgen.set_error('unsupported version')
+ if router.has_version("<", "4.0"):
+ tgen.set_error("unsupported version")
def teardown_module(mod):
# move back rx-eth0 to default VRF
# delete rx-vrf
- cmds = ['ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1',
- 'ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1',
- 'ip netns delete {0}-cust1']
-
+ cmds = [
+ "ip netns exec {0}-cust1 ip link set {0}-eth0 netns 1",
+ "ip netns exec {0}-cust1 ip link set {0}-eth1 netns 1",
+ "ip netns delete {0}-cust1",
+ ]
+
router_list = tgen.routers()
for rname, router in router_list.iteritems():
for cmd in cmds:
tgen.net[rname].cmd(cmd.format(rname))
tgen.stop_topology()
+
# Shared test function to validate expected output.
def compare_show_ip_route_vrf(rname, expected):
"""
result with the expected output.
"""
tgen = get_topogen()
- vrf_name = '{0}-cust1'.format(rname)
+ vrf_name = "{0}-cust1".format(rname)
current = topotest.ip4_route_zebra(tgen.gears[rname], vrf_name)
- ret = topotest.difflines(current, expected,
- title1="Current output",
- title2="Expected output")
+ ret = topotest.difflines(
+ current, expected, title1="Current output", title2="Expected output"
+ )
return ret
+
def test_ospf_convergence():
"Test OSPF daemon convergence"
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
for rname, router in tgen.routers().iteritems():
logger.info('Waiting for router "%s" convergence', rname)
# Load expected results from the command
- reffile = os.path.join(CWD, '{}/ospfroute.txt'.format(rname))
+ reffile = os.path.join(CWD, "{}/ospfroute.txt".format(rname))
expected = open(reffile).read()
# Run test function until we get an result. Wait at most 60 seconds.
- test_func = partial(topotest.router_output_cmp,
- router,
- 'show ip ospf vrf {0}-cust1 route'.format(rname),
- expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=160, wait=0.5)
- assertmsg = 'OSPF did not converge on {}:\n{}'.format(rname, diff)
+ test_func = partial(
+ topotest.router_output_cmp,
+ router,
+ "show ip ospf vrf {0}-cust1 route".format(rname),
+ expected,
+ )
+ result, diff = topotest.run_and_expect(test_func, "", count=160, wait=0.5)
+ assertmsg = "OSPF did not converge on {}:\n{}".format(rname, diff)
assert result, assertmsg
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
rlist = tgen.routers().values()
for router in rlist:
logger.info('Checking OSPF IPv4 kernel routes in "%s"', router.name)
- reffile = os.path.join(CWD, '{}/zebraroute.txt'.format(router.name))
+ reffile = os.path.join(CWD, "{}/zebraroute.txt".format(router.name))
expected = open(reffile).read()
# Run test function until we get an result. Wait at most 60 seconds.
test_func = partial(compare_show_ip_route_vrf, router.name, expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=140, wait=0.5)
+ result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5)
assertmsg = 'OSPF IPv4 route mismatch in router "{}": {}'.format(
- router.name, diff)
+ router.name, diff
+ )
assert result, assertmsg
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
for rname, router in tgen.routers().iteritems():
- logger.info('Comparing router "%s" "show ip ospf vrf %s-cust1 json" output', router.name, router.name)
+ logger.info(
+ 'Comparing router "%s" "show ip ospf vrf %s-cust1 json" output',
+ router.name,
+ router.name,
+ )
expected = {
- '{}-cust1'.format(router.name) : {
- 'vrfName': '{}-cust1'.format(router.name),
- 'routerId': '10.0.255.{}'.format(rname[1:]),
- 'tosRoutesOnly': True,
- 'rfc2328Conform': True,
- 'spfScheduleDelayMsecs': 0,
- 'holdtimeMinMsecs': 50,
- 'holdtimeMaxMsecs': 5000,
- 'lsaMinIntervalMsecs': 5000,
- 'lsaMinArrivalMsecs': 1000,
- 'writeMultiplier': 20,
- 'refreshTimerMsecs': 10000,
- 'asbrRouter': 'injectingExternalRoutingInformation',
- 'attachedAreaCounter': 1,
- 'areas': {}
- }
+ "{}-cust1".format(router.name): {
+ "vrfName": "{}-cust1".format(router.name),
+ "routerId": "10.0.255.{}".format(rname[1:]),
+ "tosRoutesOnly": True,
+ "rfc2328Conform": True,
+ "spfScheduleDelayMsecs": 0,
+ "holdtimeMinMsecs": 50,
+ "holdtimeMaxMsecs": 5000,
+ "lsaMinIntervalMsecs": 5000,
+ "lsaMinArrivalMsecs": 1000,
+ "writeMultiplier": 20,
+ "refreshTimerMsecs": 10000,
+ "asbrRouter": "injectingExternalRoutingInformation",
+ "attachedAreaCounter": 1,
+ "areas": {},
}
+ }
# Area specific additional checks
- if router.name == 'r1' or router.name == 'r2' or router.name == 'r3':
- expected['{}-cust1'.format(router.name)]['areas']['0.0.0.0'] = {
- 'areaIfActiveCounter': 2,
- 'areaIfTotalCounter': 2,
- 'authentication': 'authenticationNone',
- 'backbone': True,
- 'lsaAsbrNumber': 0,
- 'lsaNetworkNumber': 1,
- 'lsaNssaNumber': 0,
- 'lsaNumber': 4,
- 'lsaOpaqueAreaNumber': 0,
- 'lsaOpaqueLinkNumber': 0,
- 'lsaRouterNumber': 3,
- 'lsaSummaryNumber': 0,
- 'nbrFullAdjacentCounter': 2,
+ if router.name == "r1" or router.name == "r2" or router.name == "r3":
+ expected["{}-cust1".format(router.name)]["areas"]["0.0.0.0"] = {
+ "areaIfActiveCounter": 2,
+ "areaIfTotalCounter": 2,
+ "authentication": "authenticationNone",
+ "backbone": True,
+ "lsaAsbrNumber": 0,
+ "lsaNetworkNumber": 1,
+ "lsaNssaNumber": 0,
+ "lsaNumber": 4,
+ "lsaOpaqueAreaNumber": 0,
+ "lsaOpaqueLinkNumber": 0,
+ "lsaRouterNumber": 3,
+ "lsaSummaryNumber": 0,
+ "nbrFullAdjacentCounter": 2,
}
- test_func = partial(topotest.router_json_cmp,
- router,
- 'show ip ospf vrf {0}-cust1 json'.format(rname),
- expected)
- _, diff = topotest.run_and_expect(test_func, None,
- count=10, wait=0.5)
+ test_func = partial(
+ topotest.router_json_cmp,
+ router,
+ "show ip ospf vrf {0}-cust1 json".format(rname),
+ expected,
+ )
+ _, diff = topotest.run_and_expect(test_func, None, count=10, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(rname)
assert diff is None, assertmsg
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
# Simulate a network down event on router3 switch3 interface.
- router3 = tgen.gears['r3']
- topotest.interface_set_status(router3, 'r3-eth0', ifaceaction=False, vrf_name='r3-cust1')
+ router3 = tgen.gears["r3"]
+ topotest.interface_set_status(
+ router3, "r3-eth0", ifaceaction=False, vrf_name="r3-cust1"
+ )
# Expect convergence on all routers
for rname, router in tgen.routers().iteritems():
logger.info('Waiting for router "%s" convergence after link failure', rname)
# Load expected results from the command
- reffile = os.path.join(CWD, '{}/ospfroute_down.txt'.format(rname))
+ reffile = os.path.join(CWD, "{}/ospfroute_down.txt".format(rname))
expected = open(reffile).read()
# Run test function until we get an result. Wait at most 60 seconds.
- test_func = partial(topotest.router_output_cmp,
- router,
- 'show ip ospf vrf {0}-cust1 route'.format(rname),
- expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=140, wait=0.5)
- assertmsg = 'OSPF did not converge on {}:\n{}'.format(rname, diff)
+ test_func = partial(
+ topotest.router_output_cmp,
+ router,
+ "show ip ospf vrf {0}-cust1 route".format(rname),
+ expected,
+ )
+ result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5)
+ assertmsg = "OSPF did not converge on {}:\n{}".format(rname, diff)
assert result, assertmsg
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
rlist = tgen.routers().values()
for router in rlist:
- logger.info('Checking OSPF IPv4 kernel routes in "%s" after link down', router.name)
+ logger.info(
+ 'Checking OSPF IPv4 kernel routes in "%s" after link down', router.name
+ )
- str='{0}-cust1'.format(router.name)
- reffile = os.path.join(CWD, '{}/zebraroutedown.txt'.format(router.name))
+ str = "{0}-cust1".format(router.name)
+ reffile = os.path.join(CWD, "{}/zebraroutedown.txt".format(router.name))
expected = open(reffile).read()
# Run test function until we get an result. Wait at most 60 seconds.
test_func = partial(compare_show_ip_route_vrf, router.name, expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=140, wait=0.5)
+ result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5)
assertmsg = 'OSPF IPv4 route mismatch in router "{}" after link down: {}'.format(
- router.name, diff)
+ router.name, diff
+ )
assert result, assertmsg
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+
class OSPFTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
# Create 4 routers
for routern in range(1, 5):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
# Create a empty network for router 1
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
# Create a empty network for router 2
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'])
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"])
# Interconect router 1, 2 and 3
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r2'])
- switch.add_link(tgen.gears['r3'])
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r2"])
+ switch.add_link(tgen.gears["r3"])
# Create empty netowrk for router3
- switch = tgen.add_switch('s4')
- switch.add_link(tgen.gears['r3'])
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r3"])
# Interconect router 3 and 4
- switch = tgen.add_switch('s5')
- switch.add_link(tgen.gears['r3'])
- switch.add_link(tgen.gears['r4'])
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r3"])
+ switch.add_link(tgen.gears["r4"])
# Create a empty network for router 4
- switch = tgen.add_switch('s6')
- switch.add_link(tgen.gears['r4'])
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r4"])
+
def setup_module(mod):
"Sets up the pytest environment"
tgen = Topogen(OSPFTopo, mod.__name__)
tgen.start_topology()
- ospf6_config = 'ospf6d.conf'
- if tgen.gears['r1'].has_version('<', '4.0'):
- ospf6_config = 'ospf6d.conf-pre-v4'
+ ospf6_config = "ospf6d.conf"
+ if tgen.gears["r1"].has_version("<", "4.0"):
+ ospf6_config = "ospf6d.conf-pre-v4"
router_list = tgen.routers()
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_OSPF,
- os.path.join(CWD, '{}/ospfd.conf'.format(rname))
+ TopoRouter.RD_OSPF, os.path.join(CWD, "{}/ospfd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_OSPF6,
- os.path.join(CWD, '{}/{}'.format(rname, ospf6_config))
+ TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/{}".format(rname, ospf6_config))
)
# Initialize all routers.
tgen.start_router()
+
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
result with the expected output.
"""
tgen = get_topogen()
- current = tgen.gears[rname].vtysh_cmd('show ipv6 ospf6 route')
+ current = tgen.gears[rname].vtysh_cmd("show ipv6 ospf6 route")
# Remove the link addresses
- current = re.sub(r'fe80::[^ ]+', 'fe80::xxxx:xxxx:xxxx:xxxx', current)
- expected = re.sub(r'fe80::[^ ]+', 'fe80::xxxx:xxxx:xxxx:xxxx', expected)
+ current = re.sub(r"fe80::[^ ]+", "fe80::xxxx:xxxx:xxxx:xxxx", current)
+ expected = re.sub(r"fe80::[^ ]+", "fe80::xxxx:xxxx:xxxx:xxxx", expected)
# Remove the time
- current = re.sub(r'\d+:\d{2}:\d{2}', '', current)
- expected = re.sub(r'\d+:\d{2}:\d{2}', '', expected)
+ current = re.sub(r"\d+:\d{2}:\d{2}", "", current)
+ expected = re.sub(r"\d+:\d{2}:\d{2}", "", expected)
+
+ return topotest.difflines(
+ topotest.normalize_text(current),
+ topotest.normalize_text(expected),
+ title1="Current output",
+ title2="Expected output",
+ )
- return topotest.difflines(topotest.normalize_text(current),
- topotest.normalize_text(expected),
- title1="Current output",
- title2="Expected output")
def test_ospf_convergence():
"Test OSPF daemon convergence"
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
for router, rnode in tgen.routers().iteritems():
logger.info('Waiting for router "%s" convergence', router)
# Load expected results from the command
- reffile = os.path.join(CWD, '{}/ospfroute.txt'.format(router))
+ reffile = os.path.join(CWD, "{}/ospfroute.txt".format(router))
expected = open(reffile).read()
# Run test function until we get an result. Wait at most 80 seconds.
test_func = partial(
- topotest.router_output_cmp, rnode, 'show ip ospf route', expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=160, wait=0.5)
- assert result, 'OSPF did not converge on {}:\n{}'.format(router, diff)
+ topotest.router_output_cmp, rnode, "show ip ospf route", expected
+ )
+ result, diff = topotest.run_and_expect(test_func, "", count=160, wait=0.5)
+ assert result, "OSPF did not converge on {}:\n{}".format(router, diff)
+
def test_ospf_kernel_route():
"Test OSPF kernel route installation"
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
rlist = tgen.routers().values()
for router in rlist:
routes = topotest.ip4_route(router)
expected = {
- '10.0.1.0/24': {},
- '10.0.2.0/24': {},
- '10.0.3.0/24': {},
- '10.0.10.0/24': {},
- '172.16.0.0/24': {},
- '172.16.1.0/24': {},
+ "10.0.1.0/24": {},
+ "10.0.2.0/24": {},
+ "10.0.3.0/24": {},
+ "10.0.10.0/24": {},
+ "172.16.0.0/24": {},
+ "172.16.1.0/24": {},
}
assertmsg = 'OSPF IPv4 route mismatch in router "{}"'.format(router.name)
assert topotest.json_cmp(routes, expected) is None, assertmsg
+
def test_ospf6_convergence():
"Test OSPF6 daemon convergence"
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
- ospf6route_file = '{}/ospf6route_ecmp.txt'
+ ospf6route_file = "{}/ospf6route_ecmp.txt"
for rnum in range(1, 5):
- router = 'r{}'.format(rnum)
+ router = "r{}".format(rnum)
logger.info('Waiting for router "%s" IPv6 OSPF convergence', router)
# Run test function until we get an result. Wait at most 60 seconds.
test_func = partial(compare_show_ipv6_ospf6, router, expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=25, wait=3)
+ result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3)
if (not result) and (rnum == 1):
# Didn't match the new ECMP version - try the old pre-ECMP format
- ospf6route_file = '{}/ospf6route.txt'
+ ospf6route_file = "{}/ospf6route.txt"
# Load expected results from the command
reffile = os.path.join(CWD, ospf6route_file.format(router))
expected = open(reffile).read()
test_func = partial(compare_show_ipv6_ospf6, router, expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=1, wait=3)
+ result, diff = topotest.run_and_expect(test_func, "", count=1, wait=3)
if not result:
# Didn't match the old version - switch back to new ECMP version
# and fail
- ospf6route_file = '{}/ospf6route_ecmp.txt'
+ ospf6route_file = "{}/ospf6route_ecmp.txt"
# Load expected results from the command
reffile = os.path.join(CWD, ospf6route_file.format(router))
expected = open(reffile).read()
test_func = partial(compare_show_ipv6_ospf6, router, expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=1, wait=3)
+ result, diff = topotest.run_and_expect(test_func, "", count=1, wait=3)
+
+ assert result, "OSPF6 did not converge on {}:\n{}".format(router, diff)
- assert result, 'OSPF6 did not converge on {}:\n{}'.format(router, diff)
def test_ospf6_kernel_route():
"Test OSPF kernel route installation"
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
rlist = tgen.routers().values()
for router in rlist:
routes = topotest.ip6_route(router)
expected = {
- '2001:db8:1::/64': {},
- '2001:db8:2::/64': {},
- '2001:db8:3::/64': {},
- '2001:db8:100::/64': {},
- '2001:db8:200::/64': {},
- '2001:db8:300::/64': {},
+ "2001:db8:1::/64": {},
+ "2001:db8:2::/64": {},
+ "2001:db8:3::/64": {},
+ "2001:db8:100::/64": {},
+ "2001:db8:200::/64": {},
+ "2001:db8:300::/64": {},
}
assertmsg = 'OSPF IPv6 route mismatch in router "{}"'.format(router.name)
assert topotest.json_cmp(routes, expected) is None, assertmsg
+
def test_ospf_json():
"Test 'show ip ospf json' output for coherency."
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
for rnum in range(1, 5):
- router = tgen.gears['r{}'.format(rnum)]
+ router = tgen.gears["r{}".format(rnum)]
logger.info('Comparing router "%s" "show ip ospf json" output', router.name)
expected = {
- 'routerId': '10.0.255.{}'.format(rnum),
- 'tosRoutesOnly': True,
- 'rfc2328Conform': True,
- 'spfScheduleDelayMsecs': 0,
- 'holdtimeMinMsecs': 50,
- 'holdtimeMaxMsecs': 5000,
- 'lsaMinIntervalMsecs': 5000,
- 'lsaMinArrivalMsecs': 1000,
- 'writeMultiplier': 20,
- 'refreshTimerMsecs': 10000,
- 'asbrRouter': 'injectingExternalRoutingInformation',
- 'attachedAreaCounter': 1,
- 'areas': {}
+ "routerId": "10.0.255.{}".format(rnum),
+ "tosRoutesOnly": True,
+ "rfc2328Conform": True,
+ "spfScheduleDelayMsecs": 0,
+ "holdtimeMinMsecs": 50,
+ "holdtimeMaxMsecs": 5000,
+ "lsaMinIntervalMsecs": 5000,
+ "lsaMinArrivalMsecs": 1000,
+ "writeMultiplier": 20,
+ "refreshTimerMsecs": 10000,
+ "asbrRouter": "injectingExternalRoutingInformation",
+ "attachedAreaCounter": 1,
+ "areas": {},
}
# Area specific additional checks
- if router.name == 'r1' or router.name == 'r2' or router.name == 'r3':
- expected['areas']['0.0.0.0'] = {
- 'areaIfActiveCounter': 2,
- 'areaIfTotalCounter': 2,
- 'authentication': 'authenticationNone',
- 'backbone': True,
- 'lsaAsbrNumber': 1,
- 'lsaNetworkNumber': 1,
- 'lsaNssaNumber': 0,
- 'lsaNumber': 7,
- 'lsaOpaqueAreaNumber': 0,
- 'lsaOpaqueLinkNumber': 0,
- 'lsaRouterNumber': 3,
- 'lsaSummaryNumber': 2,
- 'nbrFullAdjacentCounter': 2,
+ if router.name == "r1" or router.name == "r2" or router.name == "r3":
+ expected["areas"]["0.0.0.0"] = {
+ "areaIfActiveCounter": 2,
+ "areaIfTotalCounter": 2,
+ "authentication": "authenticationNone",
+ "backbone": True,
+ "lsaAsbrNumber": 1,
+ "lsaNetworkNumber": 1,
+ "lsaNssaNumber": 0,
+ "lsaNumber": 7,
+ "lsaOpaqueAreaNumber": 0,
+ "lsaOpaqueLinkNumber": 0,
+ "lsaRouterNumber": 3,
+ "lsaSummaryNumber": 2,
+ "nbrFullAdjacentCounter": 2,
}
- if router.name == 'r3' or router.name == 'r4':
- expected['areas']['0.0.0.1'] = {
- 'areaIfActiveCounter': 1,
- 'areaIfTotalCounter': 1,
- 'authentication': 'authenticationNone',
- 'lsaAsbrNumber': 2,
- 'lsaNetworkNumber': 1,
- 'lsaNssaNumber': 0,
- 'lsaNumber': 9,
- 'lsaOpaqueAreaNumber': 0,
- 'lsaOpaqueLinkNumber': 0,
- 'lsaRouterNumber': 2,
- 'lsaSummaryNumber': 4,
- 'nbrFullAdjacentCounter': 1,
+ if router.name == "r3" or router.name == "r4":
+ expected["areas"]["0.0.0.1"] = {
+ "areaIfActiveCounter": 1,
+ "areaIfTotalCounter": 1,
+ "authentication": "authenticationNone",
+ "lsaAsbrNumber": 2,
+ "lsaNetworkNumber": 1,
+ "lsaNssaNumber": 0,
+ "lsaNumber": 9,
+ "lsaOpaqueAreaNumber": 0,
+ "lsaOpaqueLinkNumber": 0,
+ "lsaRouterNumber": 2,
+ "lsaSummaryNumber": 4,
+ "nbrFullAdjacentCounter": 1,
}
# r4 has more interfaces for area 0.0.0.1
- if router.name == 'r4':
- expected['areas']['0.0.0.1'].update({
- 'areaIfActiveCounter': 2,
- 'areaIfTotalCounter': 2,
- })
+ if router.name == "r4":
+ expected["areas"]["0.0.0.1"].update(
+ {"areaIfActiveCounter": 2, "areaIfTotalCounter": 2,}
+ )
# router 3 has an additional area
- if router.name == 'r3':
- expected['attachedAreaCounter'] = 2
+ if router.name == "r3":
+ expected["attachedAreaCounter"] = 2
- output = router.vtysh_cmd('show ip ospf json', isjson=True)
+ output = router.vtysh_cmd("show ip ospf json", isjson=True)
result = topotest.json_cmp(output, expected)
- assert result is None, '"{}" JSON output mismatches the expected result'.format(router.name)
+ assert result is None, '"{}" JSON output mismatches the expected result'.format(
+ router.name
+ )
+
def test_ospf_link_down():
"Test OSPF convergence after a link goes down"
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
# Simulate a network down event on router3 switch3 interface.
- router3 = tgen.gears['r3']
- router3.peer_link_enable('r3-eth0', False)
+ router3 = tgen.gears["r3"]
+ router3.peer_link_enable("r3-eth0", False)
# Expect convergence on all routers
for router, rnode in tgen.routers().iteritems():
logger.info('Waiting for router "%s" convergence after link failure', router)
# Load expected results from the command
- reffile = os.path.join(CWD, '{}/ospfroute_down.txt'.format(router))
+ reffile = os.path.join(CWD, "{}/ospfroute_down.txt".format(router))
expected = open(reffile).read()
# Run test function until we get an result. Wait at most 80 seconds.
test_func = partial(
- topotest.router_output_cmp, rnode, 'show ip ospf route', expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=140, wait=0.5)
- assert result, 'OSPF did not converge on {}:\n{}'.format(router, diff)
+ topotest.router_output_cmp, rnode, "show ip ospf route", expected
+ )
+ result, diff = topotest.run_and_expect(test_func, "", count=140, wait=0.5)
+ assert result, "OSPF did not converge on {}:\n{}".format(router, diff)
+
def test_ospf_link_down_kernel_route():
"Test OSPF kernel route installation"
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
rlist = tgen.routers().values()
for router in rlist:
- logger.info('Checking OSPF IPv4 kernel routes in "%s" after link down', router.name)
+ logger.info(
+ 'Checking OSPF IPv4 kernel routes in "%s" after link down', router.name
+ )
routes = topotest.ip4_route(router)
expected = {
- '10.0.1.0/24': {},
- '10.0.2.0/24': {},
- '10.0.3.0/24': {},
- '10.0.10.0/24': {},
- '172.16.0.0/24': {},
- '172.16.1.0/24': {},
+ "10.0.1.0/24": {},
+ "10.0.2.0/24": {},
+ "10.0.3.0/24": {},
+ "10.0.10.0/24": {},
+ "172.16.0.0/24": {},
+ "172.16.1.0/24": {},
}
- if router.name == 'r1' or router.name == 'r2':
- expected.update({
- '10.0.10.0/24': None,
- '172.16.0.0/24': None,
- '172.16.1.0/24': None,
- })
- elif router.name == 'r3' or router.name == 'r4':
- expected.update({
- '10.0.1.0/24': None,
- '10.0.2.0/24': None,
- })
+ if router.name == "r1" or router.name == "r2":
+ expected.update(
+ {"10.0.10.0/24": None, "172.16.0.0/24": None, "172.16.1.0/24": None,}
+ )
+ elif router.name == "r3" or router.name == "r4":
+ expected.update(
+ {"10.0.1.0/24": None, "10.0.2.0/24": None,}
+ )
# Route '10.0.3.0' is no longer available for r4 since it is down.
- if router.name == 'r4':
- expected.update({
- '10.0.3.0/24': None,
- })
- assertmsg = 'OSPF IPv4 route mismatch in router "{}" after link down'.format(router.name)
+ if router.name == "r4":
+ expected.update(
+ {"10.0.3.0/24": None,}
+ )
+ assertmsg = 'OSPF IPv4 route mismatch in router "{}" after link down'.format(
+ router.name
+ )
assert topotest.json_cmp(routes, expected) is None, assertmsg
+
def test_ospf6_link_down():
"Test OSPF6 daemon convergence after link goes down"
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
for rnum in range(1, 5):
- router = 'r{}'.format(rnum)
+ router = "r{}".format(rnum)
- logger.info('Waiting for router "%s" IPv6 OSPF convergence after link down', router)
+ logger.info(
+ 'Waiting for router "%s" IPv6 OSPF convergence after link down', router
+ )
# Load expected results from the command
- reffile = os.path.join(CWD, '{}/ospf6route_down.txt'.format(router))
+ reffile = os.path.join(CWD, "{}/ospf6route_down.txt".format(router))
expected = open(reffile).read()
# Run test function until we get an result. Wait at most 60 seconds.
test_func = partial(compare_show_ipv6_ospf6, router, expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=25, wait=3)
- assert result, 'OSPF6 did not converge on {}:\n{}'.format(router, diff)
+ result, diff = topotest.run_and_expect(test_func, "", count=25, wait=3)
+ assert result, "OSPF6 did not converge on {}:\n{}".format(router, diff)
+
def test_ospf6_link_down_kernel_route():
"Test OSPF kernel route installation"
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
rlist = tgen.routers().values()
for router in rlist:
- logger.info('Checking OSPF IPv6 kernel routes in "%s" after link down', router.name)
+ logger.info(
+ 'Checking OSPF IPv6 kernel routes in "%s" after link down', router.name
+ )
routes = topotest.ip6_route(router)
expected = {
- '2001:db8:1::/64': {},
- '2001:db8:2::/64': {},
- '2001:db8:3::/64': {},
- '2001:db8:100::/64': {},
- '2001:db8:200::/64': {},
- '2001:db8:300::/64': {},
+ "2001:db8:1::/64": {},
+ "2001:db8:2::/64": {},
+ "2001:db8:3::/64": {},
+ "2001:db8:100::/64": {},
+ "2001:db8:200::/64": {},
+ "2001:db8:300::/64": {},
}
- if router.name == 'r1' or router.name == 'r2':
- expected.update({
- '2001:db8:100::/64': None,
- '2001:db8:200::/64': None,
- '2001:db8:300::/64': None,
- })
- elif router.name == 'r3' or router.name == 'r4':
- expected.update({
- '2001:db8:1::/64': None,
- '2001:db8:2::/64': None,
- })
+ if router.name == "r1" or router.name == "r2":
+ expected.update(
+ {
+ "2001:db8:100::/64": None,
+ "2001:db8:200::/64": None,
+ "2001:db8:300::/64": None,
+ }
+ )
+ elif router.name == "r3" or router.name == "r4":
+ expected.update(
+ {"2001:db8:1::/64": None, "2001:db8:2::/64": None,}
+ )
# Route '2001:db8:3::/64' is no longer available for r4 since it is down.
- if router.name == 'r4':
- expected.update({
- '2001:db8:3::/64': None,
- })
- assertmsg = 'OSPF IPv6 route mismatch in router "{}" after link down'.format(router.name)
+ if router.name == "r4":
+ expected.update(
+ {"2001:db8:3::/64": None,}
+ )
+ assertmsg = 'OSPF IPv6 route mismatch in router "{}" after link down'.format(
+ router.name
+ )
assert topotest.json_cmp(routes, expected) is None, assertmsg
+
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
-O fc00:1:1:1::/64 [110/10] is directly connected, r1-stubnet, XX:XX:XX
-O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX
-O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX
-O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX
-O fc00:a:a:a::/64 [110/10] is directly connected, r1-sw5, XX:XX:XX
-O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX
-O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX
-O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX
-O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, XX:XX:XX
+O fc00:1:1:1::/64 [110/10] is directly connected, r1-stubnet, weight 1, XX:XX:XX
+O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX
+O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX
+O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX
+O fc00:a:a:a::/64 [110/10] is directly connected, r1-sw5, weight 1, XX:XX:XX
+O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX
+O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX
+O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX
+O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r1-sw5, weight 1, XX:XX:XX
-O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX
-O fc00:2:2:2::/64 [110/10] is directly connected, r2-stubnet, XX:XX:XX
-O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX
-O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX
-O fc00:a:a:a::/64 [110/10] is directly connected, r2-sw5, XX:XX:XX
-O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX
-O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX
-O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX
-O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, XX:XX:XX
+O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX
+O fc00:2:2:2::/64 [110/10] is directly connected, r2-stubnet, weight 1, XX:XX:XX
+O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX
+O>* fc00:4:4:4::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX
+O fc00:a:a:a::/64 [110/10] is directly connected, r2-sw5, weight 1, XX:XX:XX
+O>* fc00:b:b:b::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX
+O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX
+O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX
+O>* fc00:4444:4444:4444::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r2-sw5, weight 1, XX:XX:XX
-O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX
-O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX
-O fc00:3:3:3::/64 [110/10] is directly connected, r3-stubnet, XX:XX:XX
-O>* fc00:4:4:4::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, XX:XX:XX
-O fc00:a:a:a::/64 [110/10] is directly connected, r3-sw5, XX:XX:XX
-O fc00:b:b:b::/64 [110/10] is directly connected, r3-sw6, XX:XX:XX
-O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX
-O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, XX:XX:XX
-O>* fc00:4444:4444:4444::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, XX:XX:XX
+O>* fc00:1:1:1::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX
+O>* fc00:2:2:2::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX
+O fc00:3:3:3::/64 [110/10] is directly connected, r3-stubnet, weight 1, XX:XX:XX
+O>* fc00:4:4:4::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX
+O fc00:a:a:a::/64 [110/10] is directly connected, r3-sw5, weight 1, XX:XX:XX
+O fc00:b:b:b::/64 [110/10] is directly connected, r3-sw6, weight 1, XX:XX:XX
+O>* fc00:1111:1111:1111::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX
+O>* fc00:2222:2222:2222::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw5, weight 1, XX:XX:XX
+O>* fc00:4444:4444:4444::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r3-sw6, weight 1, XX:XX:XX
-O>* fc00:1:1:1::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX
-O>* fc00:2:2:2::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX
-O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX
-O fc00:4:4:4::/64 [110/10] is directly connected, r4-stubnet, XX:XX:XX
-O>* fc00:a:a:a::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX
-O fc00:b:b:b::/64 [110/10] is directly connected, r4-sw6, XX:XX:XX
-O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX
-O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX
-O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, XX:XX:XX
+O>* fc00:1:1:1::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX
+O>* fc00:2:2:2::/64 [110/30] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX
+O>* fc00:3:3:3::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX
+O fc00:4:4:4::/64 [110/10] is directly connected, r4-stubnet, weight 1, XX:XX:XX
+O>* fc00:a:a:a::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX
+O fc00:b:b:b::/64 [110/10] is directly connected, r4-sw6, weight 1, XX:XX:XX
+O>* fc00:1111:1111:1111::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX
+O>* fc00:2222:2222:2222::/64 [110/20] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX
+O>* fc00:3333:3333:3333::/64 [110/10] via fe80::XXXX:XXXX:XXXX:XXXX, r4-sw6, weight 1, XX:XX:XX
# Save the Current Working Directory to find configuration files later.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
##
#####################################################
+
class NetworkTopo(Topo):
"OSPFv3 (IPv6) Test Topology 1"
# Create 4 routers
for routern in range(1, 5):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
#
# Wire up the switches and routers
#
# Create a empty network for router 1
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'], nodeif='r1-stubnet')
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"], nodeif="r1-stubnet")
# Create a empty network for router 2
- switch = tgen.add_switch('s2')
- switch.add_link(tgen.gears['r2'], nodeif='r2-stubnet')
+ switch = tgen.add_switch("s2")
+ switch.add_link(tgen.gears["r2"], nodeif="r2-stubnet")
# Create a empty network for router 3
- switch = tgen.add_switch('s3')
- switch.add_link(tgen.gears['r3'], nodeif='r3-stubnet')
+ switch = tgen.add_switch("s3")
+ switch.add_link(tgen.gears["r3"], nodeif="r3-stubnet")
# Create a empty network for router 4
- switch = tgen.add_switch('s4')
- switch.add_link(tgen.gears['r4'], nodeif='r4-stubnet')
+ switch = tgen.add_switch("s4")
+ switch.add_link(tgen.gears["r4"], nodeif="r4-stubnet")
# Interconnect routers 1, 2, and 3
- switch = tgen.add_switch('s5')
- switch.add_link(tgen.gears['r1'], nodeif='r1-sw5')
- switch.add_link(tgen.gears['r2'], nodeif='r2-sw5')
- switch.add_link(tgen.gears['r3'], nodeif='r3-sw5')
+ switch = tgen.add_switch("s5")
+ switch.add_link(tgen.gears["r1"], nodeif="r1-sw5")
+ switch.add_link(tgen.gears["r2"], nodeif="r2-sw5")
+ switch.add_link(tgen.gears["r3"], nodeif="r3-sw5")
# Interconnect routers 3 and 4
- switch = tgen.add_switch('s6')
- switch.add_link(tgen.gears['r3'], nodeif='r3-sw6')
- switch.add_link(tgen.gears['r4'], nodeif='r4-sw6')
+ switch = tgen.add_switch("s6")
+ switch.add_link(tgen.gears["r3"], nodeif="r3-sw6")
+ switch.add_link(tgen.gears["r4"], nodeif="r4-sw6")
#####################################################
##
#####################################################
+
def setup_module(mod):
"Sets up the pytest environment"
router_list = tgen.routers()
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_OSPF6,
- os.path.join(CWD, '{}/ospf6d.conf'.format(rname))
+ TopoRouter.RD_OSPF6, os.path.join(CWD, "{}/ospf6d.conf".format(rname))
)
# Initialize all routers.
pytest.skip(tgen.errors)
# For debugging, uncomment the next line
- #tgen.mininet_cli()
+ # tgen.mininet_cli()
# Wait for OSPF6 to converge (All Neighbors in either Full or TwoWay State)
logger.info("Waiting for OSPF6 convergence")
# Set up for regex
- pat1 = re.compile('^[0-9]')
- pat2 = re.compile('Full')
+ pat1 = re.compile("^[0-9]")
+ pat2 = re.compile("Full")
timeout = 60
while timeout > 0:
# Look for any node not yet converged
for router, rnode in tgen.routers().iteritems():
- resStr = rnode.vtysh_cmd('show ipv6 ospf neigh')
+ resStr = rnode.vtysh_cmd("show ipv6 ospf neigh")
isConverged = False
break
if isConverged == False:
- logger.info('Waiting for {}'.format(router))
+ logger.info("Waiting for {}".format(router))
sys.stdout.flush()
break
if isConverged:
- logger.info('Done')
+ logger.info("Done")
break
else:
sleep(5)
if timeout == 0:
# Bail out with error if a router fails to converge
- ospfStatus = rnode.vtysh_cmd('show ipv6 ospf neigh')
+ ospfStatus = rnode.vtysh_cmd("show ipv6 ospf neigh")
assert False, "OSPFv6 did not converge:\n{}".format(ospfStatus)
logger.info("OSPFv3 converged.")
if tgen.routers_have_failure():
assert tgen.errors == "", tgen.errors
+
def compare_show_ipv6(rname, expected):
"""
Calls 'show ipv6 route' for router `rname` and compare the obtained
# Use just the 'O'spf lines of the output
linearr = []
for line in current.splitlines():
- if re.match('^O', line):
+ if re.match("^O", line):
linearr.append(line)
- current = '\n'.join(linearr)
+ current = "\n".join(linearr)
+
+ return topotest.difflines(
+ topotest.normalize_text(current),
+ topotest.normalize_text(expected),
+ title1="Current output",
+ title2="Expected output",
+ )
- return topotest.difflines(topotest.normalize_text(current),
- topotest.normalize_text(expected),
- title1="Current output",
- title2="Expected output")
def test_ospfv3_routingTable():
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
# For debugging, uncomment the next line
# tgen.mininet_cli()
logger.info('Waiting for router "%s" convergence', router)
# Load expected results from the command
- reffile = os.path.join(CWD, '{}/show_ipv6_route.ref'.format(router))
+ reffile = os.path.join(CWD, "{}/show_ipv6_route.ref".format(router))
expected = open(reffile).read()
# Run test function until we get an result. Wait at most 60 seconds.
- test_func = partial(
- compare_show_ipv6, router, expected)
- result, diff = topotest.run_and_expect(test_func, '',
- count=120, wait=0.5)
- assert result, 'OSPFv3 did not converge on {}:\n{}'.format(router, diff)
+ test_func = partial(compare_show_ipv6, router, expected)
+ result, diff = topotest.run_and_expect(test_func, "", count=120, wait=0.5)
+ assert result, "OSPFv3 did not converge on {}:\n{}".format(router, diff)
def test_linux_ipv6_kernel_routingTable():
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
# Verify Linux Kernel Routing Table
logger.info("Verifying Linux IPv6 Kernel Routing Table")
# each run and we need to translate them
linklocals = []
for i in range(1, 5):
- linklocals += tgen.net['r{}'.format(i)].get_ipv6_linklocal()
+ linklocals += tgen.net["r{}".format(i)].get_ipv6_linklocal()
# Now compare the routing tables (after substituting link-local addresses)
for i in range(1, 5):
# Actual output from router
- actual = tgen.gears['r{}'.format(i)].run('ip -6 route').rstrip()
+ actual = tgen.gears["r{}".format(i)].run("ip -6 route").rstrip()
if "nhid" in actual:
- refTableFile = os.path.join(CWD, 'r{}/ip_6_address.nhg.ref'.format(i))
+ refTableFile = os.path.join(CWD, "r{}/ip_6_address.nhg.ref".format(i))
else:
- refTableFile = os.path.join(CWD, 'r{}/ip_6_address.ref'.format(i))
+ refTableFile = os.path.join(CWD, "r{}/ip_6_address.ref".format(i))
if os.path.isfile(refTableFile):
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines())).splitlines(1)
+ expected = ("\n".join(expected.splitlines())).splitlines(1)
# Mask out Link-Local mac addresses
for ll in linklocals:
actual = re.sub(r"[ ]+proto [0-9a-z]+ +", " proto XXXX ", actual)
actual = re.sub(r"[ ]+nhid [0-9]+ +", " nhid XXXX ", actual)
# Remove ff00::/8 routes (seen on some kernels - not from FRR)
- actual = re.sub(r'ff00::/8.*', '', actual)
+ actual = re.sub(r"ff00::/8.*", "", actual)
# Strip empty lines
actual = actual.lstrip()
actual = actual.rstrip()
- actual = re.sub(r' +', ' ', actual)
+ actual = re.sub(r" +", " ", actual)
filtered_lines = []
for line in sorted(actual.splitlines()):
- if line.startswith('fe80::/64 ') \
- or line.startswith('unreachable fe80::/64 '):
+ if line.startswith("fe80::/64 ") or line.startswith(
+ "unreachable fe80::/64 "
+ ):
continue
filtered_lines.append(line)
- actual = '\n'.join(filtered_lines).splitlines(1)
+ actual = "\n".join(filtered_lines).splitlines(1)
# Print Actual table
# logger.info("Router r%s table" % i)
# logger.info(line.rstrip())
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual OSPFv3 IPv6 routing table",
- title2="expected OSPFv3 IPv6 routing table")
+ title2="expected OSPFv3 IPv6 routing table",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed Linux IPv6 Kernel Routing Table Check:\n%s\n' % (i, diff))
+ sys.stderr.write(
+ "r%s failed Linux IPv6 Kernel Routing Table Check:\n%s\n"
+ % (i, diff)
+ )
failures += 1
else:
logger.info("r%s ok" % i)
- assert failures == 0, "Linux Kernel IPv6 Routing Table verification failed for router r%s:\n%s" % (i, diff)
+ assert failures == 0, (
+ "Linux Kernel IPv6 Routing Table verification failed for router r%s:\n%s"
+ % (i, diff)
+ )
def test_shutdown_check_stderr():
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
- if os.environ.get('TOPOTESTS_CHECK_STDERR') is None:
- logger.info("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n")
- pytest.skip('Skipping test for Stderr output')
+ if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
+ logger.info(
+ "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n"
+ )
+ pytest.skip("Skipping test for Stderr output")
net = tgen.net
logger.info("******************************************")
for i in range(1, 5):
- net['r%s' % i].stopRouter()
- log = net['r%s' % i].getStdErr('ospf6d')
+ net["r%s" % i].stopRouter()
+ log = net["r%s" % i].getStdErr("ospf6d")
if log:
logger.info("\nRouter r%s OSPF6d StdErr Log:\n%s" % (i, log))
- log = net['r%s' % i].getStdErr('zebra')
+ log = net["r%s" % i].getStdErr("zebra")
if log:
logger.info("\nRouter r%s Zebra StdErr Log:\n%s" % (i, log))
def test_shutdown_check_memleak():
"Run the memory leak test and report results."
- if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None:
- logger.info("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)")
- pytest.skip('Skipping test for memory leaks')
+ if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None:
+ logger.info(
+ "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)"
+ )
+ pytest.skip("Skipping test for memory leaks")
tgen = get_topogen()
net = tgen.net
for i in range(1, 5):
- net['r%s' % i].stopRouter()
- net['r%s' % i].report_memory_leaks(
- os.environ.get('TOPOTESTS_CHECK_MEMLEAK'),
- os.path.basename(__file__))
+ net["r%s" % i].stopRouter()
+ net["r%s" % i].report_memory_leaks(
+ os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__)
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
# To suppress tracebacks, either use the following pytest call or
# add "--tb=no" to cli
def ifname_to_ifindex(ifname):
output = subprocess.check_output("ip link show %s" % ifname, shell=True)
- first_line = output.split('\n')[0]
- re_index = re.search('^(\d+):', first_line)
+ first_line = output.split("\n")[0]
+ re_index = re.search("^(\d+):", first_line)
if re_index:
return int(re_index.group(1))
# Thou shalt be root
if os.geteuid() != 0:
- sys.stderr.write('ERROR: You must have root privileges\n')
+ sys.stderr.write("ERROR: You must have root privileges\n")
sys.exit(1)
-logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)5s: %(message)s')
+logging.basicConfig(
+ level=logging.DEBUG, format="%(asctime)s %(levelname)5s: %(message)s"
+)
# Color the errors and warnings in red
-logging.addLevelName(logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR))
-logging.addLevelName(logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING))
+logging.addLevelName(
+ logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR)
+)
+logging.addLevelName(
+ logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING)
+)
log = logging.getLogger(__name__)
-parser = argparse.ArgumentParser(description='Multicast RX utility',
- version='1.0.0')
-parser.add_argument('group', help='Multicast IP')
-parser.add_argument('ifname', help='Interface name')
-parser.add_argument('--port', help='UDP port', default=1000)
-parser.add_argument('--sleep', help='Time to sleep before we stop waiting',
- default = 5)
+parser = argparse.ArgumentParser(description="Multicast RX utility", version="1.0.0")
+parser.add_argument("group", help="Multicast IP")
+parser.add_argument("ifname", help="Interface name")
+parser.add_argument("--port", help="UDP port", default=1000)
+parser.add_argument("--sleep", help="Time to sleep before we stop waiting", default=5)
args = parser.parse_args()
# Create the datagram socket
if newpid == 0:
ifindex = ifname_to_ifindex(args.ifname)
- mreq = struct.pack("=4sLL", socket.inet_aton(args.group), socket.INADDR_ANY, ifindex)
+ mreq = struct.pack(
+ "=4sLL", socket.inet_aton(args.group), socket.INADDR_ANY, ifindex
+ )
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
time.sleep(float(args.sleep))
sock.close()
import time
-logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)5s: %(message)s')
+logging.basicConfig(
+ level=logging.DEBUG, format="%(asctime)s %(levelname)5s: %(message)s"
+)
# Color the errors and warnings in red
-logging.addLevelName(logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR))
-logging.addLevelName(logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING))
+logging.addLevelName(
+ logging.ERROR, "\033[91m %s\033[0m" % logging.getLevelName(logging.ERROR)
+)
+logging.addLevelName(
+ logging.WARNING, "\033[91m%s\033[0m" % logging.getLevelName(logging.WARNING)
+)
log = logging.getLogger(__name__)
-parser = argparse.ArgumentParser(description='Multicast packet generator', version='1.0.0')
-parser.add_argument('group', help='Multicast IP')
-parser.add_argument('ifname', help='Interface name')
-parser.add_argument('--port', type=int, help='UDP port number', default=1000)
-parser.add_argument('--ttl', type=int, help='time-to-live', default=20)
-parser.add_argument('--count', type=int, help='Packets to send', default=1)
-parser.add_argument('--interval', type=int, help='ms between packets', default=100)
+parser = argparse.ArgumentParser(
+ description="Multicast packet generator", version="1.0.0"
+)
+parser.add_argument("group", help="Multicast IP")
+parser.add_argument("ifname", help="Interface name")
+parser.add_argument("--port", type=int, help="UDP port number", default=1000)
+parser.add_argument("--ttl", type=int, help="time-to-live", default=20)
+parser.add_argument("--count", type=int, help="Packets to send", default=1)
+parser.add_argument("--interval", type=int, help="ms between packets", default=100)
args = parser.parse_args()
# Create the datagram socket
# https://github.com/sivel/bonding/issues/10
#
# Bind our socket to ifname
-sock.setsockopt(socket.SOL_SOCKET,
- 25,
- struct.pack("%ds" % len(args.ifname), args.ifname))
+sock.setsockopt(
+ socket.SOL_SOCKET, 25, struct.pack("%ds" % len(args.ifname), args.ifname)
+)
# We need to make sure our sendto() finishes before we close the socket
sock.setblocking(1)
# Set the time-to-live
-sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack('b', args.ttl))
+sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, struct.pack("b", args.ttl))
ms = args.interval / 1000.0
# Send data to the multicast group
for x in xrange(args.count):
- log.info('TX multicast UDP packet to %s:%d on %s' % (args.group, args.port, args.ifname))
- sent = sock.sendto('foobar %d' % x, (args.group, args.port))
+ log.info(
+ "TX multicast UDP packet to %s:%d on %s" % (args.group, args.port, args.ifname)
+ )
+ sent = sock.sendto("foobar %d" % x, (args.group, args.port))
if args.count > 1 and ms:
time.sleep(ms)
from functools import partial
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from mininet.topo import Topo
+
class PIMTopo(Topo):
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
for routern in range(1, 4):
- tgen.add_router('r{}'.format(routern))
+ tgen.add_router("r{}".format(routern))
- tgen.add_router('rp')
+ tgen.add_router("rp")
# rp ------ r1 -------- r2
# \
# r1 <- sw1 -> r2
# r1-eth0 <-> r2-eth0
# 10.0.20.0/24
- sw = tgen.add_switch('sw1')
- sw.add_link(tgen.gears['r1'])
- sw.add_link(tgen.gears['r2'])
+ sw = tgen.add_switch("sw1")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["r2"])
# r1 <- sw2 -> rp
# r1-eth1 <-> rp-eth0
# 10.0.30.0/24
- sw = tgen.add_switch('sw2')
- sw.add_link(tgen.gears['r1'])
- sw.add_link(tgen.gears['rp'])
+ sw = tgen.add_switch("sw2")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["rp"])
# 10.0.40.0/24
- sw = tgen.add_switch('sw3')
- sw.add_link(tgen.gears['r1'])
- sw.add_link(tgen.gears['r3'])
+ sw = tgen.add_switch("sw3")
+ sw.add_link(tgen.gears["r1"])
+ sw.add_link(tgen.gears["r3"])
+
def setup_module(mod):
"Sets up the pytest environment"
# For all registered routers, load the zebra configuration file
for rname, router in tgen.routers().iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_PIM,
- os.path.join(CWD, '{}/pimd.conf'.format(rname))
+ TopoRouter.RD_PIM, os.path.join(CWD, "{}/pimd.conf".format(rname))
)
router.load_config(
- TopoRouter.RD_BGP,
- os.path.join(CWD, '{}/bgpd.conf'.format(rname))
- )
+ TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
+ )
# After loading the configurations, this function loads configured daemons.
tgen.start_router()
- #tgen.mininet_cli()
+ # tgen.mininet_cli()
def teardown_module(mod):
# This function tears down the whole topology.
tgen.stop_topology()
+
def test_pim_rp_setup():
"Ensure basic routing has come up and the rp has an outgoing interface"
- #Ensure rp and r1 establish pim neighbor ship and bgp has come up
- #Finally ensure that the rp has an outgoing interface on r1
+ # Ensure rp and r1 establish pim neighbor ship and bgp has come up
+ # Finally ensure that the rp has an outgoing interface on r1
tgen = get_topogen()
- r1 = tgen.gears['r1']
- json_file = '{}/{}/rp-info.json'.format(CWD, r1.name)
+ r1 = tgen.gears["r1"]
+ json_file = "{}/{}/rp-info.json".format(CWD, r1.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- r1, 'show ip pim rp-info json', expected)
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip pim rp-info json", expected
+ )
_, result = topotest.run_and_expect(test_func, None, count=15, wait=5)
assertmsg = '"{}" JSON output mismatches'.format(r1.name)
assert result is None, assertmsg
- #tgen.mininet_cli()
+ # tgen.mininet_cli()
+
def test_pim_send_mcast_stream():
"Establish a Multicast stream from r2 -> r1 and then ensure S,G is created as appropriate"
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- rp = tgen.gears['rp']
- r3 = tgen.gears['r3']
- r2 = tgen.gears['r2']
- r1 = tgen.gears['r1']
+ rp = tgen.gears["rp"]
+ r3 = tgen.gears["r3"]
+ r2 = tgen.gears["r2"]
+ r1 = tgen.gears["r1"]
# Let's establish a S,G stream from r2 -> r1
CWD = os.path.dirname(os.path.realpath(__file__))
- r2.run("{}/mcast-tx.py --ttl 5 --count 5 --interval 10 229.1.1.1 r2-eth0 > /tmp/bar".format(CWD))
+ r2.run(
+ "{}/mcast-tx.py --ttl 5 --count 5 --interval 10 229.1.1.1 r2-eth0 > /tmp/bar".format(
+ CWD
+ )
+ )
# And from r3 -> r1
- r3.run("{}/mcast-tx.py --ttl 5 --count 5 --interval 10 229.1.1.1 r3-eth0 > /tmp/bar".format(CWD))
+ r3.run(
+ "{}/mcast-tx.py --ttl 5 --count 5 --interval 10 229.1.1.1 r3-eth0 > /tmp/bar".format(
+ CWD
+ )
+ )
# Let's see that it shows up and we have established some basic state
out = r1.vtysh_cmd("show ip pim upstream json", isjson=True)
expected = {
- '229.1.1.1': {
- '10.0.20.2': {
- 'firstHopRouter': 1,
- 'joinState': 'NotJoined',
- 'regState': 'RegPrune',
- 'inboundInterface': 'r1-eth0',
+ "229.1.1.1": {
+ "10.0.20.2": {
+ "firstHopRouter": 1,
+ "joinState": "NotJoined",
+ "regState": "RegPrune",
+ "inboundInterface": "r1-eth0",
}
}
}
- assert topotest.json_cmp(out, expected) is None, 'failed to converge pim'
- #tgen.mininet_cli()
+ assert topotest.json_cmp(out, expected) is None, "failed to converge pim"
+ # tgen.mininet_cli()
+
def test_pim_rp_sees_stream():
"Ensure that the RP sees the stream and has acted accordingly"
tgen = get_topogen()
- rp = tgen.gears['rp']
- json_file = '{}/{}/upstream.json'.format(CWD, rp.name)
+ rp = tgen.gears["rp"]
+ json_file = "{}/{}/upstream.json".format(CWD, rp.name)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- rp, 'show ip pim upstream json', expected)
- _, result = topotest.run_and_expect(test_func, None, count=20, wait=.5)
+ test_func = partial(
+ topotest.router_json_cmp, rp, "show ip pim upstream json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=20, wait=0.5)
assertmsg = '"{}" JSON output mismatches'.format(rp.name)
assert result is None, assertmsg
+
def test_pim_igmp_report():
"Send a igmp report from r2->r1 and ensure that the *,G state is created on r1"
logger.info("Send a igmp report from r2-r1 and ensure *,G created")
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
- r2 = tgen.gears['r2']
- r1 = tgen.gears['r1']
+ r2 = tgen.gears["r2"]
+ r1 = tgen.gears["r1"]
# Let's send a igmp report from r2->r1
CWD = os.path.dirname(os.path.realpath(__file__))
out = r1.vtysh_cmd("show ip pim upstream json", isjson=True)
expected = {
- '229.1.1.2': {
- '*': {
- 'sourceIgmp': 1,
- 'joinState': 'Joined',
- 'regState': 'RegNoInfo',
- 'sptBit': 0,
+ "229.1.1.2": {
+ "*": {
+ "sourceIgmp": 1,
+ "joinState": "Joined",
+ "regState": "RegNoInfo",
+ "sptBit": 0,
}
}
}
- assert topotest.json_cmp(out, expected) is None, 'failed to converge pim'
+ assert topotest.json_cmp(out, expected) is None, "failed to converge pim"
def test_memory_leak():
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
-R>* 192.168.2.0/24 [120/3] via 193.1.1.2, r1-eth1
-R>* 192.168.3.0/24 [120/3] via 193.1.1.2, r1-eth1
-R>* 193.1.2.0/24 [120/2] via 193.1.1.2, r1-eth1
+R>* 192.168.2.0/24 [120/3] via 193.1.1.2, r1-eth1, weight 1
+R>* 192.168.3.0/24 [120/3] via 193.1.1.2, r1-eth1, weight 1
+R>* 193.1.2.0/24 [120/2] via 193.1.1.2, r1-eth1, weight 1
-R>* 192.168.2.0/24 [120/2] via 193.1.2.2, r2-eth1
-R>* 192.168.3.0/24 [120/2] via 193.1.2.2, r2-eth1
+R>* 192.168.2.0/24 [120/2] via 193.1.2.2, r2-eth1, weight 1
+R>* 192.168.3.0/24 [120/2] via 193.1.2.2, r2-eth1, weight 1
-R>* 193.1.1.0/26 [120/2] via 193.1.2.1, r3-eth1
+R>* 193.1.1.0/26 [120/2] via 193.1.2.1, r3-eth1, weight 1
##
#####################################################
+
class NetworkTopo(Topo):
"RIP Topology 1"
router = {}
#
# Setup Main Router
- router[1] = topotest.addRouter(self, 'r1')
+ router[1] = topotest.addRouter(self, "r1")
#
# Setup RIP Routers
for i in range(2, 4):
- router[i] = topotest.addRouter(self, 'r%s' % i)
+ router[i] = topotest.addRouter(self, "r%s" % i)
#
# Setup Switches
switch = {}
#
# On main router
# First switch is for a dummy interface (for local network)
- switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch)
- self.addLink(switch[1], router[1], intfName2='r1-eth0')
+ switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch)
+ self.addLink(switch[1], router[1], intfName2="r1-eth0")
#
# Switches for RIP
# switch 2 switch is for connection to RIP router
- switch[2] = self.addSwitch('sw2', cls=topotest.LegacySwitch)
- self.addLink(switch[2], router[1], intfName2='r1-eth1')
- self.addLink(switch[2], router[2], intfName2='r2-eth0')
+ switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch)
+ self.addLink(switch[2], router[1], intfName2="r1-eth1")
+ self.addLink(switch[2], router[2], intfName2="r2-eth0")
# switch 3 is between RIP routers
- switch[3] = self.addSwitch('sw3', cls=topotest.LegacySwitch)
- self.addLink(switch[3], router[2], intfName2='r2-eth1')
- self.addLink(switch[3], router[3], intfName2='r3-eth1')
+ switch[3] = self.addSwitch("sw3", cls=topotest.LegacySwitch)
+ self.addLink(switch[3], router[2], intfName2="r2-eth1")
+ self.addLink(switch[3], router[3], intfName2="r3-eth1")
# switch 4 is stub on remote RIP router
- switch[4] = self.addSwitch('sw4', cls=topotest.LegacySwitch)
- self.addLink(switch[4], router[3], intfName2='r3-eth0')
-
+ switch[4] = self.addSwitch("sw4", cls=topotest.LegacySwitch)
+ self.addLink(switch[4], router[3], intfName2="r3-eth0")
#####################################################
##
#####################################################
+
def setup_module(module):
global topo, net
print("******************************************\n")
print("Cleanup old Mininet runs")
- os.system('sudo mn -c > /dev/null 2>&1')
+ os.system("sudo mn -c > /dev/null 2>&1")
thisDir = os.path.dirname(os.path.realpath(__file__))
topo = NetworkTopo()
# Starting Routers
#
for i in range(1, 4):
- net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i))
- net['r%s' % i].loadConf('ripd', '%s/r%s/ripd.conf' % (thisDir, i))
- net['r%s' % i].startRouter()
+ net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i))
+ net["r%s" % i].loadConf("ripd", "%s/r%s/ripd.conf" % (thisDir, i))
+ net["r%s" % i].startRouter()
# For debugging after starting Quagga/FRR daemons, uncomment the next line
# CLI(net)
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
print("\n\n** Check if FRR/Quagga is running on each Router node")
# Make sure that all daemons are running
for i in range(1, 4):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
# Make sure that all daemons are still running
for i in range(1, 4):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 4):
- refTableFile = '%s/r%s/rip_status.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/rip_status.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show ip rip status" 2> /dev/null').rstrip()
- # Drop time in next due
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show ip rip status" 2> /dev/null')
+ .rstrip()
+ )
+ # Drop time in next due
actual = re.sub(r"in [0-9]+ seconds", "in XX seconds", actual)
# Drop time in last update
actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual)
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual IP RIP status",
- title2="expected IP RIP status")
+ title2="expected IP RIP status",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed IP RIP status check:\n%s\n' % (i, diff))
+ sys.stderr.write("r%s failed IP RIP status check:\n%s\n" % (i, diff))
failures += 1
else:
print("r%s ok" % i)
# Make sure that all daemons are still running
for i in range(1, 4):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 4):
- refTableFile = '%s/r%s/show_ip_rip.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/show_ip_rip.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show ip rip" 2> /dev/null').rstrip()
+ actual = net["r%s" % i].cmd('vtysh -c "show ip rip" 2> /dev/null').rstrip()
# Drop Time
actual = re.sub(r"[0-9][0-9]:[0-5][0-9]", "XX:XX", actual)
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual SHOW IP RIP",
- title2="expected SHOW IP RIP")
+ title2="expected SHOW IP RIP",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed SHOW IP RIP check:\n%s\n' % (i, diff))
+ sys.stderr.write("r%s failed SHOW IP RIP check:\n%s\n" % (i, diff))
failures += 1
else:
print("r%s ok" % i)
# Make sure that all daemons are still running
for i in range(1, 4):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 4):
- refTableFile = '%s/r%s/show_ip_route.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/show_ip_route.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show ip route" 2> /dev/null | grep "^R"').rstrip()
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show ip route" 2> /dev/null | grep "^R"')
+ .rstrip()
+ )
# Drop timers on end of line (older Quagga Versions)
actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual)
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual Zebra IPv4 routing table",
- title2="expected Zebra IPv4 routing table")
+ title2="expected Zebra IPv4 routing table",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed Zebra IPv4 Routing Table Check:\n%s\n' % (i, diff))
+ sys.stderr.write(
+ "r%s failed Zebra IPv4 Routing Table Check:\n%s\n" % (i, diff)
+ )
failures += 1
else:
print("r%s ok" % i)
- assert failures == 0, "Zebra IPv4 Routing Table verification failed for router r%s:\n%s" % (i, diff)
+ assert failures == 0, (
+ "Zebra IPv4 Routing Table verification failed for router r%s:\n%s"
+ % (i, diff)
+ )
# Make sure that all daemons are still running
for i in range(1, 4):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
- if os.environ.get('TOPOTESTS_CHECK_STDERR') is None:
- pytest.skip('Skipping test for Stderr output and memory leaks')
+ if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
+ pytest.skip("Skipping test for Stderr output and memory leaks")
thisDir = os.path.dirname(os.path.realpath(__file__))
print("\n\n** Verifing unexpected STDERR output from daemons")
print("******************************************\n")
- net['r1'].stopRouter()
+ net["r1"].stopRouter()
- log = net['r1'].getStdErr('ripd')
+ log = net["r1"].getStdErr("ripd")
if log:
print("\nRIPd StdErr Log:\n" + log)
- log = net['r1'].getStdErr('zebra')
+ log = net["r1"].getStdErr("zebra")
if log:
print("\nZebra StdErr Log:\n" + log)
-if __name__ == '__main__':
+if __name__ == "__main__":
- setLogLevel('info')
+ setLogLevel("info")
# To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli
# retval = pytest.main(["-s", "--tb=no"])
retval = pytest.main(["-s"])
-R>* fc00:6::/62 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1
-R>* fc00:7::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1
-R>* fc00:7:1111::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1
+R>* fc00:6::/62 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1, weight 1
+R>* fc00:7::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1, weight 1
+R>* fc00:7:1111::/64 [120/3] via fe80::XXXX:XXXX:XXXX:XXXX, r1-eth1, weight 1
-R>* fc00:7::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1
-R>* fc00:7:1111::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1
+R>* fc00:7::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1, weight 1
+R>* fc00:7:1111::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r2-eth1, weight 1
-R>* fc00:5::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r3-eth1
+R>* fc00:5::/64 [120/2] via fe80::XXXX:XXXX:XXXX:XXXX, r3-eth1, weight 1
##
#####################################################
+
class NetworkTopo(Topo):
"RIPng Topology 1"
router = {}
#
# Setup Main Router
- router[1] = topotest.addRouter(self, 'r1')
+ router[1] = topotest.addRouter(self, "r1")
#
# Setup RIPng Routers
for i in range(2, 4):
- router[i] = topotest.addRouter(self, 'r%s' % i)
+ router[i] = topotest.addRouter(self, "r%s" % i)
# Setup Switches
switch = {}
#
# On main router
# First switch is for a dummy interface (for local network)
- switch[1] = self.addSwitch('sw1', cls=topotest.LegacySwitch)
- self.addLink(switch[1], router[1], intfName2='r1-eth0')
+ switch[1] = self.addSwitch("sw1", cls=topotest.LegacySwitch)
+ self.addLink(switch[1], router[1], intfName2="r1-eth0")
#
# Switches for RIPng
# switch 2 switch is for connection to RIP router
- switch[2] = self.addSwitch('sw2', cls=topotest.LegacySwitch)
- self.addLink(switch[2], router[1], intfName2='r1-eth1')
- self.addLink(switch[2], router[2], intfName2='r2-eth0')
+ switch[2] = self.addSwitch("sw2", cls=topotest.LegacySwitch)
+ self.addLink(switch[2], router[1], intfName2="r1-eth1")
+ self.addLink(switch[2], router[2], intfName2="r2-eth0")
# switch 3 is between RIP routers
- switch[3] = self.addSwitch('sw3', cls=topotest.LegacySwitch)
- self.addLink(switch[3], router[2], intfName2='r2-eth1')
- self.addLink(switch[3], router[3], intfName2='r3-eth1')
+ switch[3] = self.addSwitch("sw3", cls=topotest.LegacySwitch)
+ self.addLink(switch[3], router[2], intfName2="r2-eth1")
+ self.addLink(switch[3], router[3], intfName2="r3-eth1")
# switch 4 is stub on remote RIP router
- switch[4] = self.addSwitch('sw4', cls=topotest.LegacySwitch)
- self.addLink(switch[4], router[3], intfName2='r3-eth0')
-
+ switch[4] = self.addSwitch("sw4", cls=topotest.LegacySwitch)
+ self.addLink(switch[4], router[3], intfName2="r3-eth0")
#####################################################
##
#####################################################
+
def setup_module(module):
global topo, net
print("******************************************\n")
print("Cleanup old Mininet runs")
- os.system('sudo mn -c > /dev/null 2>&1')
+ os.system("sudo mn -c > /dev/null 2>&1")
thisDir = os.path.dirname(os.path.realpath(__file__))
topo = NetworkTopo()
# Starting Routers
#
for i in range(1, 4):
- net['r%s' % i].loadConf('zebra', '%s/r%s/zebra.conf' % (thisDir, i))
- net['r%s' % i].loadConf('ripngd', '%s/r%s/ripngd.conf' % (thisDir, i))
- net['r%s' % i].startRouter()
+ net["r%s" % i].loadConf("zebra", "%s/r%s/zebra.conf" % (thisDir, i))
+ net["r%s" % i].loadConf("ripngd", "%s/r%s/ripngd.conf" % (thisDir, i))
+ net["r%s" % i].startRouter()
# For debugging after starting Quagga/FRR daemons, uncomment the next line
# CLI(net)
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
print("\n\n** Check if FRR/Quagga is running on each Router node")
# Starting Routers
for i in range(1, 4):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
# Make sure that all daemons are running
for i in range(1, 4):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
- #CLI(net)
+ # CLI(net)
def test_ripng_status():
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 4):
- refTableFile = '%s/r%s/ripng_status.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/ripng_status.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show ipv6 ripng status" 2> /dev/null').rstrip()
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show ipv6 ripng status" 2> /dev/null')
+ .rstrip()
+ )
# Mask out Link-Local mac address portion. They are random...
actual = re.sub(r" fe80::[0-9a-f:]+", " fe80::XXXX:XXXX:XXXX:XXXX", actual)
- # Drop time in next due
+ # Drop time in next due
actual = re.sub(r"in [0-9]+ seconds", "in XX seconds", actual)
# Drop time in last update
actual = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", actual)
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual IPv6 RIPng status",
- title2="expected IPv6 RIPng status")
+ title2="expected IPv6 RIPng status",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed IPv6 RIPng status check:\n%s\n' % (i, diff))
+ sys.stderr.write(
+ "r%s failed IPv6 RIPng status check:\n%s\n" % (i, diff)
+ )
failures += 1
else:
print("r%s ok" % i)
- assert failures == 0, "IPv6 RIPng status failed for router r%s:\n%s" % (i, diff)
+ assert failures == 0, "IPv6 RIPng status failed for router r%s:\n%s" % (
+ i,
+ diff,
+ )
# Make sure that all daemons are running
for i in range(1, 4):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 4):
- refTableFile = '%s/r%s/show_ipv6_ripng.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/show_ipv6_ripng.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show ipv6 ripng" 2> /dev/null').rstrip()
+ actual = (
+ net["r%s" % i].cmd('vtysh -c "show ipv6 ripng" 2> /dev/null').rstrip()
+ )
# Drop Time
actual = re.sub(r" [0-9][0-9]:[0-5][0-9]", " XX:XX", actual)
# Mask out Link-Local mac address portion. They are random...
- actual = re.sub(r" fe80::[0-9a-f: ]+", " fe80::XXXX:XXXX:XXXX:XXXX ", actual)
+ actual = re.sub(
+ r" fe80::[0-9a-f: ]+", " fe80::XXXX:XXXX:XXXX:XXXX ", actual
+ )
# Remove trailing spaces on all lines
- actual = '\n'.join([line.rstrip() for line in actual.splitlines()])
+ actual = "\n".join([line.rstrip() for line in actual.splitlines()])
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual SHOW IPv6 RIPng",
- title2="expected SHOW IPv6 RIPng")
+ title2="expected SHOW IPv6 RIPng",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed SHOW IPv6 RIPng check:\n%s\n' % (i, diff))
+ sys.stderr.write("r%s failed SHOW IPv6 RIPng check:\n%s\n" % (i, diff))
failures += 1
else:
print("r%s ok" % i)
- assert failures == 0, "SHOW IPv6 RIPng failed for router r%s:\n%s" % (i, diff)
+ assert failures == 0, "SHOW IPv6 RIPng failed for router r%s:\n%s" % (
+ i,
+ diff,
+ )
# Make sure that all daemons are running
for i in range(1, 4):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
thisDir = os.path.dirname(os.path.realpath(__file__))
print("******************************************\n")
failures = 0
for i in range(1, 4):
- refTableFile = '%s/r%s/show_ipv6_route.ref' % (thisDir, i)
+ refTableFile = "%s/r%s/show_ipv6_route.ref" % (thisDir, i)
if os.path.isfile(refTableFile):
# Read expected result from file
expected = open(refTableFile).read().rstrip()
# Fix newlines (make them all the same)
- expected = ('\n'.join(expected.splitlines()) + '\n').splitlines(1)
+ expected = ("\n".join(expected.splitlines()) + "\n").splitlines(1)
# Actual output from router
- actual = net['r%s' % i].cmd('vtysh -c "show ipv6 route" 2> /dev/null | grep "^R"').rstrip()
+ actual = (
+ net["r%s" % i]
+ .cmd('vtysh -c "show ipv6 route" 2> /dev/null | grep "^R"')
+ .rstrip()
+ )
# Mask out Link-Local mac address portion. They are random...
actual = re.sub(r" fe80::[0-9a-f:]+", " fe80::XXXX:XXXX:XXXX:XXXX", actual)
# Drop timers on end of line (older Quagga Versions)
actual = re.sub(r", [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "", actual)
# Fix newlines (make them all the same)
- actual = ('\n'.join(actual.splitlines()) + '\n').splitlines(1)
+ actual = ("\n".join(actual.splitlines()) + "\n").splitlines(1)
# Generate Diff
- diff = topotest.get_textdiff(actual, expected,
+ diff = topotest.get_textdiff(
+ actual,
+ expected,
title1="actual Zebra IPv6 routing table",
- title2="expected Zebra IPv6 routing table")
+ title2="expected Zebra IPv6 routing table",
+ )
# Empty string if it matches, otherwise diff contains unified diff
if diff:
- sys.stderr.write('r%s failed Zebra IPv6 Routing Table Check:\n%s\n' % (i, diff))
+ sys.stderr.write(
+ "r%s failed Zebra IPv6 Routing Table Check:\n%s\n" % (i, diff)
+ )
failures += 1
else:
print("r%s ok" % i)
- assert failures == 0, "Zebra IPv6 Routing Table verification failed for router r%s:\n%s" % (i, diff)
+ assert failures == 0, (
+ "Zebra IPv6 Routing Table verification failed for router r%s:\n%s"
+ % (i, diff)
+ )
# Make sure that all daemons are running
for i in range(1, 4):
- fatal_error = net['r%s' % i].checkRouterRunning()
+ fatal_error = net["r%s" % i].checkRouterRunning()
assert fatal_error == "", fatal_error
# For debugging after starting FRR/Quagga daemons, uncomment the next line
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
- if os.environ.get('TOPOTESTS_CHECK_STDERR') is None:
- print("SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n")
- pytest.skip('Skipping test for Stderr output')
+ if os.environ.get("TOPOTESTS_CHECK_STDERR") is None:
+ print(
+ "SKIPPED final check on StdErr output: Disabled (TOPOTESTS_CHECK_STDERR undefined)\n"
+ )
+ pytest.skip("Skipping test for Stderr output")
thisDir = os.path.dirname(os.path.realpath(__file__))
print("\n\n** Verifying unexpected STDERR output from daemons")
print("******************************************\n")
- net['r1'].stopRouter()
+ net["r1"].stopRouter()
- log = net['r1'].getStdErr('ripngd')
+ log = net["r1"].getStdErr("ripngd")
if log:
print("\nRIPngd StdErr Log:\n" + log)
- log = net['r1'].getStdErr('zebra')
+ log = net["r1"].getStdErr("zebra")
if log:
print("\nZebra StdErr Log:\n" + log)
global net
# Skip if previous fatal error condition is raised
- if (fatal_error != ""):
+ if fatal_error != "":
pytest.skip(fatal_error)
- if os.environ.get('TOPOTESTS_CHECK_MEMLEAK') is None:
- print("SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n")
- pytest.skip('Skipping test for memory leaks')
-
+ if os.environ.get("TOPOTESTS_CHECK_MEMLEAK") is None:
+ print(
+ "SKIPPED final check on Memory leaks: Disabled (TOPOTESTS_CHECK_MEMLEAK undefined)\n"
+ )
+ pytest.skip("Skipping test for memory leaks")
+
thisDir = os.path.dirname(os.path.realpath(__file__))
- net['r1'].stopRouter()
- net['r1'].report_memory_leaks(os.environ.get('TOPOTESTS_CHECK_MEMLEAK'), os.path.basename(__file__))
+ net["r1"].stopRouter()
+ net["r1"].report_memory_leaks(
+ os.environ.get("TOPOTESTS_CHECK_MEMLEAK"), os.path.basename(__file__)
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
- setLogLevel('info')
+ setLogLevel("info")
# To suppress tracebacks, either use the following pytest call or add "--tb=no" to cli
# retval = pytest.main(["-s", "--tb=no"])
retval = pytest.main(["-s"])
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
-sys.path.append(os.path.join(CWD, '../'))
+sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
# Required to instantiate the topology builder class.
from mininet.topo import Topo
+
class ZebraTopo(Topo):
"Test topology builder"
+
def build(self, *_args, **_opts):
"Build function"
tgen = get_topogen(self)
- tgen.add_router('r1')
+ tgen.add_router("r1")
        # Create an empty network for router 1
- switch = tgen.add_switch('s1')
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r1'])
- switch.add_link(tgen.gears['r1'])
+ switch = tgen.add_switch("s1")
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r1"])
+ switch.add_link(tgen.gears["r1"])
+
def setup_module(mod):
"Sets up the pytest environment"
router_list = tgen.routers()
for rname, router in router_list.iteritems():
router.load_config(
- TopoRouter.RD_ZEBRA,
- os.path.join(CWD, '{}/zebra.conf'.format(rname))
+ TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
# Initialize all routers.
tgen.start_router()
+
def teardown_module(mod):
"Teardown the pytest environment"
tgen = get_topogen()
tgen.stop_topology()
+
def test_zebra_kernel_admin_distance():
"Test some basic kernel routes added that should be accepted"
logger.info("Test some basic kernel routes that should be accepted")
tgen = get_topogen()
if tgen.routers_have_failure():
- pytest.skip('skipped because of router(s) failure')
+ pytest.skip("skipped because of router(s) failure")
- r1 = tgen.gears['r1']
+ r1 = tgen.gears["r1"]
# Route with 255/8192 metric
- r1.run('ip route add 4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric 4278198272')
+ r1.run("ip route add 4.5.1.0/24 via 192.168.210.2 dev r1-eth0 metric 4278198272")
# Route with 1/1 metric
- r1.run('ip route add 4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric 16777217')
+ r1.run("ip route add 4.5.2.0/24 via 192.168.211.2 dev r1-eth1 metric 16777217")
# Route with 10/1 metric
- r1.run('ip route add 4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric 167772161')
+ r1.run("ip route add 4.5.3.0/24 via 192.168.212.2 dev r1-eth2 metric 167772161")
# Same route with a 160/1 metric
- r1.run('ip route add 4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric 2684354561')
+ r1.run("ip route add 4.5.3.0/24 via 192.168.213.2 dev r1-eth3 metric 2684354561")
- #Currently I believe we have a bug here with the same route and different
- #metric. That needs to be properly resolved. Making a note for
- #coming back around later and fixing this.
- #tgen.mininet_cli()
+ # Currently I believe we have a bug here with the same route and different
+ # metric. That needs to be properly resolved. Making a note for
+ # coming back around later and fixing this.
+ # tgen.mininet_cli()
for i in range(1, 2):
- json_file = '{}/r1/v4_route_{}.json'.format(CWD, i)
+ json_file = "{}/r1/v4_route_{}.json".format(CWD, i)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- r1,
- 'show ip route 4.5.{}.0 json'.format(i),
- expected)
- _, result = topotest.run_and_expect(test_func, None, count=2, wait=.5)
+ test_func = partial(
+ topotest.router_json_cmp,
+ r1,
+ "show ip route 4.5.{}.0 json".format(i),
+ expected,
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
assertmsg = '"r1" JSON output mismatches'
assert result is None, assertmsg
- #tgen.mininet_cli()
+ # tgen.mininet_cli()
+
def test_zebra_kernel_override():
"Test that a FRR route with a lower admin distance takes over"
logger.info("Test kernel override with a better admin distance")
tgen = get_topogen()
- if (tgen.routers_have_failure()):
+ if tgen.routers_have_failure():
        ptyest.skip("skipped because of previous test failure")  # FIXME: typo, should be pytest.skip — 'ptyest' raises NameError if this path is hit
- r1 = tgen.gears['r1']
+ r1 = tgen.gears["r1"]
r1.vtysh_cmd("conf\nip route 4.5.1.0/24 192.168.216.3")
- json_file = '{}/r1/v4_route_1_static_override.json'.format(CWD)
+ json_file = "{}/r1/v4_route_1_static_override.json".format(CWD)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- r1, 'show ip route 4.5.1.0 json', expected)
- _, result = topotest.run_and_expect(test_func, None, count=2, wait=.5)
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route 4.5.1.0 json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
assert result is None, '"r1" JSON output mismatches'
- logger.info("Test that the removal of the static route allows the kernel to take back over")
+ logger.info(
+ "Test that the removal of the static route allows the kernel to take back over"
+ )
r1.vtysh_cmd("conf\nno ip route 4.5.1.0/24 192.168.216.3")
- json_file = '{}/r1/v4_route_1.json'.format(CWD)
+ json_file = "{}/r1/v4_route_1.json".format(CWD)
expected = json.loads(open(json_file).read())
- test_func = partial(topotest.router_json_cmp,
- r1, 'show ip route 4.5.1.0 json', expected)
- _, result = topotest.run_and_expect(test_func, None, count=2, wait=.5)
+ test_func = partial(
+ topotest.router_json_cmp, r1, "show ip route 4.5.1.0 json", expected
+ )
+ _, result = topotest.run_and_expect(test_func, None, count=2, wait=0.5)
assert result is None, '"r1" JSON output mismatches'
"Run the memory leak test and report results."
tgen = get_topogen()
if not tgen.is_memleak_enabled():
- pytest.skip('Memory leak test/report is disabled')
+ pytest.skip("Memory leak test/report is disabled")
tgen.report_memory_leaks()
-if __name__ == '__main__':
+
+if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
--- /dev/null
+// spatch -sp_file tools/coccinelle/cast_to_larger_sizes.cocci --recursive-includes ./
+
+@r@
+typedef uint8_t;
+typedef uint16_t;
+typedef uint32_t;
+typedef uint64_t;
+uint8_t *i8;
+position p;
+@@
+
+ \(
+ (uint64_t *) i8@p\|(uint32_t *) i8@p\|(uint16_t *) i8@p
+ \)
+
+@script:python@
+p << r.p;
+@@
+
+coccilib.report.print_report(p[0],"Bad typecast to larger size")
if (argc != 1)
usage(EXIT_FAILURE);
- yang_init();
+ yang_init(false);
if (search_path)
ly_ctx_set_searchdir(ly_native_ctx, search_path);
if (argc != 1)
usage(EXIT_FAILURE);
- yang_init();
+ yang_init(false);
/* Load YANG module. */
module = yang_module_load(argv[0]);
strlcat(integrate_sav, CONF_BACKUP_EXT, integrate_sav_sz);
/* Move current configuration file to backup config file. */
- if (unlink(integrate_sav) != 0) {
- vty_out(vty, "Warning: %s unlink failed\n", integrate_sav);
- }
- if (rename(fbackup, integrate_sav) != 0) {
- vty_out(vty, "Error renaming %s to %s\n", fbackup,
- integrate_sav);
- }
+ if (unlink(integrate_sav) != 0 && errno != ENOENT)
+ vty_out(vty, "Unlink failed for %s: %s\n", integrate_sav,
+ strerror(errno));
+ if (rename(fbackup, integrate_sav) != 0 && errno != ENOENT)
+ vty_out(vty, "Error renaming %s to %s: %s\n", fbackup,
+ integrate_sav, strerror(errno));
free(integrate_sav);
}
description
"This module defines a model for managing FRR isisd daemon.";
+ revision 2020-04-06 {
+ description
+ "Group LSP timers in a container so that they can be displayed and
+ configured together";
+ }
revision 2019-12-17 {
description
"Changed default area is-type to level-1-2";
revision 2019-09-09 {
description
"Changed interface references to use
- frr-interface:interface-ref typedef";
+ frr-interface:interface-ref typedef";
}
revision 2018-07-26 {
description
}
grouping interface-config {
- description "Interface configuration grouping";
-
+ description
+ "Interface configuration grouping";
leaf area-tag {
type string;
mandatory true;
leaf bfd-monitoring {
type boolean;
- default false;
- description "Monitor IS-IS peers on this circuit.";
+ default "false";
+ description
+ "Monitor IS-IS peers on this circuit.";
}
container csnp-interval {
leaf network-type {
type network-type;
- default "broadcast";
must "(. = \"point-to-point\") or (. = \"broadcast\")";
+ default "broadcast";
description
"Explicitly configured type of IS-IS circuit (broadcast or point-to-point).";
}
}
grouping adjacency-state {
+ description
+ "Adjacency state";
container adjacencies {
config false;
+ description
+ "This container lists the adjacencies of
+ the local node.";
list adjacency {
+ description
+ "List of operational adjacencies.";
leaf neighbor-sys-type {
type level;
description
"Level capability of neighboring system";
}
+
leaf neighbor-sysid {
type system-id;
description
"The system-id of the neighbor";
}
+
leaf neighbor-extended-circuit-id {
type extended-circuit-id;
description
"Circuit ID of the neighbor";
}
+
leaf neighbor-snpa {
type snpa;
description
"SNPA of the neighbor";
}
+
leaf hold-timer {
type uint16;
- units seconds;
+ units "seconds";
description
"The holding time in seconds for this
adjacency. This value is based on
received hello PDUs and the elapsed
time since receipt.";
}
+
leaf neighbor-priority {
type uint8 {
range "0 .. 127";
"Priority of the neighboring IS for becoming
the DIS.";
}
+
leaf state {
type adj-state-type;
description
"This leaf describes the state of the interface.";
}
-
- description
- "List of operational adjacencies.";
}
- description
- "This container lists the adjacencies of
- the local node.";
}
- description
- "Adjacency state";
}
grouping event-counters {
+ description
+ "Grouping for IS-IS interface event counters";
container event-counters {
config false;
+ description
+ "IS-IS interface event counters.";
leaf adjacency-changes {
type uint32;
description
"The number of times an adjacency state change has
occurred on this interface.";
}
+
leaf adjacency-number {
type uint32;
description
"The number of adjacencies on this interface.";
}
+
leaf init-fails {
type uint32;
description
as PPP NCP failures. Failures to form an
adjacency are counted by adjacency-rejects.";
}
+
leaf adjacency-rejects {
type uint32;
description
"The number of times an adjacency has been
rejected on this interface.";
}
+
leaf id-len-mismatch {
type uint32;
description
field length different from that for this
system has been received on this interface.";
}
+
leaf max-area-addresses-mismatch {
type uint32;
description
max area address field differing from that of
this system.";
}
+
leaf authentication-type-fails {
type uint32;
description
"Number of authentication type mismatches.";
}
+
leaf authentication-fails {
type uint32;
description
"Number of authentication key failures.";
}
- description "IS-IS interface event counters.";
}
- description
- "Grouping for IS-IS interface event counters";
}
grouping interface-state {
description
"IS-IS interface operational state.";
uses adjacency-state;
+
uses event-counters;
}
"MTU of an LSP.";
}
- container refresh-interval {
+ container timers {
description
- "";
- leaf level-1 {
- type uint16;
- units "seconds";
- default "900";
+ "LSP-related timers";
+ container level-1 {
description
- "LSP refresh interval for level-1.";
- }
+ "Level-1 LSP-related timers";
+ leaf refresh-interval {
+ type uint16;
+ units "seconds";
+ default "900";
+ description
+ "LSP refresh interval for level-1.";
+ }
- leaf level-2 {
- type uint16;
- units "seconds";
- default "900";
- description
- "LSP refresh interval for level-2.";
- }
- }
+ leaf maximum-lifetime {
+ type uint16 {
+ range "350..65535";
+ }
+ units "seconds";
+ must ". >= ../refresh-interval + 300";
+ default "1200";
+ description
+ "Maximum LSP lifetime for level-1.";
+ }
- container maximum-lifetime {
- description
- "Maximum LSP lifetime.";
- leaf level-1 {
- type uint16 {
- range "350..65535";
+ leaf generation-interval {
+ type uint16 {
+ range "1..120";
+ }
+ units "seconds";
+ must ". < ../refresh-interval";
+ default "30";
+ description
+ "Minimum time allowed before level-1 LSP retransmissions.";
}
- units "seconds";
- must ". >= ../../refresh-interval/level-1 + 300";
- default "1200";
- description
- "Maximum LSP lifetime for level-1.";
}
- leaf level-2 {
- type uint16 {
- range "350..65535";
- }
- units "seconds";
- must ". >= ../../refresh-interval/level-2 + 300";
- default "1200";
+ container level-2 {
description
- "Maximum LSP lifetime for level-2.";
- }
- }
+ "Level-2 LSP-related timers";
+ leaf refresh-interval {
+ type uint16;
+ units "seconds";
+ default "900";
+ description
+ "LSP refresh interval for level-2.";
+ }
- container generation-interval {
- description
- "Minimum LSP regeneration interval.";
- leaf level-1 {
- type uint16 {
- range "1..120";
+ leaf maximum-lifetime {
+ type uint16 {
+ range "350..65535";
+ }
+ units "seconds";
+ must ". >= ../refresh-interval + 300";
+ default "1200";
+ description
+ "Maximum LSP lifetime for level-2.";
}
- units "seconds";
- must ". < ../../refresh-interval/level-1";
- default "30";
- description
- "Minimum time allowed before level-1 LSP retransmissions.";
- }
- leaf level-2 {
- type uint16 {
- range "1..120";
+ leaf generation-interval {
+ type uint16 {
+ range "1..120";
+ }
+ units "seconds";
+ must ". < ../refresh-interval";
+ default "30";
+ description
+ "Minimum time allowed before level-2 LSP retransmissions.";
}
- units "seconds";
- must ". < ../../refresh-interval/level-2";
- default "30";
- description
- "Minimum time allowed before level-2 LSP retransmissions.";
}
}
}
description
"IS-IS interface parameters.";
uses interface-config;
+
uses interface-state;
}
}
case interface {
when "./condition = 'interface'";
leaf interface {
- type frr-interface:interface-ref;
+ type string;
}
}
case access-list-num {
int lm_get_chunk_response(struct label_manager_chunk *lmc, uint8_t proto,
uint16_t instance, vrf_id_t vrf_id)
{
+ if (!lmc)
+ flog_err(EC_ZEBRA_LM_CANNOT_ASSIGN_CHUNK,
+ "Unable to assign Label Chunk to %s instance %u",
+ zebra_route_string(proto), instance);
+ else if (IS_ZEBRA_DEBUG_PACKET)
+ zlog_debug("Assigned Label Chunk %u - %u to %s instance %u",
+ lmc->start, lmc->end, zebra_route_string(proto),
+ instance);
+
struct zserv *client = zserv_find_client(proto, instance);
if (!client) {
zlog_err("%s: could not find client for daemon %s instance %u",
case 'a':
allow_delete = 1;
break;
- case 'e':
- zrouter.multipath_num = atoi(optarg);
- if (zrouter.multipath_num > MULTIPATH_NUM
- || zrouter.multipath_num <= 0) {
+ case 'e': {
+ unsigned long int parsed_multipath =
+ strtoul(optarg, NULL, 10);
+ if (parsed_multipath == 0
+ || parsed_multipath > MULTIPATH_NUM
+ || parsed_multipath > UINT32_MAX) {
flog_err(
EC_ZEBRA_BAD_MULTIPATH_NUM,
- "Multipath Number specified must be less than %d and greater than 0",
+ "Multipath Number specified must be less than %u and greater than 0",
MULTIPATH_NUM);
return 1;
}
+ zrouter.multipath_num = parsed_multipath;
break;
+ }
case 'o':
vrf_default_name_configured = optarg;
break;
/* call hook to get a chunk using wrapper */
lm_get_chunk_call(&lmc, proto, instance, keep, size, base, vrf_id);
- if (!lmc)
- flog_err(
- EC_ZEBRA_LM_CANNOT_ASSIGN_CHUNK,
- "Unable to assign Label Chunk of size %u to %s instance %u",
- size, zebra_route_string(proto), instance);
- else
- if (IS_ZEBRA_DEBUG_PACKET)
- zlog_debug("Assigned Label Chunk %u - %u to %s instance %u",
- lmc->start, lmc->end,
- zebra_route_string(proto), instance);
-
stream_failure:
return;
}
json_labels);
}
+ if (nexthop->weight)
+ json_object_int_add(json_nexthop, "weight",
+ nexthop->weight);
+
json_object_array_add(json_nexthops, json_nexthop);
}
show_route_nexthop_helper(vty, re, nexthop);
+ if (nexthop->weight)
+ vty_out(vty, ", weight %u", nexthop->weight);
+
vty_out(vty, ", %s\n", up_str);
/* Check for backup info */
{
struct zebra_vrf *zvrf = zebra_vrf_get_evpn();
- if (CHECK_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG)) {
+ if (zvrf && CHECK_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG)) {
if (IS_ZEBRA_DEBUG_VXLAN)
zlog_debug("VxLAN SG updates to PIM, stop");
UNSET_FLAG(zvrf->flags, ZEBRA_PIM_SEND_VXLAN_SG);